Bluetooth: Remove "Power-on" check from Mesh feature
platform/kernel/linux-starfive.git: net/bluetooth/mgmt.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43
44 #define MGMT_VERSION    1
45 #define MGMT_REVISION   22
46
47 static const u16 mgmt_commands[] = {
48         MGMT_OP_READ_INDEX_LIST,
49         MGMT_OP_READ_INFO,
50         MGMT_OP_SET_POWERED,
51         MGMT_OP_SET_DISCOVERABLE,
52         MGMT_OP_SET_CONNECTABLE,
53         MGMT_OP_SET_FAST_CONNECTABLE,
54         MGMT_OP_SET_BONDABLE,
55         MGMT_OP_SET_LINK_SECURITY,
56         MGMT_OP_SET_SSP,
57         MGMT_OP_SET_HS,
58         MGMT_OP_SET_LE,
59         MGMT_OP_SET_DEV_CLASS,
60         MGMT_OP_SET_LOCAL_NAME,
61         MGMT_OP_ADD_UUID,
62         MGMT_OP_REMOVE_UUID,
63         MGMT_OP_LOAD_LINK_KEYS,
64         MGMT_OP_LOAD_LONG_TERM_KEYS,
65         MGMT_OP_DISCONNECT,
66         MGMT_OP_GET_CONNECTIONS,
67         MGMT_OP_PIN_CODE_REPLY,
68         MGMT_OP_PIN_CODE_NEG_REPLY,
69         MGMT_OP_SET_IO_CAPABILITY,
70         MGMT_OP_PAIR_DEVICE,
71         MGMT_OP_CANCEL_PAIR_DEVICE,
72         MGMT_OP_UNPAIR_DEVICE,
73         MGMT_OP_USER_CONFIRM_REPLY,
74         MGMT_OP_USER_CONFIRM_NEG_REPLY,
75         MGMT_OP_USER_PASSKEY_REPLY,
76         MGMT_OP_USER_PASSKEY_NEG_REPLY,
77         MGMT_OP_READ_LOCAL_OOB_DATA,
78         MGMT_OP_ADD_REMOTE_OOB_DATA,
79         MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80         MGMT_OP_START_DISCOVERY,
81         MGMT_OP_STOP_DISCOVERY,
82         MGMT_OP_CONFIRM_NAME,
83         MGMT_OP_BLOCK_DEVICE,
84         MGMT_OP_UNBLOCK_DEVICE,
85         MGMT_OP_SET_DEVICE_ID,
86         MGMT_OP_SET_ADVERTISING,
87         MGMT_OP_SET_BREDR,
88         MGMT_OP_SET_STATIC_ADDRESS,
89         MGMT_OP_SET_SCAN_PARAMS,
90         MGMT_OP_SET_SECURE_CONN,
91         MGMT_OP_SET_DEBUG_KEYS,
92         MGMT_OP_SET_PRIVACY,
93         MGMT_OP_LOAD_IRKS,
94         MGMT_OP_GET_CONN_INFO,
95         MGMT_OP_GET_CLOCK_INFO,
96         MGMT_OP_ADD_DEVICE,
97         MGMT_OP_REMOVE_DEVICE,
98         MGMT_OP_LOAD_CONN_PARAM,
99         MGMT_OP_READ_UNCONF_INDEX_LIST,
100         MGMT_OP_READ_CONFIG_INFO,
101         MGMT_OP_SET_EXTERNAL_CONFIG,
102         MGMT_OP_SET_PUBLIC_ADDRESS,
103         MGMT_OP_START_SERVICE_DISCOVERY,
104         MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105         MGMT_OP_READ_EXT_INDEX_LIST,
106         MGMT_OP_READ_ADV_FEATURES,
107         MGMT_OP_ADD_ADVERTISING,
108         MGMT_OP_REMOVE_ADVERTISING,
109         MGMT_OP_GET_ADV_SIZE_INFO,
110         MGMT_OP_START_LIMITED_DISCOVERY,
111         MGMT_OP_READ_EXT_INFO,
112         MGMT_OP_SET_APPEARANCE,
113         MGMT_OP_GET_PHY_CONFIGURATION,
114         MGMT_OP_SET_PHY_CONFIGURATION,
115         MGMT_OP_SET_BLOCKED_KEYS,
116         MGMT_OP_SET_WIDEBAND_SPEECH,
117         MGMT_OP_READ_CONTROLLER_CAP,
118         MGMT_OP_READ_EXP_FEATURES_INFO,
119         MGMT_OP_SET_EXP_FEATURE,
120         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121         MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123         MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124         MGMT_OP_GET_DEVICE_FLAGS,
125         MGMT_OP_SET_DEVICE_FLAGS,
126         MGMT_OP_READ_ADV_MONITOR_FEATURES,
127         MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128         MGMT_OP_REMOVE_ADV_MONITOR,
129         MGMT_OP_ADD_EXT_ADV_PARAMS,
130         MGMT_OP_ADD_EXT_ADV_DATA,
131         MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132         MGMT_OP_SET_MESH_RECEIVER,
133         MGMT_OP_MESH_READ_FEATURES,
134         MGMT_OP_MESH_SEND,
135         MGMT_OP_MESH_SEND_CANCEL,
136 };
137
138 static const u16 mgmt_events[] = {
139         MGMT_EV_CONTROLLER_ERROR,
140         MGMT_EV_INDEX_ADDED,
141         MGMT_EV_INDEX_REMOVED,
142         MGMT_EV_NEW_SETTINGS,
143         MGMT_EV_CLASS_OF_DEV_CHANGED,
144         MGMT_EV_LOCAL_NAME_CHANGED,
145         MGMT_EV_NEW_LINK_KEY,
146         MGMT_EV_NEW_LONG_TERM_KEY,
147         MGMT_EV_DEVICE_CONNECTED,
148         MGMT_EV_DEVICE_DISCONNECTED,
149         MGMT_EV_CONNECT_FAILED,
150         MGMT_EV_PIN_CODE_REQUEST,
151         MGMT_EV_USER_CONFIRM_REQUEST,
152         MGMT_EV_USER_PASSKEY_REQUEST,
153         MGMT_EV_AUTH_FAILED,
154         MGMT_EV_DEVICE_FOUND,
155         MGMT_EV_DISCOVERING,
156         MGMT_EV_DEVICE_BLOCKED,
157         MGMT_EV_DEVICE_UNBLOCKED,
158         MGMT_EV_DEVICE_UNPAIRED,
159         MGMT_EV_PASSKEY_NOTIFY,
160         MGMT_EV_NEW_IRK,
161         MGMT_EV_NEW_CSRK,
162         MGMT_EV_DEVICE_ADDED,
163         MGMT_EV_DEVICE_REMOVED,
164         MGMT_EV_NEW_CONN_PARAM,
165         MGMT_EV_UNCONF_INDEX_ADDED,
166         MGMT_EV_UNCONF_INDEX_REMOVED,
167         MGMT_EV_NEW_CONFIG_OPTIONS,
168         MGMT_EV_EXT_INDEX_ADDED,
169         MGMT_EV_EXT_INDEX_REMOVED,
170         MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171         MGMT_EV_ADVERTISING_ADDED,
172         MGMT_EV_ADVERTISING_REMOVED,
173         MGMT_EV_EXT_INFO_CHANGED,
174         MGMT_EV_PHY_CONFIGURATION_CHANGED,
175         MGMT_EV_EXP_FEATURE_CHANGED,
176         MGMT_EV_DEVICE_FLAGS_CHANGED,
177         MGMT_EV_ADV_MONITOR_ADDED,
178         MGMT_EV_ADV_MONITOR_REMOVED,
179         MGMT_EV_CONTROLLER_SUSPEND,
180         MGMT_EV_CONTROLLER_RESUME,
181         MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182         MGMT_EV_ADV_MONITOR_DEVICE_LOST,
183 };
184
185 static const u16 mgmt_untrusted_commands[] = {
186         MGMT_OP_READ_INDEX_LIST,
187         MGMT_OP_READ_INFO,
188         MGMT_OP_READ_UNCONF_INDEX_LIST,
189         MGMT_OP_READ_CONFIG_INFO,
190         MGMT_OP_READ_EXT_INDEX_LIST,
191         MGMT_OP_READ_EXT_INFO,
192         MGMT_OP_READ_CONTROLLER_CAP,
193         MGMT_OP_READ_EXP_FEATURES_INFO,
194         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
196 };
197
198 static const u16 mgmt_untrusted_events[] = {
199         MGMT_EV_INDEX_ADDED,
200         MGMT_EV_INDEX_REMOVED,
201         MGMT_EV_NEW_SETTINGS,
202         MGMT_EV_CLASS_OF_DEV_CHANGED,
203         MGMT_EV_LOCAL_NAME_CHANGED,
204         MGMT_EV_UNCONF_INDEX_ADDED,
205         MGMT_EV_UNCONF_INDEX_REMOVED,
206         MGMT_EV_NEW_CONFIG_OPTIONS,
207         MGMT_EV_EXT_INDEX_ADDED,
208         MGMT_EV_EXT_INDEX_REMOVED,
209         MGMT_EV_EXT_INFO_CHANGED,
210         MGMT_EV_EXP_FEATURE_CHANGED,
211 };
212
213 #define CACHE_TIMEOUT   msecs_to_jiffies(2 * 1000)
214
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216                  "\x00\x00\x00\x00\x00\x00\x00\x00"
217
218 /* HCI to MGMT error code conversion table */
219 static const u8 mgmt_status_table[] = {
220         MGMT_STATUS_SUCCESS,
221         MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
222         MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
223         MGMT_STATUS_FAILED,             /* Hardware Failure */
224         MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
225         MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
226         MGMT_STATUS_AUTH_FAILED,        /* PIN or Key Missing */
227         MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
228         MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
229         MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
230         MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
231         MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
232         MGMT_STATUS_BUSY,               /* Command Disallowed */
233         MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
234         MGMT_STATUS_REJECTED,           /* Rejected Security */
235         MGMT_STATUS_REJECTED,           /* Rejected Personal */
236         MGMT_STATUS_TIMEOUT,            /* Host Timeout */
237         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
238         MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
239         MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
240         MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
241         MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
242         MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
243         MGMT_STATUS_BUSY,               /* Repeated Attempts */
244         MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
245         MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
246         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
247         MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
248         MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
249         MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
250         MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
251         MGMT_STATUS_FAILED,             /* Unspecified Error */
252         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
253         MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
254         MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
255         MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
256         MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
257         MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
258         MGMT_STATUS_FAILED,             /* Unit Link Key Used */
259         MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
260         MGMT_STATUS_TIMEOUT,            /* Instant Passed */
261         MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
262         MGMT_STATUS_FAILED,             /* Transaction Collision */
263         MGMT_STATUS_FAILED,             /* Reserved for future use */
264         MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
265         MGMT_STATUS_REJECTED,           /* QoS Rejected */
266         MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
267         MGMT_STATUS_REJECTED,           /* Insufficient Security */
268         MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
269         MGMT_STATUS_FAILED,             /* Reserved for future use */
270         MGMT_STATUS_BUSY,               /* Role Switch Pending */
271         MGMT_STATUS_FAILED,             /* Reserved for future use */
272         MGMT_STATUS_FAILED,             /* Slot Violation */
273         MGMT_STATUS_FAILED,             /* Role Switch Failed */
274         MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
275         MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
276         MGMT_STATUS_BUSY,               /* Host Busy Pairing */
277         MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
278         MGMT_STATUS_BUSY,               /* Controller Busy */
279         MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
280         MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
281         MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
282         MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
283         MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
284 };
285
286 static u8 mgmt_errno_status(int err)
287 {
288         switch (err) {
289         case 0:
290                 return MGMT_STATUS_SUCCESS;
291         case -EPERM:
292                 return MGMT_STATUS_REJECTED;
293         case -EINVAL:
294                 return MGMT_STATUS_INVALID_PARAMS;
295         case -EOPNOTSUPP:
296                 return MGMT_STATUS_NOT_SUPPORTED;
297         case -EBUSY:
298                 return MGMT_STATUS_BUSY;
299         case -ETIMEDOUT:
300                 return MGMT_STATUS_AUTH_FAILED;
301         case -ENOMEM:
302                 return MGMT_STATUS_NO_RESOURCES;
303         case -EISCONN:
304                 return MGMT_STATUS_ALREADY_CONNECTED;
305         case -ENOTCONN:
306                 return MGMT_STATUS_DISCONNECTED;
307         }
308
309         return MGMT_STATUS_FAILED;
310 }
311
312 static u8 mgmt_status(int err)
313 {
314         if (err < 0)
315                 return mgmt_errno_status(err);
316
317         if (err < ARRAY_SIZE(mgmt_status_table))
318                 return mgmt_status_table[err];
319
320         return MGMT_STATUS_FAILED;
321 }
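/* For example, HCI status 0x0c (Command Disallowed) maps to
 * MGMT_STATUS_BUSY through the table above, a negative errno such as
 * -EBUSY goes through mgmt_errno_status(), and any value outside both
 * ranges falls back to MGMT_STATUS_FAILED.
 */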
322
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
324                             u16 len, int flag)
325 {
326         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
327                                flag, NULL);
328 }
329
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331                               u16 len, int flag, struct sock *skip_sk)
332 {
333         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334                                flag, skip_sk);
335 }
336
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338                       struct sock *skip_sk)
339 {
340         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341                                HCI_SOCK_TRUSTED, skip_sk);
342 }
343
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
345 {
346         return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
347                                    skip_sk);
348 }
349
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352         if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353                 return ADDR_LE_DEV_PUBLIC;
354         else
355                 return ADDR_LE_DEV_RANDOM;
356 }
357
358 void mgmt_fill_version_info(void *ver)
359 {
360         struct mgmt_rp_read_version *rp = ver;
361
362         rp->version = MGMT_VERSION;
363         rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
367                         u16 data_len)
368 {
369         struct mgmt_rp_read_version rp;
370
371         bt_dev_dbg(hdev, "sock %p", sk);
372
373         mgmt_fill_version_info(&rp);
374
375         return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
376                                  &rp, sizeof(rp));
377 }
378
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380                          u16 data_len)
381 {
382         struct mgmt_rp_read_commands *rp;
383         u16 num_commands, num_events;
384         size_t rp_size;
385         int i, err;
386
387         bt_dev_dbg(hdev, "sock %p", sk);
388
389         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390                 num_commands = ARRAY_SIZE(mgmt_commands);
391                 num_events = ARRAY_SIZE(mgmt_events);
392         } else {
393                 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394                 num_events = ARRAY_SIZE(mgmt_untrusted_events);
395         }
396
397         rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398
399         rp = kmalloc(rp_size, GFP_KERNEL);
400         if (!rp)
401                 return -ENOMEM;
402
403         rp->num_commands = cpu_to_le16(num_commands);
404         rp->num_events = cpu_to_le16(num_events);
405
406         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407                 __le16 *opcode = rp->opcodes;
408
409                 for (i = 0; i < num_commands; i++, opcode++)
410                         put_unaligned_le16(mgmt_commands[i], opcode);
411
412                 for (i = 0; i < num_events; i++, opcode++)
413                         put_unaligned_le16(mgmt_events[i], opcode);
414         } else {
415                 __le16 *opcode = rp->opcodes;
416
417                 for (i = 0; i < num_commands; i++, opcode++)
418                         put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419
420                 for (i = 0; i < num_events; i++, opcode++)
421                         put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422         }
423
424         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425                                 rp, rp_size);
426         kfree(rp);
427
428         return err;
429 }
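/* The reply built above is simply the two counters followed by
 * num_commands little-endian command opcodes and then num_events event
 * opcodes; untrusted sockets only see the read-only subsets listed in
 * mgmt_untrusted_commands[] and mgmt_untrusted_events[].
 */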
430
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
432                            u16 data_len)
433 {
434         struct mgmt_rp_read_index_list *rp;
435         struct hci_dev *d;
436         size_t rp_len;
437         u16 count;
438         int err;
439
440         bt_dev_dbg(hdev, "sock %p", sk);
441
442         read_lock(&hci_dev_list_lock);
443
444         count = 0;
445         list_for_each_entry(d, &hci_dev_list, list) {
446                 if (d->dev_type == HCI_PRIMARY &&
447                     !hci_dev_test_flag(d, HCI_UNCONFIGURED))
448                         count++;
449         }
450
451         rp_len = sizeof(*rp) + (2 * count);
452         rp = kmalloc(rp_len, GFP_ATOMIC);
453         if (!rp) {
454                 read_unlock(&hci_dev_list_lock);
455                 return -ENOMEM;
456         }
457
458         count = 0;
459         list_for_each_entry(d, &hci_dev_list, list) {
460                 if (hci_dev_test_flag(d, HCI_SETUP) ||
461                     hci_dev_test_flag(d, HCI_CONFIG) ||
462                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
463                         continue;
464
465                 /* Devices marked as raw-only are neither configured
466                  * nor unconfigured controllers.
467                  */
468                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
469                         continue;
470
471                 if (d->dev_type == HCI_PRIMARY &&
472                     !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
473                         rp->index[count++] = cpu_to_le16(d->id);
474                         bt_dev_dbg(hdev, "Added hci%u", d->id);
475                 }
476         }
477
478         rp->num_controllers = cpu_to_le16(count);
479         rp_len = sizeof(*rp) + (2 * count);
480
481         read_unlock(&hci_dev_list_lock);
482
483         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
484                                 0, rp, rp_len);
485
486         kfree(rp);
487
488         return err;
489 }
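/* Note that the reply above is allocated with GFP_ATOMIC because
 * hci_dev_list_lock is held, and the index count is recomputed during
 * the second walk since controllers in SETUP, CONFIG or USER_CHANNEL
 * state, or marked raw-only, are skipped when filling the list.
 */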
490
491 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
492                                   void *data, u16 data_len)
493 {
494         struct mgmt_rp_read_unconf_index_list *rp;
495         struct hci_dev *d;
496         size_t rp_len;
497         u16 count;
498         int err;
499
500         bt_dev_dbg(hdev, "sock %p", sk);
501
502         read_lock(&hci_dev_list_lock);
503
504         count = 0;
505         list_for_each_entry(d, &hci_dev_list, list) {
506                 if (d->dev_type == HCI_PRIMARY &&
507                     hci_dev_test_flag(d, HCI_UNCONFIGURED))
508                         count++;
509         }
510
511         rp_len = sizeof(*rp) + (2 * count);
512         rp = kmalloc(rp_len, GFP_ATOMIC);
513         if (!rp) {
514                 read_unlock(&hci_dev_list_lock);
515                 return -ENOMEM;
516         }
517
518         count = 0;
519         list_for_each_entry(d, &hci_dev_list, list) {
520                 if (hci_dev_test_flag(d, HCI_SETUP) ||
521                     hci_dev_test_flag(d, HCI_CONFIG) ||
522                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
523                         continue;
524
525                 /* Devices marked as raw-only are neither configured
526                  * nor unconfigured controllers.
527                  */
528                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
529                         continue;
530
531                 if (d->dev_type == HCI_PRIMARY &&
532                     hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
533                         rp->index[count++] = cpu_to_le16(d->id);
534                         bt_dev_dbg(hdev, "Added hci%u", d->id);
535                 }
536         }
537
538         rp->num_controllers = cpu_to_le16(count);
539         rp_len = sizeof(*rp) + (2 * count);
540
541         read_unlock(&hci_dev_list_lock);
542
543         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544                                 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
545
546         kfree(rp);
547
548         return err;
549 }
550
551 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
552                                void *data, u16 data_len)
553 {
554         struct mgmt_rp_read_ext_index_list *rp;
555         struct hci_dev *d;
556         u16 count;
557         int err;
558
559         bt_dev_dbg(hdev, "sock %p", sk);
560
561         read_lock(&hci_dev_list_lock);
562
563         count = 0;
564         list_for_each_entry(d, &hci_dev_list, list) {
565                 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
566                         count++;
567         }
568
569         rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
570         if (!rp) {
571                 read_unlock(&hci_dev_list_lock);
572                 return -ENOMEM;
573         }
574
575         count = 0;
576         list_for_each_entry(d, &hci_dev_list, list) {
577                 if (hci_dev_test_flag(d, HCI_SETUP) ||
578                     hci_dev_test_flag(d, HCI_CONFIG) ||
579                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
580                         continue;
581
582                 /* Devices marked as raw-only are neither configured
583                  * nor unconfigured controllers.
584                  */
585                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
586                         continue;
587
588                 if (d->dev_type == HCI_PRIMARY) {
589                         if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
590                                 rp->entry[count].type = 0x01;
591                         else
592                                 rp->entry[count].type = 0x00;
593                 } else if (d->dev_type == HCI_AMP) {
594                         rp->entry[count].type = 0x02;
595                 } else {
596                         continue;
597                 }
598
599                 rp->entry[count].bus = d->bus;
600                 rp->entry[count++].index = cpu_to_le16(d->id);
601                 bt_dev_dbg(hdev, "Added hci%u", d->id);
602         }
603
604         rp->num_controllers = cpu_to_le16(count);
605
606         read_unlock(&hci_dev_list_lock);
607
608         /* If this command is called at least once, then all the
609          * default index and unconfigured index events are disabled
610          * and from now on only extended index events are used.
611          */
612         hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
613         hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
614         hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
615
616         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
617                                 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
618                                 struct_size(rp, entry, count));
619
620         kfree(rp);
621
622         return err;
623 }
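/* Controller types reported in the extended index list above: 0x00 for
 * a configured primary controller, 0x01 for an unconfigured primary
 * controller and 0x02 for an AMP controller.
 */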
624
625 static bool is_configured(struct hci_dev *hdev)
626 {
627         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629                 return false;
630
631         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633             !bacmp(&hdev->public_addr, BDADDR_ANY))
634                 return false;
635
636         return true;
637 }
638
639 static __le32 get_missing_options(struct hci_dev *hdev)
640 {
641         u32 options = 0;
642
643         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
646
647         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649             !bacmp(&hdev->public_addr, BDADDR_ANY))
650                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
651
652         return cpu_to_le32(options);
653 }
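/* get_missing_options() mirrors the checks in is_configured(): the
 * returned bitmask is zero exactly when the controller counts as
 * configured, otherwise it names the options (external configuration
 * and/or public address) that still need to be provided.
 */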
654
655 static int new_options(struct hci_dev *hdev, struct sock *skip)
656 {
657         __le32 options = get_missing_options(hdev);
658
659         return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
660                                   sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
661 }
662
663 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
664 {
665         __le32 options = get_missing_options(hdev);
666
667         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
668                                  sizeof(options));
669 }
670
671 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
672                             void *data, u16 data_len)
673 {
674         struct mgmt_rp_read_config_info rp;
675         u32 options = 0;
676
677         bt_dev_dbg(hdev, "sock %p", sk);
678
679         hci_dev_lock(hdev);
680
681         memset(&rp, 0, sizeof(rp));
682         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
683
684         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
685                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
686
687         if (hdev->set_bdaddr)
688                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
689
690         rp.supported_options = cpu_to_le32(options);
691         rp.missing_options = get_missing_options(hdev);
692
693         hci_dev_unlock(hdev);
694
695         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
696                                  &rp, sizeof(rp));
697 }
698
699 static u32 get_supported_phys(struct hci_dev *hdev)
700 {
701         u32 supported_phys = 0;
702
703         if (lmp_bredr_capable(hdev)) {
704                 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
705
706                 if (hdev->features[0][0] & LMP_3SLOT)
707                         supported_phys |= MGMT_PHY_BR_1M_3SLOT;
708
709                 if (hdev->features[0][0] & LMP_5SLOT)
710                         supported_phys |= MGMT_PHY_BR_1M_5SLOT;
711
712                 if (lmp_edr_2m_capable(hdev)) {
713                         supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
714
715                         if (lmp_edr_3slot_capable(hdev))
716                                 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
717
718                         if (lmp_edr_5slot_capable(hdev))
719                                 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
720
721                         if (lmp_edr_3m_capable(hdev)) {
722                                 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
723
724                                 if (lmp_edr_3slot_capable(hdev))
725                                         supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
726
727                                 if (lmp_edr_5slot_capable(hdev))
728                                         supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
729                         }
730                 }
731         }
732
733         if (lmp_le_capable(hdev)) {
734                 supported_phys |= MGMT_PHY_LE_1M_TX;
735                 supported_phys |= MGMT_PHY_LE_1M_RX;
736
737                 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
738                         supported_phys |= MGMT_PHY_LE_2M_TX;
739                         supported_phys |= MGMT_PHY_LE_2M_RX;
740                 }
741
742                 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
743                         supported_phys |= MGMT_PHY_LE_CODED_TX;
744                         supported_phys |= MGMT_PHY_LE_CODED_RX;
745                 }
746         }
747
748         return supported_phys;
749 }
750
751 static u32 get_selected_phys(struct hci_dev *hdev)
752 {
753         u32 selected_phys = 0;
754
755         if (lmp_bredr_capable(hdev)) {
756                 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
757
758                 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
759                         selected_phys |= MGMT_PHY_BR_1M_3SLOT;
760
761                 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
762                         selected_phys |= MGMT_PHY_BR_1M_5SLOT;
763
764                 if (lmp_edr_2m_capable(hdev)) {
765                         if (!(hdev->pkt_type & HCI_2DH1))
766                                 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
767
768                         if (lmp_edr_3slot_capable(hdev) &&
769                             !(hdev->pkt_type & HCI_2DH3))
770                                 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
771
772                         if (lmp_edr_5slot_capable(hdev) &&
773                             !(hdev->pkt_type & HCI_2DH5))
774                                 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
775
776                         if (lmp_edr_3m_capable(hdev)) {
777                                 if (!(hdev->pkt_type & HCI_3DH1))
778                                         selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
779
780                                 if (lmp_edr_3slot_capable(hdev) &&
781                                     !(hdev->pkt_type & HCI_3DH3))
782                                         selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
783
784                                 if (lmp_edr_5slot_capable(hdev) &&
785                                     !(hdev->pkt_type & HCI_3DH5))
786                                         selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
787                         }
788                 }
789         }
790
791         if (lmp_le_capable(hdev)) {
792                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
793                         selected_phys |= MGMT_PHY_LE_1M_TX;
794
795                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
796                         selected_phys |= MGMT_PHY_LE_1M_RX;
797
798                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
799                         selected_phys |= MGMT_PHY_LE_2M_TX;
800
801                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
802                         selected_phys |= MGMT_PHY_LE_2M_RX;
803
804                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
805                         selected_phys |= MGMT_PHY_LE_CODED_TX;
806
807                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
808                         selected_phys |= MGMT_PHY_LE_CODED_RX;
809         }
810
811         return selected_phys;
812 }
813
814 static u32 get_configurable_phys(struct hci_dev *hdev)
815 {
816         return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817                 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818 }
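/* BR/EDR 1M 1-slot and LE 1M TX/RX are stripped from the supported mask
 * above, leaving only the PHYs that can actually be toggled; the
 * baseline 1M PHYs always stay enabled.
 */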
819
820 static u32 get_supported_settings(struct hci_dev *hdev)
821 {
822         u32 settings = 0;
823
824         settings |= MGMT_SETTING_POWERED;
825         settings |= MGMT_SETTING_BONDABLE;
826         settings |= MGMT_SETTING_DEBUG_KEYS;
827         settings |= MGMT_SETTING_CONNECTABLE;
828         settings |= MGMT_SETTING_DISCOVERABLE;
829
830         if (lmp_bredr_capable(hdev)) {
831                 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832                         settings |= MGMT_SETTING_FAST_CONNECTABLE;
833                 settings |= MGMT_SETTING_BREDR;
834                 settings |= MGMT_SETTING_LINK_SECURITY;
835
836                 if (lmp_ssp_capable(hdev)) {
837                         settings |= MGMT_SETTING_SSP;
838                         if (IS_ENABLED(CONFIG_BT_HS))
839                                 settings |= MGMT_SETTING_HS;
840                 }
841
842                 if (lmp_sc_capable(hdev))
843                         settings |= MGMT_SETTING_SECURE_CONN;
844
845                 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
846                              &hdev->quirks))
847                         settings |= MGMT_SETTING_WIDEBAND_SPEECH;
848         }
849
850         if (lmp_le_capable(hdev)) {
851                 settings |= MGMT_SETTING_LE;
852                 settings |= MGMT_SETTING_SECURE_CONN;
853                 settings |= MGMT_SETTING_PRIVACY;
854                 settings |= MGMT_SETTING_STATIC_ADDRESS;
855                 settings |= MGMT_SETTING_ADVERTISING;
856         }
857
858         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
859             hdev->set_bdaddr)
860                 settings |= MGMT_SETTING_CONFIGURATION;
861
862         settings |= MGMT_SETTING_PHY_CONFIGURATION;
863
864         return settings;
865 }
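/* The supported settings above are derived from static controller
 * capabilities (LMP/LE features and quirks), whereas
 * get_current_settings() below reports which of them are actually
 * enabled at runtime based on the HCI device flags.
 */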
866
867 static u32 get_current_settings(struct hci_dev *hdev)
868 {
869         u32 settings = 0;
870
871         if (hdev_is_powered(hdev))
872                 settings |= MGMT_SETTING_POWERED;
873
874         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
875                 settings |= MGMT_SETTING_CONNECTABLE;
876
877         if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
878                 settings |= MGMT_SETTING_FAST_CONNECTABLE;
879
880         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
881                 settings |= MGMT_SETTING_DISCOVERABLE;
882
883         if (hci_dev_test_flag(hdev, HCI_BONDABLE))
884                 settings |= MGMT_SETTING_BONDABLE;
885
886         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
887                 settings |= MGMT_SETTING_BREDR;
888
889         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
890                 settings |= MGMT_SETTING_LE;
891
892         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
893                 settings |= MGMT_SETTING_LINK_SECURITY;
894
895         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
896                 settings |= MGMT_SETTING_SSP;
897
898         if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
899                 settings |= MGMT_SETTING_HS;
900
901         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
902                 settings |= MGMT_SETTING_ADVERTISING;
903
904         if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
905                 settings |= MGMT_SETTING_SECURE_CONN;
906
907         if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
908                 settings |= MGMT_SETTING_DEBUG_KEYS;
909
910         if (hci_dev_test_flag(hdev, HCI_PRIVACY))
911                 settings |= MGMT_SETTING_PRIVACY;
912
913         /* The current setting for static address has two purposes. The
914          * first is to indicate if the static address will be used and
915          * the second is to indicate if it is actually set.
916          *
917          * This means that if the static address is not configured, this
918          * flag will never be set. If the address is configured, then
919          * whether it is actually in use determines if the flag is set.
920          *
921          * For single-mode LE-only controllers and dual-mode controllers
922          * with BR/EDR disabled, the existence of the static address will
923          * be evaluated.
924          */
925         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
926             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
927             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
928                 if (bacmp(&hdev->static_addr, BDADDR_ANY))
929                         settings |= MGMT_SETTING_STATIC_ADDRESS;
930         }
931
932         if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
933                 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
934
935         return settings;
936 }
937
938 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
939 {
940         return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
941 }
942
943 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
944 {
945         struct mgmt_pending_cmd *cmd;
946
947         /* If there's a pending mgmt command the flags will not yet have
948          * their final values, so check for this first.
949          */
950         cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
951         if (cmd) {
952                 struct mgmt_mode *cp = cmd->param;
953                 if (cp->val == 0x01)
954                         return LE_AD_GENERAL;
955                 else if (cp->val == 0x02)
956                         return LE_AD_LIMITED;
957         } else {
958                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
959                         return LE_AD_LIMITED;
960                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
961                         return LE_AD_GENERAL;
962         }
963
964         return 0;
965 }
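/* The value returned here is the discoverability portion of the LE
 * "Flags" advertising data field: LE_AD_GENERAL, LE_AD_LIMITED, or 0
 * when not discoverable. A pending Set Discoverable command takes
 * precedence since the device flags have not been updated yet.
 */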
966
967 bool mgmt_get_connectable(struct hci_dev *hdev)
968 {
969         struct mgmt_pending_cmd *cmd;
970
971         /* If there's a pending mgmt command the flag will not yet have
972          * its final value, so check for this first.
973          */
974         cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
975         if (cmd) {
976                 struct mgmt_mode *cp = cmd->param;
977
978                 return cp->val;
979         }
980
981         return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
982 }
983
984 static int service_cache_sync(struct hci_dev *hdev, void *data)
985 {
986         hci_update_eir_sync(hdev);
987         hci_update_class_sync(hdev);
988
989         return 0;
990 }
991
992 static void service_cache_off(struct work_struct *work)
993 {
994         struct hci_dev *hdev = container_of(work, struct hci_dev,
995                                             service_cache.work);
996
997         if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
998                 return;
999
1000         hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1001 }
1002
1003 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1004 {
1005         /* The generation of a new RPA and programming it into the
1006          * controller happens in the hci_req_enable_advertising()
1007          * function.
1008          */
1009         if (ext_adv_capable(hdev))
1010                 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1011         else
1012                 return hci_enable_advertising_sync(hdev);
1013 }
1014
1015 static void rpa_expired(struct work_struct *work)
1016 {
1017         struct hci_dev *hdev = container_of(work, struct hci_dev,
1018                                             rpa_expired.work);
1019
1020         bt_dev_dbg(hdev, "");
1021
1022         hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1023
1024         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1025                 return;
1026
1027         hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1028 }
1029
1030 static void discov_off(struct work_struct *work)
1031 {
1032         struct hci_dev *hdev = container_of(work, struct hci_dev,
1033                                             discov_off.work);
1034
1035         bt_dev_dbg(hdev, "");
1036
1037         hci_dev_lock(hdev);
1038
1039         /* When the discoverable timeout triggers, just make sure
1040          * the limited discoverable flag is cleared. Even in the case
1041          * of a timeout triggered from general discoverable, it is
1042          * safe to unconditionally clear the flag.
1043          */
1044         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1045         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1046         hdev->discov_timeout = 0;
1047
1048         hci_update_discoverable(hdev);
1049
1050         mgmt_new_settings(hdev);
1051
1052         hci_dev_unlock(hdev);
1053 }
1054
1055 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1056
1057 static void mesh_send_complete(struct hci_dev *hdev,
1058                                struct mgmt_mesh_tx *mesh_tx, bool silent)
1059 {
1060         u8 handle = mesh_tx->handle;
1061
1062         if (!silent)
1063                 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1064                            sizeof(handle), NULL);
1065
1066         mgmt_mesh_remove(mesh_tx);
1067 }
1068
1069 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1070 {
1071         struct mgmt_mesh_tx *mesh_tx;
1072
1073         hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1074         hci_disable_advertising_sync(hdev);
1075         mesh_tx = mgmt_mesh_next(hdev, NULL);
1076
1077         if (mesh_tx)
1078                 mesh_send_complete(hdev, mesh_tx, false);
1079
1080         return 0;
1081 }
1082
1083 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1084 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1085 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1086 {
1087         struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1088
1089         if (!mesh_tx)
1090                 return;
1091
1092         err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1093                                  mesh_send_start_complete);
1094
1095         if (err < 0)
1096                 mesh_send_complete(hdev, mesh_tx, false);
1097         else
1098                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1099 }
1100
1101 static void mesh_send_done(struct work_struct *work)
1102 {
1103         struct hci_dev *hdev = container_of(work, struct hci_dev,
1104                                             mesh_send_done.work);
1105
1106         if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1107                 return;
1108
1109         hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1110 }
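/* Mesh transmit completion flow: the mesh_send_done delayed work only
 * runs while HCI_MESH_SENDING is set; mesh_send_done_sync() clears the
 * flag, stops advertising and completes the outstanding mesh_tx
 * (emitting MGMT_EV_MESH_PACKET_CMPLT), and mesh_next() then queues
 * mesh_send_sync() for the next pending packet, if any, setting
 * HCI_MESH_SENDING again on success.
 */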
1111
1112 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1113 {
1114         if (hci_dev_test_flag(hdev, HCI_MGMT))
1115                 return;
1116
1117         BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1118
1119         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1120         INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1121         INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1122         INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1123
1124         /* Non-mgmt controlled devices get this bit set
1125          * implicitly so that pairing works for them. For mgmt,
1126          * however, we require user-space to explicitly enable
1127          * it.
1128          */
1129         hci_dev_clear_flag(hdev, HCI_BONDABLE);
1130
1131         hci_dev_set_flag(hdev, HCI_MGMT);
1132 }
1133
1134 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1135                                 void *data, u16 data_len)
1136 {
1137         struct mgmt_rp_read_info rp;
1138
1139         bt_dev_dbg(hdev, "sock %p", sk);
1140
1141         hci_dev_lock(hdev);
1142
1143         memset(&rp, 0, sizeof(rp));
1144
1145         bacpy(&rp.bdaddr, &hdev->bdaddr);
1146
1147         rp.version = hdev->hci_ver;
1148         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1149
1150         rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1151         rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1152
1153         memcpy(rp.dev_class, hdev->dev_class, 3);
1154
1155         memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1156         memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1157
1158         hci_dev_unlock(hdev);
1159
1160         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1161                                  sizeof(rp));
1162 }
1163
1164 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1165 {
1166         u16 eir_len = 0;
1167         size_t name_len;
1168
1169         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1170                 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1171                                           hdev->dev_class, 3);
1172
1173         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1174                 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1175                                           hdev->appearance);
1176
1177         name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1178         eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1179                                   hdev->dev_name, name_len);
1180
1181         name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1182         eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1183                                   hdev->short_name, name_len);
1184
1185         return eir_len;
1186 }
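/* Each helper call above appends one EIR/AD structure, i.e. a length
 * byte, a type byte (EIR_CLASS_OF_DEV, EIR_APPEARANCE,
 * EIR_NAME_COMPLETE or EIR_NAME_SHORT) and the payload, with
 * eir_append_le16() handling the little-endian appearance value.
 */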
1187
1188 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1189                                     void *data, u16 data_len)
1190 {
1191         char buf[512];
1192         struct mgmt_rp_read_ext_info *rp = (void *)buf;
1193         u16 eir_len;
1194
1195         bt_dev_dbg(hdev, "sock %p", sk);
1196
1197         memset(&buf, 0, sizeof(buf));
1198
1199         hci_dev_lock(hdev);
1200
1201         bacpy(&rp->bdaddr, &hdev->bdaddr);
1202
1203         rp->version = hdev->hci_ver;
1204         rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1205
1206         rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1207         rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1208
1209
1210         eir_len = append_eir_data_to_buf(hdev, rp->eir);
1211         rp->eir_len = cpu_to_le16(eir_len);
1212
1213         hci_dev_unlock(hdev);
1214
1215         /* If this command is called at least once, then the events
1216          * for class of device and local name changes are disabled
1217          * and only the new extended controller information event
1218          * is used.
1219          */
1220         hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1221         hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1222         hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1223
1224         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1225                                  sizeof(*rp) + eir_len);
1226 }
1227
1228 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1229 {
1230         char buf[512];
1231         struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1232         u16 eir_len;
1233
1234         memset(buf, 0, sizeof(buf));
1235
1236         eir_len = append_eir_data_to_buf(hdev, ev->eir);
1237         ev->eir_len = cpu_to_le16(eir_len);
1238
1239         return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1240                                   sizeof(*ev) + eir_len,
1241                                   HCI_MGMT_EXT_INFO_EVENTS, skip);
1242 }
1243
1244 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1245 {
1246         __le32 settings = cpu_to_le32(get_current_settings(hdev));
1247
1248         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1249                                  sizeof(settings));
1250 }
1251
1252 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1253 {
1254         struct mgmt_ev_advertising_added ev;
1255
1256         ev.instance = instance;
1257
1258         mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1259 }
1260
1261 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1262                               u8 instance)
1263 {
1264         struct mgmt_ev_advertising_removed ev;
1265
1266         ev.instance = instance;
1267
1268         mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1269 }
1270
1271 static void cancel_adv_timeout(struct hci_dev *hdev)
1272 {
1273         if (hdev->adv_instance_timeout) {
1274                 hdev->adv_instance_timeout = 0;
1275                 cancel_delayed_work(&hdev->adv_instance_expire);
1276         }
1277 }
1278
1279 /* This function requires the caller holds hdev->lock */
1280 static void restart_le_actions(struct hci_dev *hdev)
1281 {
1282         struct hci_conn_params *p;
1283
1284         list_for_each_entry(p, &hdev->le_conn_params, list) {
1285                 /* Needed for the AUTO_OFF case where the controller might
1286                  * not "really" have been powered off.
1287                  */
1288                 list_del_init(&p->action);
1289
1290                 switch (p->auto_connect) {
1291                 case HCI_AUTO_CONN_DIRECT:
1292                 case HCI_AUTO_CONN_ALWAYS:
1293                         list_add(&p->action, &hdev->pend_le_conns);
1294                         break;
1295                 case HCI_AUTO_CONN_REPORT:
1296                         list_add(&p->action, &hdev->pend_le_reports);
1297                         break;
1298                 default:
1299                         break;
1300                 }
1301         }
1302 }
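/* The re-sorting above feeds the passive scanning logic: parameters with
 * HCI_AUTO_CONN_DIRECT or HCI_AUTO_CONN_ALWAYS go back on pend_le_conns
 * (reconnect automatically), while HCI_AUTO_CONN_REPORT entries go on
 * pend_le_reports (only report the device when it is seen).
 */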
1303
1304 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1305 {
1306         __le32 ev = cpu_to_le32(get_current_settings(hdev));
1307
1308         return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1309                                   sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1310 }
1311
1312 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1313 {
1314         struct mgmt_pending_cmd *cmd = data;
1315         struct mgmt_mode *cp;
1316
1317         /* Make sure cmd still outstanding. */
1318         if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1319                 return;
1320
1321         cp = cmd->param;
1322
1323         bt_dev_dbg(hdev, "err %d", err);
1324
1325         if (!err) {
1326                 if (cp->val) {
1327                         hci_dev_lock(hdev);
1328                         restart_le_actions(hdev);
1329                         hci_update_passive_scan(hdev);
1330                         hci_dev_unlock(hdev);
1331                 }
1332
1333                 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1334
1335                 /* Only call new_setting for power on as power off is deferred
1336                  * to hdev->power_off work which does call hci_dev_do_close.
1337                  */
1338                 if (cp->val)
1339                         new_settings(hdev, cmd->sk);
1340         } else {
1341                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1342                                 mgmt_status(err));
1343         }
1344
1345         mgmt_pending_remove(cmd);
1346 }
1347
1348 static int set_powered_sync(struct hci_dev *hdev, void *data)
1349 {
1350         struct mgmt_pending_cmd *cmd = data;
1351         struct mgmt_mode *cp = cmd->param;
1352
1353         BT_DBG("%s", hdev->name);
1354
1355         return hci_set_powered_sync(hdev, cp->val);
1356 }
1357
1358 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1359                        u16 len)
1360 {
1361         struct mgmt_mode *cp = data;
1362         struct mgmt_pending_cmd *cmd;
1363         int err;
1364
1365         bt_dev_dbg(hdev, "sock %p", sk);
1366
1367         if (cp->val != 0x00 && cp->val != 0x01)
1368                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1369                                        MGMT_STATUS_INVALID_PARAMS);
1370
1371         hci_dev_lock(hdev);
1372
1373         if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1374                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1375                                       MGMT_STATUS_BUSY);
1376                 goto failed;
1377         }
1378
1379         if (!!cp->val == hdev_is_powered(hdev)) {
1380                 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1381                 goto failed;
1382         }
1383
1384         cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1385         if (!cmd) {
1386                 err = -ENOMEM;
1387                 goto failed;
1388         }
1389
1390         err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1391                                  mgmt_set_powered_complete);
1392
1393         if (err < 0)
1394                 mgmt_pending_remove(cmd);
1395
1396 failed:
1397         hci_dev_unlock(hdev);
1398         return err;
1399 }
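/* set_powered() shows the common pattern for mgmt commands that need HCI
 * work: validate the parameters, reject the request if the same opcode
 * is already pending, short-circuit when nothing would change, and
 * otherwise add a mgmt_pending_cmd and hand it to hci_cmd_sync_queue()
 * together with a completion callback that sends the response and, on
 * power on, the New Settings event.
 */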
1400
1401 int mgmt_new_settings(struct hci_dev *hdev)
1402 {
1403         return new_settings(hdev, NULL);
1404 }
1405
1406 struct cmd_lookup {
1407         struct sock *sk;
1408         struct hci_dev *hdev;
1409         u8 mgmt_status;
1410 };
1411
1412 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1413 {
1414         struct cmd_lookup *match = data;
1415
1416         send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1417
1418         list_del(&cmd->list);
1419
1420         if (match->sk == NULL) {
1421                 match->sk = cmd->sk;
1422                 sock_hold(match->sk);
1423         }
1424
1425         mgmt_pending_free(cmd);
1426 }
1427
1428 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1429 {
1430         u8 *status = data;
1431
1432         mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1433         mgmt_pending_remove(cmd);
1434 }
1435
1436 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1437 {
1438         if (cmd->cmd_complete) {
1439                 u8 *status = data;
1440
1441                 cmd->cmd_complete(cmd, *status);
1442                 mgmt_pending_remove(cmd);
1443
1444                 return;
1445         }
1446
1447         cmd_status_rsp(cmd, data);
1448 }
1449
1450 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1451 {
1452         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1453                                  cmd->param, cmd->param_len);
1454 }
1455
1456 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1457 {
1458         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1459                                  cmd->param, sizeof(struct mgmt_addr_info));
1460 }
1461
1462 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1463 {
1464         if (!lmp_bredr_capable(hdev))
1465                 return MGMT_STATUS_NOT_SUPPORTED;
1466         else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1467                 return MGMT_STATUS_REJECTED;
1468         else
1469                 return MGMT_STATUS_SUCCESS;
1470 }
1471
1472 static u8 mgmt_le_support(struct hci_dev *hdev)
1473 {
1474         if (!lmp_le_capable(hdev))
1475                 return MGMT_STATUS_NOT_SUPPORTED;
1476         else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1477                 return MGMT_STATUS_REJECTED;
1478         else
1479                 return MGMT_STATUS_SUCCESS;
1480 }
1481
1482 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1483                                            int err)
1484 {
1485         struct mgmt_pending_cmd *cmd = data;
1486
1487         bt_dev_dbg(hdev, "err %d", err);
1488
1489         /* Make sure cmd still outstanding. */
1490         if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1491                 return;
1492
1493         hci_dev_lock(hdev);
1494
1495         if (err) {
1496                 u8 mgmt_err = mgmt_status(err);
1497                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1498                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1499                 goto done;
1500         }
1501
1502         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1503             hdev->discov_timeout > 0) {
1504                 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1505                 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1506         }
1507
1508         send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1509         new_settings(hdev, cmd->sk);
1510
1511 done:
1512         mgmt_pending_remove(cmd);
1513         hci_dev_unlock(hdev);
1514 }
1515
1516 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1517 {
1518         BT_DBG("%s", hdev->name);
1519
1520         return hci_update_discoverable_sync(hdev);
1521 }
1522
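/* Set Discoverable: cp->val selects 0x00 (off), 0x01 (general discoverable)
 * or 0x02 (limited discoverable). As checked below, limited discoverable
 * requires a non-zero timeout and disabling requires a zero timeout.
 */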
1523 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1524                             u16 len)
1525 {
1526         struct mgmt_cp_set_discoverable *cp = data;
1527         struct mgmt_pending_cmd *cmd;
1528         u16 timeout;
1529         int err;
1530
1531         bt_dev_dbg(hdev, "sock %p", sk);
1532
1533         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1534             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1535                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1536                                        MGMT_STATUS_REJECTED);
1537
1538         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1539                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1540                                        MGMT_STATUS_INVALID_PARAMS);
1541
1542         timeout = __le16_to_cpu(cp->timeout);
1543
1544         /* Disabling discoverable requires that no timeout is set,
1545          * and enabling limited discoverable requires a timeout.
1546          */
1547         if ((cp->val == 0x00 && timeout > 0) ||
1548             (cp->val == 0x02 && timeout == 0))
1549                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1550                                        MGMT_STATUS_INVALID_PARAMS);
1551
1552         hci_dev_lock(hdev);
1553
1554         if (!hdev_is_powered(hdev) && timeout > 0) {
1555                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1556                                       MGMT_STATUS_NOT_POWERED);
1557                 goto failed;
1558         }
1559
1560         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1561             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1562                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1563                                       MGMT_STATUS_BUSY);
1564                 goto failed;
1565         }
1566
1567         if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1568                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1569                                       MGMT_STATUS_REJECTED);
1570                 goto failed;
1571         }
1572
1573         if (hdev->advertising_paused) {
1574                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1575                                       MGMT_STATUS_BUSY);
1576                 goto failed;
1577         }
1578
1579         if (!hdev_is_powered(hdev)) {
1580                 bool changed = false;
1581
1582                 /* Setting limited discoverable when powered off is
1583                  * not a valid operation since it requires a timeout,
1584                  * so there is no need to check HCI_LIMITED_DISCOVERABLE.
1585                  */
1586                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1587                         hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1588                         changed = true;
1589                 }
1590
1591                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1592                 if (err < 0)
1593                         goto failed;
1594
1595                 if (changed)
1596                         err = new_settings(hdev, sk);
1597
1598                 goto failed;
1599         }
1600
1601         /* If the current mode is the same, just update the timeout with
1602          * the new value. When only the timeout changes, no HCI
1603          * transactions are needed.
1604          */
1605         if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1606             (cp->val == 0x02) == hci_dev_test_flag(hdev,
1607                                                    HCI_LIMITED_DISCOVERABLE)) {
1608                 cancel_delayed_work(&hdev->discov_off);
1609                 hdev->discov_timeout = timeout;
1610
1611                 if (cp->val && hdev->discov_timeout > 0) {
1612                         int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1613                         queue_delayed_work(hdev->req_workqueue,
1614                                            &hdev->discov_off, to);
1615                 }
1616
1617                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1618                 goto failed;
1619         }
1620
1621         cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1622         if (!cmd) {
1623                 err = -ENOMEM;
1624                 goto failed;
1625         }
1626
1627         /* Cancel any discoverable timeout that might still be active
1628          * and store the new timeout value. The timeout is armed in the
1629          * complete handler.
1630          */
1631         cancel_delayed_work(&hdev->discov_off);
1632         hdev->discov_timeout = timeout;
1633
1634         if (cp->val)
1635                 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1636         else
1637                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1638
1639         /* Limited discoverable mode */
1640         if (cp->val == 0x02)
1641                 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1642         else
1643                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1644
1645         err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1646                                  mgmt_set_discoverable_complete);
1647
1648         if (err < 0)
1649                 mgmt_pending_remove(cmd);
1650
1651 failed:
1652         hci_dev_unlock(hdev);
1653         return err;
1654 }
1655
1656 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1657                                           int err)
1658 {
1659         struct mgmt_pending_cmd *cmd = data;
1660
1661         bt_dev_dbg(hdev, "err %d", err);
1662
1663         /* Make sure the cmd is still outstanding. */
1664         if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1665                 return;
1666
1667         hci_dev_lock(hdev);
1668
1669         if (err) {
1670                 u8 mgmt_err = mgmt_status(err);
1671                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1672                 goto done;
1673         }
1674
1675         send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1676         new_settings(hdev, cmd->sk);
1677
1678 done:
1679         if (cmd)
1680                 mgmt_pending_remove(cmd);
1681
1682         hci_dev_unlock(hdev);
1683 }
1684
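/* Powered-off path for Set Connectable: only the HCI_CONNECTABLE (and, when
 * disabling, HCI_DISCOVERABLE) flags are updated, the settings response is
 * sent, and if anything changed the scan state is refreshed and new
 * settings are broadcast.
 */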
1685 static int set_connectable_update_settings(struct hci_dev *hdev,
1686                                            struct sock *sk, u8 val)
1687 {
1688         bool changed = false;
1689         int err;
1690
1691         if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1692                 changed = true;
1693
1694         if (val) {
1695                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1696         } else {
1697                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1698                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1699         }
1700
1701         err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1702         if (err < 0)
1703                 return err;
1704
1705         if (changed) {
1706                 hci_update_scan(hdev);
1707                 hci_update_passive_scan(hdev);
1708                 return new_settings(hdev, sk);
1709         }
1710
1711         return 0;
1712 }
1713
1714 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1715 {
1716         BT_DBG("%s", hdev->name);
1717
1718         return hci_update_connectable_sync(hdev);
1719 }
1720
1721 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1722                            u16 len)
1723 {
1724         struct mgmt_mode *cp = data;
1725         struct mgmt_pending_cmd *cmd;
1726         int err;
1727
1728         bt_dev_dbg(hdev, "sock %p", sk);
1729
1730         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1731             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1732                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1733                                        MGMT_STATUS_REJECTED);
1734
1735         if (cp->val != 0x00 && cp->val != 0x01)
1736                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1737                                        MGMT_STATUS_INVALID_PARAMS);
1738
1739         hci_dev_lock(hdev);
1740
1741         if (!hdev_is_powered(hdev)) {
1742                 err = set_connectable_update_settings(hdev, sk, cp->val);
1743                 goto failed;
1744         }
1745
1746         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1747             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1748                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1749                                       MGMT_STATUS_BUSY);
1750                 goto failed;
1751         }
1752
1753         cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1754         if (!cmd) {
1755                 err = -ENOMEM;
1756                 goto failed;
1757         }
1758
1759         if (cp->val) {
1760                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1761         } else {
1762                 if (hdev->discov_timeout > 0)
1763                         cancel_delayed_work(&hdev->discov_off);
1764
1765                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1766                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1767                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1768         }
1769
1770         err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1771                                  mgmt_set_connectable_complete);
1772
1773         if (err < 0)
1774                 mgmt_pending_remove(cmd);
1775
1776 failed:
1777         hci_dev_unlock(hdev);
1778         return err;
1779 }
1780
1781 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1782                         u16 len)
1783 {
1784         struct mgmt_mode *cp = data;
1785         bool changed;
1786         int err;
1787
1788         bt_dev_dbg(hdev, "sock %p", sk);
1789
1790         if (cp->val != 0x00 && cp->val != 0x01)
1791                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1792                                        MGMT_STATUS_INVALID_PARAMS);
1793
1794         hci_dev_lock(hdev);
1795
1796         if (cp->val)
1797                 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1798         else
1799                 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1800
1801         err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1802         if (err < 0)
1803                 goto unlock;
1804
1805         if (changed) {
1806                 /* In limited privacy mode the change of bondable mode
1807                  * may affect the local advertising address.
1808                  */
1809                 hci_update_discoverable(hdev);
1810
1811                 err = new_settings(hdev, sk);
1812         }
1813
1814 unlock:
1815         hci_dev_unlock(hdev);
1816         return err;
1817 }
1818
1819 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1820                              u16 len)
1821 {
1822         struct mgmt_mode *cp = data;
1823         struct mgmt_pending_cmd *cmd;
1824         u8 val, status;
1825         int err;
1826
1827         bt_dev_dbg(hdev, "sock %p", sk);
1828
1829         status = mgmt_bredr_support(hdev);
1830         if (status)
1831                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1832                                        status);
1833
1834         if (cp->val != 0x00 && cp->val != 0x01)
1835                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1836                                        MGMT_STATUS_INVALID_PARAMS);
1837
1838         hci_dev_lock(hdev);
1839
1840         if (!hdev_is_powered(hdev)) {
1841                 bool changed = false;
1842
1843                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1844                         hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1845                         changed = true;
1846                 }
1847
1848                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1849                 if (err < 0)
1850                         goto failed;
1851
1852                 if (changed)
1853                         err = new_settings(hdev, sk);
1854
1855                 goto failed;
1856         }
1857
1858         if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1859                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1860                                       MGMT_STATUS_BUSY);
1861                 goto failed;
1862         }
1863
1864         val = !!cp->val;
1865
1866         if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1867                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1868                 goto failed;
1869         }
1870
1871         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1872         if (!cmd) {
1873                 err = -ENOMEM;
1874                 goto failed;
1875         }
1876
1877         err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1878         if (err < 0) {
1879                 mgmt_pending_remove(cmd);
1880                 goto failed;
1881         }
1882
1883 failed:
1884         hci_dev_unlock(hdev);
1885         return err;
1886 }
1887
1888 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1889 {
1890         struct cmd_lookup match = { NULL, hdev };
1891         struct mgmt_pending_cmd *cmd = data;
1892         struct mgmt_mode *cp = cmd->param;
1893         u8 enable = cp->val;
1894         bool changed;
1895
1896         /* Make sure the cmd is still outstanding. */
1897         if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1898                 return;
1899
1900         if (err) {
1901                 u8 mgmt_err = mgmt_status(err);
1902
1903                 if (enable && hci_dev_test_and_clear_flag(hdev,
1904                                                           HCI_SSP_ENABLED)) {
1905                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1906                         new_settings(hdev, NULL);
1907                 }
1908
1909                 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1910                                      &mgmt_err);
1911                 return;
1912         }
1913
1914         if (enable) {
1915                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1916         } else {
1917                 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1918
1919                 if (!changed)
1920                         changed = hci_dev_test_and_clear_flag(hdev,
1921                                                               HCI_HS_ENABLED);
1922                 else
1923                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1924         }
1925
1926         mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1927
1928         if (changed)
1929                 new_settings(hdev, match.sk);
1930
1931         if (match.sk)
1932                 sock_put(match.sk);
1933
1934         hci_update_eir_sync(hdev);
1935 }
1936
1937 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1938 {
1939         struct mgmt_pending_cmd *cmd = data;
1940         struct mgmt_mode *cp = cmd->param;
1941         bool changed = false;
1942         int err;
1943
1944         if (cp->val)
1945                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1946
1947         err = hci_write_ssp_mode_sync(hdev, cp->val);
1948
1949         if (!err && changed)
1950                 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1951
1952         return err;
1953 }
1954
1955 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1956 {
1957         struct mgmt_mode *cp = data;
1958         struct mgmt_pending_cmd *cmd;
1959         u8 status;
1960         int err;
1961
1962         bt_dev_dbg(hdev, "sock %p", sk);
1963
1964         status = mgmt_bredr_support(hdev);
1965         if (status)
1966                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1967
1968         if (!lmp_ssp_capable(hdev))
1969                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1970                                        MGMT_STATUS_NOT_SUPPORTED);
1971
1972         if (cp->val != 0x00 && cp->val != 0x01)
1973                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1974                                        MGMT_STATUS_INVALID_PARAMS);
1975
1976         hci_dev_lock(hdev);
1977
1978         if (!hdev_is_powered(hdev)) {
1979                 bool changed;
1980
1981                 if (cp->val) {
1982                         changed = !hci_dev_test_and_set_flag(hdev,
1983                                                              HCI_SSP_ENABLED);
1984                 } else {
1985                         changed = hci_dev_test_and_clear_flag(hdev,
1986                                                               HCI_SSP_ENABLED);
1987                         if (!changed)
1988                                 changed = hci_dev_test_and_clear_flag(hdev,
1989                                                                       HCI_HS_ENABLED);
1990                         else
1991                                 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1992                 }
1993
1994                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1995                 if (err < 0)
1996                         goto failed;
1997
1998                 if (changed)
1999                         err = new_settings(hdev, sk);
2000
2001                 goto failed;
2002         }
2003
2004         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2005                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2006                                       MGMT_STATUS_BUSY);
2007                 goto failed;
2008         }
2009
2010         if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2011                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2012                 goto failed;
2013         }
2014
2015         cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2016         if (!cmd)
2017                 err = -ENOMEM;
2018         else
2019                 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2020                                          set_ssp_complete);
2021
2022         if (err < 0) {
2023                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2024                                       MGMT_STATUS_FAILED);
2025
2026                 if (cmd)
2027                         mgmt_pending_remove(cmd);
2028         }
2029
2030 failed:
2031         hci_dev_unlock(hdev);
2032         return err;
2033 }
2034
2035 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2036 {
2037         struct mgmt_mode *cp = data;
2038         bool changed;
2039         u8 status;
2040         int err;
2041
2042         bt_dev_dbg(hdev, "sock %p", sk);
2043
2044         if (!IS_ENABLED(CONFIG_BT_HS))
2045                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2046                                        MGMT_STATUS_NOT_SUPPORTED);
2047
2048         status = mgmt_bredr_support(hdev);
2049         if (status)
2050                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2051
2052         if (!lmp_ssp_capable(hdev))
2053                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2054                                        MGMT_STATUS_NOT_SUPPORTED);
2055
2056         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2057                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2058                                        MGMT_STATUS_REJECTED);
2059
2060         if (cp->val != 0x00 && cp->val != 0x01)
2061                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2062                                        MGMT_STATUS_INVALID_PARAMS);
2063
2064         hci_dev_lock(hdev);
2065
2066         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2067                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2068                                       MGMT_STATUS_BUSY);
2069                 goto unlock;
2070         }
2071
2072         if (cp->val) {
2073                 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2074         } else {
2075                 if (hdev_is_powered(hdev)) {
2076                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2077                                               MGMT_STATUS_REJECTED);
2078                         goto unlock;
2079                 }
2080
2081                 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2082         }
2083
2084         err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2085         if (err < 0)
2086                 goto unlock;
2087
2088         if (changed)
2089                 err = new_settings(hdev, sk);
2090
2091 unlock:
2092         hci_dev_unlock(hdev);
2093         return err;
2094 }
2095
2096 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2097 {
2098         struct cmd_lookup match = { NULL, hdev };
2099         u8 status = mgmt_status(err);
2100
2101         bt_dev_dbg(hdev, "err %d", err);
2102
2103         if (status) {
2104                 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2105                                                         &status);
2106                 return;
2107         }
2108
2109         mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2110
2111         new_settings(hdev, match.sk);
2112
2113         if (match.sk)
2114                 sock_put(match.sk);
2115 }
2116
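/* Synchronous worker for Set LE: disabling LE clears the advertising
 * instances and stops any active advertising before writing the LE host
 * support setting; when LE ends up enabled, the advertising data, scan
 * response data and passive scan state are refreshed after a successful
 * write.
 */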
2117 static int set_le_sync(struct hci_dev *hdev, void *data)
2118 {
2119         struct mgmt_pending_cmd *cmd = data;
2120         struct mgmt_mode *cp = cmd->param;
2121         u8 val = !!cp->val;
2122         int err;
2123
2124         if (!val) {
2125                 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2126
2127                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2128                         hci_disable_advertising_sync(hdev);
2129
2130                 if (ext_adv_capable(hdev))
2131                         hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2132         } else {
2133                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2134         }
2135
2136         err = hci_write_le_host_supported_sync(hdev, val, 0);
2137
2138         /* Make sure the controller has a good default for
2139          * advertising data. Restrict the update to when LE
2140          * has actually been enabled. During power on, the
2141          * update in powered_update_hci will take care of it.
2142          */
2143         if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2144                 if (ext_adv_capable(hdev)) {
2145                         int status;
2146
2147                         status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2148                         if (!status)
2149                                 hci_update_scan_rsp_data_sync(hdev, 0x00);
2150                 } else {
2151                         hci_update_adv_data_sync(hdev, 0x00);
2152                         hci_update_scan_rsp_data_sync(hdev, 0x00);
2153                 }
2154
2155                 hci_update_passive_scan(hdev);
2156         }
2157
2158         return err;
2159 }
2160
2161 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2162 {
2163         struct mgmt_pending_cmd *cmd = data;
2164         u8 status = mgmt_status(err);
2165         struct sock *sk = cmd->sk;
2166
2167         if (status) {
2168                 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2169                                      cmd_status_rsp, &status);
2170                 return;
2171         }
2172
2173         mgmt_pending_remove(cmd);
2174         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2175 }
2176
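/* Synchronous worker for Set Mesh Receiver: toggles the HCI_MESH flag and
 * copies the caller-supplied AD type filter into hdev->mesh_ad_types (left
 * zeroed, i.e. "forward everything", if the list does not fit), then
 * re-evaluates passive scanning.
 */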
2177 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2178 {
2179         struct mgmt_pending_cmd *cmd = data;
2180         struct mgmt_cp_set_mesh *cp = cmd->param;
2181         size_t len = cmd->param_len;
2182
2183         memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2184
2185         if (cp->enable)
2186                 hci_dev_set_flag(hdev, HCI_MESH);
2187         else
2188                 hci_dev_clear_flag(hdev, HCI_MESH);
2189
2190         len -= sizeof(*cp);
2191
2192         /* If filters don't fit, forward all adv pkts */
2193         if (len <= sizeof(hdev->mesh_ad_types))
2194                 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2195
2196         hci_update_passive_scan_sync(hdev);
2197         return 0;
2198 }
2199
2200 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2201 {
2202         struct mgmt_cp_set_mesh *cp = data;
2203         struct mgmt_pending_cmd *cmd;
2204         int err = 0;
2205
2206         bt_dev_dbg(hdev, "sock %p", sk);
2207
2208         if (!lmp_le_capable(hdev) ||
2209             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2210                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2211                                        MGMT_STATUS_NOT_SUPPORTED);
2212
2213         if (cp->enable != 0x00 && cp->enable != 0x01)
2214                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2215                                        MGMT_STATUS_INVALID_PARAMS);
2216
2217         hci_dev_lock(hdev);
2218
2219         cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2220         if (!cmd)
2221                 err = -ENOMEM;
2222         else
2223                 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2224                                          set_mesh_complete);
2225
2226         if (err < 0) {
2227                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2228                                       MGMT_STATUS_FAILED);
2229
2230                 if (cmd)
2231                         mgmt_pending_remove(cmd);
2232         }
2233
2234         hci_dev_unlock(hdev);
2235         return err;
2236 }
2237
2238 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2239 {
2240         struct mgmt_mesh_tx *mesh_tx = data;
2241         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2242         unsigned long mesh_send_interval;
2243         u8 mgmt_err = mgmt_status(err);
2244
2245         /* Report any errors here, but don't report completion */
2246
2247         if (mgmt_err) {
2248                 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2249                 /* Send Complete Error Code for handle */
2250                 mesh_send_complete(hdev, mesh_tx, false);
2251                 return;
2252         }
2253
2254         mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2255         queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2256                            mesh_send_interval);
2257 }
2258
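/* Mesh transmissions are sent as a short-lived advertising instance. The
 * instance number is allocated one past le_num_of_adv_sets so it does not
 * clash with the regular advertising instances, and the duration is derived
 * from the requested repeat count and the maximum advertising interval.
 */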
2259 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2260 {
2261         struct mgmt_mesh_tx *mesh_tx = data;
2262         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2263         struct adv_info *adv, *next_instance;
2264         u8 instance = hdev->le_num_of_adv_sets + 1;
2265         u16 timeout, duration;
2266         int err = 0;
2267
2268         if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2269                 return MGMT_STATUS_BUSY;
2270
2271         timeout = 1000;
2272         duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2273         adv = hci_add_adv_instance(hdev, instance, 0,
2274                                    send->adv_data_len, send->adv_data,
2275                                    0, NULL,
2276                                    timeout, duration,
2277                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
2278                                    hdev->le_adv_min_interval,
2279                                    hdev->le_adv_max_interval,
2280                                    mesh_tx->handle);
2281
2282         if (!IS_ERR(adv))
2283                 mesh_tx->instance = instance;
2284         else
2285                 err = PTR_ERR(adv);
2286
2287         if (hdev->cur_adv_instance == instance) {
2288                 /* If the currently advertised instance is being changed then
2289                  * cancel the current advertising and schedule the next
2290                  * instance. If there is only one instance then the overridden
2291                  * advertising data will be visible right away.
2292                  */
2293                 cancel_adv_timeout(hdev);
2294
2295                 next_instance = hci_get_next_instance(hdev, instance);
2296                 if (next_instance)
2297                         instance = next_instance->instance;
2298                 else
2299                         instance = 0;
2300         } else if (hdev->adv_instance_timeout) {
2301                 /* Immediately advertise the new instance if no other, or
2302                  * let it go naturally from queue if ADV is already happening
2303                  */
2304                 instance = 0;
2305         }
2306
2307         if (instance)
2308                 return hci_schedule_adv_instance_sync(hdev, instance, true);
2309
2310         return err;
2311 }
2312
2313 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2314 {
2315         struct mgmt_rp_mesh_read_features *rp = data;
2316
2317         if (rp->used_handles >= rp->max_handles)
2318                 return;
2319
2320         rp->handles[rp->used_handles++] = mesh_tx->handle;
2321 }
2322
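/* Mesh Read Features: reports the maximum number of concurrent mesh
 * transmissions and the handles currently in use, collected via
 * mgmt_mesh_foreach() for the requesting socket. The reply length is
 * trimmed so only the used handle slots are sent back.
 */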
2323 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2324                          void *data, u16 len)
2325 {
2326         struct mgmt_rp_mesh_read_features rp;
2327
2328         if (!lmp_le_capable(hdev) ||
2329             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2330                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2331                                        MGMT_STATUS_NOT_SUPPORTED);
2332
2333         memset(&rp, 0, sizeof(rp));
2334         rp.index = cpu_to_le16(hdev->id);
2335         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2336                 rp.max_handles = MESH_HANDLES_MAX;
2337
2338         hci_dev_lock(hdev);
2339
2340         if (rp.max_handles)
2341                 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2342
2343         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2344                           rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2345
2346         hci_dev_unlock(hdev);
2347         return 0;
2348 }
2349
2350 static int send_cancel(struct hci_dev *hdev, void *data)
2351 {
2352         struct mgmt_pending_cmd *cmd = data;
2353         struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2354         struct mgmt_mesh_tx *mesh_tx;
2355
2356         if (!cancel->handle) {
2357                 do {
2358                         mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2359
2360                         if (mesh_tx)
2361                                 mesh_send_complete(hdev, mesh_tx, false);
2362                 } while (mesh_tx);
2363         } else {
2364                 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2365
2366                 if (mesh_tx && mesh_tx->sk == cmd->sk)
2367                         mesh_send_complete(hdev, mesh_tx, false);
2368         }
2369
2370         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2371                           0, NULL, 0);
2372         mgmt_pending_free(cmd);
2373
2374         return 0;
2375 }
2376
2377 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2378                             void *data, u16 len)
2379 {
2380         struct mgmt_pending_cmd *cmd;
2381         int err;
2382
2383         if (!lmp_le_capable(hdev) ||
2384             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2385                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2386                                        MGMT_STATUS_NOT_SUPPORTED);
2387
2388         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2389                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2390                                        MGMT_STATUS_REJECTED);
2391
2392         hci_dev_lock(hdev);
2393         cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2394         if (!cmd)
2395                 err = -ENOMEM;
2396         else
2397                 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2398
2399         if (err < 0) {
2400                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2401                                       MGMT_STATUS_FAILED);
2402
2403                 if (cmd)
2404                         mgmt_pending_free(cmd);
2405         }
2406
2407         hci_dev_unlock(hdev);
2408         return err;
2409 }
2410
2411 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2412 {
2413         struct mgmt_mesh_tx *mesh_tx;
2414         struct mgmt_cp_mesh_send *send = data;
2415         struct mgmt_rp_mesh_read_features rp;
2416         bool sending;
2417         int err = 0;
2418
2419         if (!lmp_le_capable(hdev) ||
2420             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2421                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2422                                        MGMT_STATUS_NOT_SUPPORTED);
2423         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2424             len <= MGMT_MESH_SEND_SIZE ||
2425             len > (MGMT_MESH_SEND_SIZE + 31))
2426                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2427                                        MGMT_STATUS_REJECTED);
2428
2429         hci_dev_lock(hdev);
2430
2431         memset(&rp, 0, sizeof(rp));
2432         rp.max_handles = MESH_HANDLES_MAX;
2433
2434         mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2435
2436         if (rp.max_handles <= rp.used_handles) {
2437                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2438                                       MGMT_STATUS_BUSY);
2439                 goto done;
2440         }
2441
2442         sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2443         mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2444
2445         if (!mesh_tx)
2446                 err = -ENOMEM;
2447         else if (!sending)
2448                 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2449                                          mesh_send_start_complete);
2450
2451         if (err < 0) {
2452                 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2453                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2454                                       MGMT_STATUS_FAILED);
2455
2456                 if (mesh_tx) {
2457                         if (sending)
2458                                 mgmt_mesh_remove(mesh_tx);
2459                 }
2460         } else {
2461                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2462
2463                 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2464                                   &mesh_tx->handle, 1);
2465         }
2466
2467 done:
2468         hci_dev_unlock(hdev);
2469         return err;
2470 }
2471
2472 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2473 {
2474         struct mgmt_mode *cp = data;
2475         struct mgmt_pending_cmd *cmd;
2476         int err;
2477         u8 val, enabled;
2478
2479         bt_dev_dbg(hdev, "sock %p", sk);
2480
2481         if (!lmp_le_capable(hdev))
2482                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2483                                        MGMT_STATUS_NOT_SUPPORTED);
2484
2485         if (cp->val != 0x00 && cp->val != 0x01)
2486                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2487                                        MGMT_STATUS_INVALID_PARAMS);
2488
2489         /* Bluetooth single-mode LE-only controllers, or dual-mode
2490          * controllers configured as LE-only devices, do not allow
2491          * switching LE off. These either have LE enabled explicitly
2492          * or have had BR/EDR switched off previously.
2493          *
2494          * When trying to enable LE while it is already enabled,
2495          * gracefully send a positive response. Trying to disable it,
2496          * however, results in rejection.
2497          */
2498         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2499                 if (cp->val == 0x01)
2500                         return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2501
2502                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2503                                        MGMT_STATUS_REJECTED);
2504         }
2505
2506         hci_dev_lock(hdev);
2507
2508         val = !!cp->val;
2509         enabled = lmp_host_le_capable(hdev);
2510
2511         if (!hdev_is_powered(hdev) || val == enabled) {
2512                 bool changed = false;
2513
2514                 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2515                         hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2516                         changed = true;
2517                 }
2518
2519                 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2520                         hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2521                         changed = true;
2522                 }
2523
2524                 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2525                 if (err < 0)
2526                         goto unlock;
2527
2528                 if (changed)
2529                         err = new_settings(hdev, sk);
2530
2531                 goto unlock;
2532         }
2533
2534         if (pending_find(MGMT_OP_SET_LE, hdev) ||
2535             pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2536                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2537                                       MGMT_STATUS_BUSY);
2538                 goto unlock;
2539         }
2540
2541         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2542         if (!cmd)
2543                 err = -ENOMEM;
2544         else
2545                 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2546                                          set_le_complete);
2547
2548         if (err < 0) {
2549                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2550                                       MGMT_STATUS_FAILED);
2551
2552                 if (cmd)
2553                         mgmt_pending_remove(cmd);
2554         }
2555
2556 unlock:
2557         hci_dev_unlock(hdev);
2558         return err;
2559 }
2560
2561 /* This is a helper function to test for pending mgmt commands that can
2562  * trigger Class of Device or EIR HCI commands. Only one such pending
2563  * mgmt command is allowed at a time, since otherwise we cannot easily
2564  * track what the current and future values are and, based on that,
2565  * calculate whether a new HCI command is needed and with what value.
2566  */
2567 static bool pending_eir_or_class(struct hci_dev *hdev)
2568 {
2569         struct mgmt_pending_cmd *cmd;
2570
2571         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2572                 switch (cmd->opcode) {
2573                 case MGMT_OP_ADD_UUID:
2574                 case MGMT_OP_REMOVE_UUID:
2575                 case MGMT_OP_SET_DEV_CLASS:
2576                 case MGMT_OP_SET_POWERED:
2577                         return true;
2578                 }
2579         }
2580
2581         return false;
2582 }
2583
2584 static const u8 bluetooth_base_uuid[] = {
2585                         0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2586                         0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2587 };
2588
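/* bluetooth_base_uuid above is the Bluetooth Base UUID
 * (00000000-0000-1000-8000-00805f9b34fb) stored in little-endian byte
 * order. A UUID whose lower 96 bits match it is really a 16- or 32-bit
 * UUID: for example, 0000180d-0000-1000-8000-00805f9b34fb encodes the
 * 16-bit value 0x180d. Anything else is a full 128-bit UUID.
 */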
2589 static u8 get_uuid_size(const u8 *uuid)
2590 {
2591         u32 val;
2592
2593         if (memcmp(uuid, bluetooth_base_uuid, 12))
2594                 return 128;
2595
2596         val = get_unaligned_le32(&uuid[12]);
2597         if (val > 0xffff)
2598                 return 32;
2599
2600         return 16;
2601 }
2602
2603 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2604 {
2605         struct mgmt_pending_cmd *cmd = data;
2606
2607         bt_dev_dbg(hdev, "err %d", err);
2608
2609         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2610                           mgmt_status(err), hdev->dev_class, 3);
2611
2612         mgmt_pending_free(cmd);
2613 }
2614
2615 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2616 {
2617         int err;
2618
2619         err = hci_update_class_sync(hdev);
2620         if (err)
2621                 return err;
2622
2623         return hci_update_eir_sync(hdev);
2624 }
2625
2626 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2627 {
2628         struct mgmt_cp_add_uuid *cp = data;
2629         struct mgmt_pending_cmd *cmd;
2630         struct bt_uuid *uuid;
2631         int err;
2632
2633         bt_dev_dbg(hdev, "sock %p", sk);
2634
2635         hci_dev_lock(hdev);
2636
2637         if (pending_eir_or_class(hdev)) {
2638                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2639                                       MGMT_STATUS_BUSY);
2640                 goto failed;
2641         }
2642
2643         uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2644         if (!uuid) {
2645                 err = -ENOMEM;
2646                 goto failed;
2647         }
2648
2649         memcpy(uuid->uuid, cp->uuid, 16);
2650         uuid->svc_hint = cp->svc_hint;
2651         uuid->size = get_uuid_size(cp->uuid);
2652
2653         list_add_tail(&uuid->list, &hdev->uuids);
2654
2655         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2656         if (!cmd) {
2657                 err = -ENOMEM;
2658                 goto failed;
2659         }
2660
2661         err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2662         if (err < 0) {
2663                 mgmt_pending_free(cmd);
2664                 goto failed;
2665         }
2666
2667 failed:
2668         hci_dev_unlock(hdev);
2669         return err;
2670 }
2671
2672 static bool enable_service_cache(struct hci_dev *hdev)
2673 {
2674         if (!hdev_is_powered(hdev))
2675                 return false;
2676
2677         if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2678                 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2679                                    CACHE_TIMEOUT);
2680                 return true;
2681         }
2682
2683         return false;
2684 }
2685
2686 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2687 {
2688         int err;
2689
2690         err = hci_update_class_sync(hdev);
2691         if (err)
2692                 return err;
2693
2694         return hci_update_eir_sync(hdev);
2695 }
2696
2697 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2698                        u16 len)
2699 {
2700         struct mgmt_cp_remove_uuid *cp = data;
2701         struct mgmt_pending_cmd *cmd;
2702         struct bt_uuid *match, *tmp;
2703         static const u8 bt_uuid_any[] = {
2704                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2705         };
2706         int err, found;
2707
2708         bt_dev_dbg(hdev, "sock %p", sk);
2709
2710         hci_dev_lock(hdev);
2711
2712         if (pending_eir_or_class(hdev)) {
2713                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2714                                       MGMT_STATUS_BUSY);
2715                 goto unlock;
2716         }
2717
2718         if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2719                 hci_uuids_clear(hdev);
2720
2721                 if (enable_service_cache(hdev)) {
2722                         err = mgmt_cmd_complete(sk, hdev->id,
2723                                                 MGMT_OP_REMOVE_UUID,
2724                                                 0, hdev->dev_class, 3);
2725                         goto unlock;
2726                 }
2727
2728                 goto update_class;
2729         }
2730
2731         found = 0;
2732
2733         list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2734                 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2735                         continue;
2736
2737                 list_del(&match->list);
2738                 kfree(match);
2739                 found++;
2740         }
2741
2742         if (found == 0) {
2743                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2744                                       MGMT_STATUS_INVALID_PARAMS);
2745                 goto unlock;
2746         }
2747
2748 update_class:
2749         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2750         if (!cmd) {
2751                 err = -ENOMEM;
2752                 goto unlock;
2753         }
2754
2755         err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2756                                  mgmt_class_complete);
2757         if (err < 0)
2758                 mgmt_pending_free(cmd);
2759
2760 unlock:
2761         hci_dev_unlock(hdev);
2762         return err;
2763 }
2764
2765 static int set_class_sync(struct hci_dev *hdev, void *data)
2766 {
2767         int err = 0;
2768
2769         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2770                 cancel_delayed_work_sync(&hdev->service_cache);
2771                 err = hci_update_eir_sync(hdev);
2772         }
2773
2774         if (err)
2775                 return err;
2776
2777         return hci_update_class_sync(hdev);
2778 }
2779
2780 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2781                          u16 len)
2782 {
2783         struct mgmt_cp_set_dev_class *cp = data;
2784         struct mgmt_pending_cmd *cmd;
2785         int err;
2786
2787         bt_dev_dbg(hdev, "sock %p", sk);
2788
2789         if (!lmp_bredr_capable(hdev))
2790                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2791                                        MGMT_STATUS_NOT_SUPPORTED);
2792
2793         hci_dev_lock(hdev);
2794
2795         if (pending_eir_or_class(hdev)) {
2796                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2797                                       MGMT_STATUS_BUSY);
2798                 goto unlock;
2799         }
2800
2801         if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2802                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2803                                       MGMT_STATUS_INVALID_PARAMS);
2804                 goto unlock;
2805         }
2806
2807         hdev->major_class = cp->major;
2808         hdev->minor_class = cp->minor;
2809
2810         if (!hdev_is_powered(hdev)) {
2811                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2812                                         hdev->dev_class, 3);
2813                 goto unlock;
2814         }
2815
2816         cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2817         if (!cmd) {
2818                 err = -ENOMEM;
2819                 goto unlock;
2820         }
2821
2822         err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2823                                  mgmt_class_complete);
2824         if (err < 0)
2825                 mgmt_pending_free(cmd);
2826
2827 unlock:
2828         hci_dev_unlock(hdev);
2829         return err;
2830 }
2831
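/* Load Link Keys: the variable-length payload is validated by re-computing
 * the expected size, struct_size(cp, keys, key_count), i.e.
 * sizeof(*cp) + key_count * sizeof(struct mgmt_link_key_info), and by
 * bounding key_count so the total still fits in the 16-bit length.
 */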
2832 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2833                           u16 len)
2834 {
2835         struct mgmt_cp_load_link_keys *cp = data;
2836         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2837                                    sizeof(struct mgmt_link_key_info));
2838         u16 key_count, expected_len;
2839         bool changed;
2840         int i;
2841
2842         bt_dev_dbg(hdev, "sock %p", sk);
2843
2844         if (!lmp_bredr_capable(hdev))
2845                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2846                                        MGMT_STATUS_NOT_SUPPORTED);
2847
2848         key_count = __le16_to_cpu(cp->key_count);
2849         if (key_count > max_key_count) {
2850                 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2851                            key_count);
2852                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2853                                        MGMT_STATUS_INVALID_PARAMS);
2854         }
2855
2856         expected_len = struct_size(cp, keys, key_count);
2857         if (expected_len != len) {
2858                 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2859                            expected_len, len);
2860                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2861                                        MGMT_STATUS_INVALID_PARAMS);
2862         }
2863
2864         if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2865                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2866                                        MGMT_STATUS_INVALID_PARAMS);
2867
2868         bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2869                    key_count);
2870
2871         for (i = 0; i < key_count; i++) {
2872                 struct mgmt_link_key_info *key = &cp->keys[i];
2873
2874                 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2875                         return mgmt_cmd_status(sk, hdev->id,
2876                                                MGMT_OP_LOAD_LINK_KEYS,
2877                                                MGMT_STATUS_INVALID_PARAMS);
2878         }
2879
2880         hci_dev_lock(hdev);
2881
2882         hci_link_keys_clear(hdev);
2883
2884         if (cp->debug_keys)
2885                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2886         else
2887                 changed = hci_dev_test_and_clear_flag(hdev,
2888                                                       HCI_KEEP_DEBUG_KEYS);
2889
2890         if (changed)
2891                 new_settings(hdev, NULL);
2892
2893         for (i = 0; i < key_count; i++) {
2894                 struct mgmt_link_key_info *key = &cp->keys[i];
2895
2896                 if (hci_is_blocked_key(hdev,
2897                                        HCI_BLOCKED_KEY_TYPE_LINKKEY,
2898                                        key->val)) {
2899                         bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2900                                     &key->addr.bdaddr);
2901                         continue;
2902                 }
2903
2904                 /* Always ignore debug keys and require a new pairing if
2905                  * the user wants to use them.
2906                  */
2907                 if (key->type == HCI_LK_DEBUG_COMBINATION)
2908                         continue;
2909
2910                 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2911                                  key->type, key->pin_len, NULL);
2912         }
2913
2914         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2915
2916         hci_dev_unlock(hdev);
2917
2918         return 0;
2919 }
2920
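/* Emit MGMT_EV_DEVICE_UNPAIRED to every mgmt socket except skip_sk, the
 * socket that issued the Unpair Device command and already receives a
 * command response.
 */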
2921 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2922                            u8 addr_type, struct sock *skip_sk)
2923 {
2924         struct mgmt_ev_device_unpaired ev;
2925
2926         bacpy(&ev.addr.bdaddr, bdaddr);
2927         ev.addr.type = addr_type;
2928
2929         return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2930                           skip_sk);
2931 }
2932
2933 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2934 {
2935         struct mgmt_pending_cmd *cmd = data;
2936         struct mgmt_cp_unpair_device *cp = cmd->param;
2937
2938         if (!err)
2939                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2940
2941         cmd->cmd_complete(cmd, err);
2942         mgmt_pending_free(cmd);
2943 }
2944
2945 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2946 {
2947         struct mgmt_pending_cmd *cmd = data;
2948         struct mgmt_cp_unpair_device *cp = cmd->param;
2949         struct hci_conn *conn;
2950
2951         if (cp->addr.type == BDADDR_BREDR)
2952                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2953                                                &cp->addr.bdaddr);
2954         else
2955                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2956                                                le_addr_type(cp->addr.type));
2957
2958         if (!conn)
2959                 return 0;
2960
2961         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2962 }
2963
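/* Unpair Device: for BR/EDR the stored link key is removed; for LE any
 * ongoing SMP pairing is cancelled (dropping LTK and IRK) and automatic
 * connection establishment is disabled. If a disconnect was requested and
 * the device is connected, the link is terminated via unpair_device_sync().
 */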
2964 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2965                          u16 len)
2966 {
2967         struct mgmt_cp_unpair_device *cp = data;
2968         struct mgmt_rp_unpair_device rp;
2969         struct hci_conn_params *params;
2970         struct mgmt_pending_cmd *cmd;
2971         struct hci_conn *conn;
2972         u8 addr_type;
2973         int err;
2974
2975         memset(&rp, 0, sizeof(rp));
2976         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2977         rp.addr.type = cp->addr.type;
2978
2979         if (!bdaddr_type_is_valid(cp->addr.type))
2980                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2981                                          MGMT_STATUS_INVALID_PARAMS,
2982                                          &rp, sizeof(rp));
2983
2984         if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2985                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2986                                          MGMT_STATUS_INVALID_PARAMS,
2987                                          &rp, sizeof(rp));
2988
2989         hci_dev_lock(hdev);
2990
2991         if (!hdev_is_powered(hdev)) {
2992                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2993                                         MGMT_STATUS_NOT_POWERED, &rp,
2994                                         sizeof(rp));
2995                 goto unlock;
2996         }
2997
2998         if (cp->addr.type == BDADDR_BREDR) {
2999                 /* If disconnection is requested, then look up the
3000                  * connection. If the remote device is connected, the
3001                  * connection will later be used to terminate the link.
3002                  *
3003                  * Explicitly setting it to NULL means the link will
3004                  * not be terminated.
3005                  */
3006                 if (cp->disconnect)
3007                         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3008                                                        &cp->addr.bdaddr);
3009                 else
3010                         conn = NULL;
3011
3012                 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3013                 if (err < 0) {
3014                         err = mgmt_cmd_complete(sk, hdev->id,
3015                                                 MGMT_OP_UNPAIR_DEVICE,
3016                                                 MGMT_STATUS_NOT_PAIRED, &rp,
3017                                                 sizeof(rp));
3018                         goto unlock;
3019                 }
3020
3021                 goto done;
3022         }
3023
3024         /* LE address type */
3025         addr_type = le_addr_type(cp->addr.type);
3026
3027         /* Abort any ongoing SMP pairing. Removes the LTK and IRK if they exist. */
3028         err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3029         if (err < 0) {
3030                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3031                                         MGMT_STATUS_NOT_PAIRED, &rp,
3032                                         sizeof(rp));
3033                 goto unlock;
3034         }
3035
3036         conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3037         if (!conn) {
3038                 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3039                 goto done;
3040         }
3041
3042
3043         /* Defer clearing the connection parameters until the connection is
3044          * closed so that they can be kept if a re-pairing happens.
3045          */
3046         set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3047
3048         /* Disable auto-connection parameters if present */
3049         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3050         if (params) {
3051                 if (params->explicit_connect)
3052                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3053                 else
3054                         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3055         }
3056
3057         /* If disconnection is not requested, then clear the connection
3058          * variable so that the link is not terminated.
3059          */
3060         if (!cp->disconnect)
3061                 conn = NULL;
3062
3063 done:
3064         /* If the connection variable is set, then termination of the
3065          * link is requested.
3066          */
3067         if (!conn) {
3068                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3069                                         &rp, sizeof(rp));
3070                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3071                 goto unlock;
3072         }
3073
3074         cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3075                                sizeof(*cp));
3076         if (!cmd) {
3077                 err = -ENOMEM;
3078                 goto unlock;
3079         }
3080
3081         cmd->cmd_complete = addr_cmd_complete;
3082
3083         err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3084                                  unpair_device_complete);
3085         if (err < 0)
3086                 mgmt_pending_free(cmd);
3087
3088 unlock:
3089         hci_dev_unlock(hdev);
3090         return err;
3091 }
3092
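     /* MGMT_OP_DISCONNECT handler. Validates the address type, rejects the
      * request while the controller is down or another Disconnect command is
      * still pending, and then issues hci_disconnect() on the matching ACL or
      * LE connection. The pending command is completed later, when the
      * disconnection is reported back by the controller.
      */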
3093 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3094                       u16 len)
3095 {
3096         struct mgmt_cp_disconnect *cp = data;
3097         struct mgmt_rp_disconnect rp;
3098         struct mgmt_pending_cmd *cmd;
3099         struct hci_conn *conn;
3100         int err;
3101
3102         bt_dev_dbg(hdev, "sock %p", sk);
3103
3104         memset(&rp, 0, sizeof(rp));
3105         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3106         rp.addr.type = cp->addr.type;
3107
3108         if (!bdaddr_type_is_valid(cp->addr.type))
3109                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3110                                          MGMT_STATUS_INVALID_PARAMS,
3111                                          &rp, sizeof(rp));
3112
3113         hci_dev_lock(hdev);
3114
3115         if (!test_bit(HCI_UP, &hdev->flags)) {
3116                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3117                                         MGMT_STATUS_NOT_POWERED, &rp,
3118                                         sizeof(rp));
3119                 goto failed;
3120         }
3121
3122         if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3123                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3124                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3125                 goto failed;
3126         }
3127
3128         if (cp->addr.type == BDADDR_BREDR)
3129                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3130                                                &cp->addr.bdaddr);
3131         else
3132                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3133                                                le_addr_type(cp->addr.type));
3134
3135         if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3136                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3137                                         MGMT_STATUS_NOT_CONNECTED, &rp,
3138                                         sizeof(rp));
3139                 goto failed;
3140         }
3141
3142         cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3143         if (!cmd) {
3144                 err = -ENOMEM;
3145                 goto failed;
3146         }
3147
3148         cmd->cmd_complete = generic_cmd_complete;
3149
3150         err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3151         if (err < 0)
3152                 mgmt_pending_remove(cmd);
3153
3154 failed:
3155         hci_dev_unlock(hdev);
3156         return err;
3157 }
3158
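     /* Translate an HCI link type and address type into the BDADDR_* address
      * type used on the management interface; for example (LE_LINK,
      * ADDR_LE_DEV_PUBLIC) maps to BDADDR_LE_PUBLIC, while any BR/EDR link maps
      * to BDADDR_BREDR.
      */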
3159 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3160 {
3161         switch (link_type) {
3162         case LE_LINK:
3163                 switch (addr_type) {
3164                 case ADDR_LE_DEV_PUBLIC:
3165                         return BDADDR_LE_PUBLIC;
3166
3167                 default:
3168                         /* Fallback to LE Random address type */
3169                         return BDADDR_LE_RANDOM;
3170                 }
3171
3172         default:
3173                 /* Fallback to BR/EDR type */
3174                 return BDADDR_BREDR;
3175         }
3176 }
3177
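     /* MGMT_OP_GET_CONNECTIONS handler: report every connection currently
      * marked HCI_CONN_MGMT_CONNECTED. The reply is sized in a first pass and
      * filled in a second one; SCO/eSCO entries are skipped by not advancing
      * the index, which is why the final length is recalculated from the count.
      */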
3178 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3179                            u16 data_len)
3180 {
3181         struct mgmt_rp_get_connections *rp;
3182         struct hci_conn *c;
3183         int err;
3184         u16 i;
3185
3186         bt_dev_dbg(hdev, "sock %p", sk);
3187
3188         hci_dev_lock(hdev);
3189
3190         if (!hdev_is_powered(hdev)) {
3191                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3192                                       MGMT_STATUS_NOT_POWERED);
3193                 goto unlock;
3194         }
3195
3196         i = 0;
3197         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3198                 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3199                         i++;
3200         }
3201
3202         rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3203         if (!rp) {
3204                 err = -ENOMEM;
3205                 goto unlock;
3206         }
3207
3208         i = 0;
3209         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3210                 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3211                         continue;
3212                 bacpy(&rp->addr[i].bdaddr, &c->dst);
3213                 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3214                 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3215                         continue;
3216                 i++;
3217         }
3218
3219         rp->conn_count = cpu_to_le16(i);
3220
3221         /* Recalculate length in case of filtered SCO connections, etc */
3222         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3223                                 struct_size(rp, addr, i));
3224
3225         kfree(rp);
3226
3227 unlock:
3228         hci_dev_unlock(hdev);
3229         return err;
3230 }
3231
3232 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3233                                    struct mgmt_cp_pin_code_neg_reply *cp)
3234 {
3235         struct mgmt_pending_cmd *cmd;
3236         int err;
3237
3238         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3239                                sizeof(*cp));
3240         if (!cmd)
3241                 return -ENOMEM;
3242
3243         cmd->cmd_complete = addr_cmd_complete;
3244
3245         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3246                            sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3247         if (err < 0)
3248                 mgmt_pending_remove(cmd);
3249
3250         return err;
3251 }
3252
3253 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3254                           u16 len)
3255 {
3256         struct hci_conn *conn;
3257         struct mgmt_cp_pin_code_reply *cp = data;
3258         struct hci_cp_pin_code_reply reply;
3259         struct mgmt_pending_cmd *cmd;
3260         int err;
3261
3262         bt_dev_dbg(hdev, "sock %p", sk);
3263
3264         hci_dev_lock(hdev);
3265
3266         if (!hdev_is_powered(hdev)) {
3267                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3268                                       MGMT_STATUS_NOT_POWERED);
3269                 goto failed;
3270         }
3271
3272         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3273         if (!conn) {
3274                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3275                                       MGMT_STATUS_NOT_CONNECTED);
3276                 goto failed;
3277         }
3278
3279         if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3280                 struct mgmt_cp_pin_code_neg_reply ncp;
3281
3282                 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3283
3284                 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3285
3286                 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3287                 if (err >= 0)
3288                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3289                                               MGMT_STATUS_INVALID_PARAMS);
3290
3291                 goto failed;
3292         }
3293
3294         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3295         if (!cmd) {
3296                 err = -ENOMEM;
3297                 goto failed;
3298         }
3299
3300         cmd->cmd_complete = addr_cmd_complete;
3301
3302         bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3303         reply.pin_len = cp->pin_len;
3304         memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3305
3306         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3307         if (err < 0)
3308                 mgmt_pending_remove(cmd);
3309
3310 failed:
3311         hci_dev_unlock(hdev);
3312         return err;
3313 }
3314
3315 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3316                              u16 len)
3317 {
3318         struct mgmt_cp_set_io_capability *cp = data;
3319
3320         bt_dev_dbg(hdev, "sock %p", sk);
3321
3322         if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3323                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3324                                        MGMT_STATUS_INVALID_PARAMS);
3325
3326         hci_dev_lock(hdev);
3327
3328         hdev->io_capability = cp->io_capability;
3329
3330         bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3331
3332         hci_dev_unlock(hdev);
3333
3334         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3335                                  NULL, 0);
3336 }
3337
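     /* Find the pending MGMT_OP_PAIR_DEVICE command, if any, whose user_data
      * points at the given connection.
      */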
3338 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3339 {
3340         struct hci_dev *hdev = conn->hdev;
3341         struct mgmt_pending_cmd *cmd;
3342
3343         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3344                 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3345                         continue;
3346
3347                 if (cmd->user_data != conn)
3348                         continue;
3349
3350                 return cmd;
3351         }
3352
3353         return NULL;
3354 }
3355
3356 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3357 {
3358         struct mgmt_rp_pair_device rp;
3359         struct hci_conn *conn = cmd->user_data;
3360         int err;
3361
3362         bacpy(&rp.addr.bdaddr, &conn->dst);
3363         rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3364
3365         err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3366                                 status, &rp, sizeof(rp));
3367
3368         /* So we don't get further callbacks for this connection */
3369         conn->connect_cfm_cb = NULL;
3370         conn->security_cfm_cb = NULL;
3371         conn->disconn_cfm_cb = NULL;
3372
3373         hci_conn_drop(conn);
3374
3375         /* The device is paired so there is no need to remove
3376          * its connection parameters anymore.
3377          */
3378         clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3379
3380         hci_conn_put(conn);
3381
3382         return err;
3383 }
3384
3385 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3386 {
3387         u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3388         struct mgmt_pending_cmd *cmd;
3389
3390         cmd = find_pairing(conn);
3391         if (cmd) {
3392                 cmd->cmd_complete(cmd, status);
3393                 mgmt_pending_remove(cmd);
3394         }
3395 }
3396
3397 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3398 {
3399         struct mgmt_pending_cmd *cmd;
3400
3401         BT_DBG("status %u", status);
3402
3403         cmd = find_pairing(conn);
3404         if (!cmd) {
3405                 BT_DBG("Unable to find a pending command");
3406                 return;
3407         }
3408
3409         cmd->cmd_complete(cmd, mgmt_status(status));
3410         mgmt_pending_remove(cmd);
3411 }
3412
3413 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3414 {
3415         struct mgmt_pending_cmd *cmd;
3416
3417         BT_DBG("status %u", status);
3418
3419         if (!status)
3420                 return;
3421
3422         cmd = find_pairing(conn);
3423         if (!cmd) {
3424                 BT_DBG("Unable to find a pending command");
3425                 return;
3426         }
3427
3428         cmd->cmd_complete(cmd, mgmt_status(status));
3429         mgmt_pending_remove(cmd);
3430 }
3431
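     /* MGMT_OP_PAIR_DEVICE handler. Creates (or reuses) an ACL or LE connection
      * to the peer, hooks the pairing completion callbacks into it and leaves a
      * pending command that is completed from those callbacks. For LE, the
      * connection parameters are added up front so the device keeps being
      * tracked after pairing.
      */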
3432 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3433                        u16 len)
3434 {
3435         struct mgmt_cp_pair_device *cp = data;
3436         struct mgmt_rp_pair_device rp;
3437         struct mgmt_pending_cmd *cmd;
3438         u8 sec_level, auth_type;
3439         struct hci_conn *conn;
3440         int err;
3441
3442         bt_dev_dbg(hdev, "sock %p", sk);
3443
3444         memset(&rp, 0, sizeof(rp));
3445         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3446         rp.addr.type = cp->addr.type;
3447
3448         if (!bdaddr_type_is_valid(cp->addr.type))
3449                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3450                                          MGMT_STATUS_INVALID_PARAMS,
3451                                          &rp, sizeof(rp));
3452
3453         if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3454                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3455                                          MGMT_STATUS_INVALID_PARAMS,
3456                                          &rp, sizeof(rp));
3457
3458         hci_dev_lock(hdev);
3459
3460         if (!hdev_is_powered(hdev)) {
3461                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3462                                         MGMT_STATUS_NOT_POWERED, &rp,
3463                                         sizeof(rp));
3464                 goto unlock;
3465         }
3466
3467         if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3468                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3469                                         MGMT_STATUS_ALREADY_PAIRED, &rp,
3470                                         sizeof(rp));
3471                 goto unlock;
3472         }
3473
3474         sec_level = BT_SECURITY_MEDIUM;
3475         auth_type = HCI_AT_DEDICATED_BONDING;
3476
3477         if (cp->addr.type == BDADDR_BREDR) {
3478                 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3479                                        auth_type, CONN_REASON_PAIR_DEVICE);
3480         } else {
3481                 u8 addr_type = le_addr_type(cp->addr.type);
3482                 struct hci_conn_params *p;
3483
3484                 /* When pairing a new device, it is expected that this
3485                  * device will be remembered for future connections. Adding
3486                  * the connection parameter information ahead of time allows
3487                  * tracking of the peripheral's preferred values and speeds
3488                  * up any further connection establishment.
3489                  *
3490                  * If connection parameters already exist, they are kept and
3491                  * this call changes nothing.
3492                  */
3493                 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
                     if (!p) {
                             /* hci_conn_params_add() may fail to allocate */
                             err = -ENOMEM;
                             goto unlock;
                     }
3494
3495                 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3496                         p->auto_connect = HCI_AUTO_CONN_DISABLED;
3497
3498                 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3499                                            sec_level, HCI_LE_CONN_TIMEOUT,
3500                                            CONN_REASON_PAIR_DEVICE);
3501         }
3502
3503         if (IS_ERR(conn)) {
3504                 int status;
3505
3506                 if (PTR_ERR(conn) == -EBUSY)
3507                         status = MGMT_STATUS_BUSY;
3508                 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3509                         status = MGMT_STATUS_NOT_SUPPORTED;
3510                 else if (PTR_ERR(conn) == -ECONNREFUSED)
3511                         status = MGMT_STATUS_REJECTED;
3512                 else
3513                         status = MGMT_STATUS_CONNECT_FAILED;
3514
3515                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3516                                         status, &rp, sizeof(rp));
3517                 goto unlock;
3518         }
3519
3520         if (conn->connect_cfm_cb) {
3521                 hci_conn_drop(conn);
3522                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3523                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3524                 goto unlock;
3525         }
3526
3527         cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3528         if (!cmd) {
3529                 err = -ENOMEM;
3530                 hci_conn_drop(conn);
3531                 goto unlock;
3532         }
3533
3534         cmd->cmd_complete = pairing_complete;
3535
3536         /* For LE, just connecting isn't proof that the pairing finished */
3537         if (cp->addr.type == BDADDR_BREDR) {
3538                 conn->connect_cfm_cb = pairing_complete_cb;
3539                 conn->security_cfm_cb = pairing_complete_cb;
3540                 conn->disconn_cfm_cb = pairing_complete_cb;
3541         } else {
3542                 conn->connect_cfm_cb = le_pairing_complete_cb;
3543                 conn->security_cfm_cb = le_pairing_complete_cb;
3544                 conn->disconn_cfm_cb = le_pairing_complete_cb;
3545         }
3546
3547         conn->io_capability = cp->io_cap;
3548         cmd->user_data = hci_conn_get(conn);
3549
3550         if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3551             hci_conn_security(conn, sec_level, auth_type, true)) {
3552                 cmd->cmd_complete(cmd, 0);
3553                 mgmt_pending_remove(cmd);
3554         }
3555
3556         err = 0;
3557
3558 unlock:
3559         hci_dev_unlock(hdev);
3560         return err;
3561 }
3562
3563 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3564 {
3565         struct hci_conn *conn;
3566         u16 handle = PTR_ERR(data);
3567
3568         conn = hci_conn_hash_lookup_handle(hdev, handle);
3569         if (!conn)
3570                 return 0;
3571
3572         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
3573 }
3574
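     /* MGMT_OP_CANCEL_PAIR_DEVICE handler. Fails the pending Pair Device
      * command with MGMT_STATUS_CANCELLED, removes any key material created so
      * far and, if the link was only brought up for the pairing attempt, queues
      * its termination via abort_conn_sync().
      */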
3575 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3576                               u16 len)
3577 {
3578         struct mgmt_addr_info *addr = data;
3579         struct mgmt_pending_cmd *cmd;
3580         struct hci_conn *conn;
3581         int err;
3582
3583         bt_dev_dbg(hdev, "sock %p", sk);
3584
3585         hci_dev_lock(hdev);
3586
3587         if (!hdev_is_powered(hdev)) {
3588                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3589                                       MGMT_STATUS_NOT_POWERED);
3590                 goto unlock;
3591         }
3592
3593         cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3594         if (!cmd) {
3595                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3596                                       MGMT_STATUS_INVALID_PARAMS);
3597                 goto unlock;
3598         }
3599
3600         conn = cmd->user_data;
3601
3602         if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3603                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3604                                       MGMT_STATUS_INVALID_PARAMS);
3605                 goto unlock;
3606         }
3607
3608         cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3609         mgmt_pending_remove(cmd);
3610
3611         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3612                                 addr, sizeof(*addr));
3613
3614         /* Since the user doesn't want to proceed with the connection, abort
3615          * any ongoing pairing and then terminate the link if it was created
3616          * because of the Pair Device action.
3617          */
3618         if (addr->type == BDADDR_BREDR)
3619                 hci_remove_link_key(hdev, &addr->bdaddr);
3620         else
3621                 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3622                                               le_addr_type(addr->type));
3623
3624         if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3625                 hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3626                                    NULL);
3627
3628 unlock:
3629         hci_dev_unlock(hdev);
3630         return err;
3631 }
3632
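     /* Common helper for the user confirmation/passkey (negative) reply
      * commands. LE replies are handed to SMP directly, while BR/EDR replies
      * are turned into the corresponding HCI command with a pending mgmt
      * command tracking the result.
      */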
3633 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3634                              struct mgmt_addr_info *addr, u16 mgmt_op,
3635                              u16 hci_op, __le32 passkey)
3636 {
3637         struct mgmt_pending_cmd *cmd;
3638         struct hci_conn *conn;
3639         int err;
3640
3641         hci_dev_lock(hdev);
3642
3643         if (!hdev_is_powered(hdev)) {
3644                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3645                                         MGMT_STATUS_NOT_POWERED, addr,
3646                                         sizeof(*addr));
3647                 goto done;
3648         }
3649
3650         if (addr->type == BDADDR_BREDR)
3651                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3652         else
3653                 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3654                                                le_addr_type(addr->type));
3655
3656         if (!conn) {
3657                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3658                                         MGMT_STATUS_NOT_CONNECTED, addr,
3659                                         sizeof(*addr));
3660                 goto done;
3661         }
3662
3663         if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3664                 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3665                 if (!err)
3666                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3667                                                 MGMT_STATUS_SUCCESS, addr,
3668                                                 sizeof(*addr));
3669                 else
3670                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3671                                                 MGMT_STATUS_FAILED, addr,
3672                                                 sizeof(*addr));
3673
3674                 goto done;
3675         }
3676
3677         cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3678         if (!cmd) {
3679                 err = -ENOMEM;
3680                 goto done;
3681         }
3682
3683         cmd->cmd_complete = addr_cmd_complete;
3684
3685         /* Continue with pairing via HCI */
3686         if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3687                 struct hci_cp_user_passkey_reply cp;
3688
3689                 bacpy(&cp.bdaddr, &addr->bdaddr);
3690                 cp.passkey = passkey;
3691                 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3692         } else
3693                 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3694                                    &addr->bdaddr);
3695
3696         if (err < 0)
3697                 mgmt_pending_remove(cmd);
3698
3699 done:
3700         hci_dev_unlock(hdev);
3701         return err;
3702 }
3703
3704 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3705                               void *data, u16 len)
3706 {
3707         struct mgmt_cp_pin_code_neg_reply *cp = data;
3708
3709         bt_dev_dbg(hdev, "sock %p", sk);
3710
3711         return user_pairing_resp(sk, hdev, &cp->addr,
3712                                 MGMT_OP_PIN_CODE_NEG_REPLY,
3713                                 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3714 }
3715
3716 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3717                               u16 len)
3718 {
3719         struct mgmt_cp_user_confirm_reply *cp = data;
3720
3721         bt_dev_dbg(hdev, "sock %p", sk);
3722
3723         if (len != sizeof(*cp))
3724                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3725                                        MGMT_STATUS_INVALID_PARAMS);
3726
3727         return user_pairing_resp(sk, hdev, &cp->addr,
3728                                  MGMT_OP_USER_CONFIRM_REPLY,
3729                                  HCI_OP_USER_CONFIRM_REPLY, 0);
3730 }
3731
3732 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3733                                   void *data, u16 len)
3734 {
3735         struct mgmt_cp_user_confirm_neg_reply *cp = data;
3736
3737         bt_dev_dbg(hdev, "sock %p", sk);
3738
3739         return user_pairing_resp(sk, hdev, &cp->addr,
3740                                  MGMT_OP_USER_CONFIRM_NEG_REPLY,
3741                                  HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3742 }
3743
3744 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3745                               u16 len)
3746 {
3747         struct mgmt_cp_user_passkey_reply *cp = data;
3748
3749         bt_dev_dbg(hdev, "sock %p", sk);
3750
3751         return user_pairing_resp(sk, hdev, &cp->addr,
3752                                  MGMT_OP_USER_PASSKEY_REPLY,
3753                                  HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3754 }
3755
3756 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3757                                   void *data, u16 len)
3758 {
3759         struct mgmt_cp_user_passkey_neg_reply *cp = data;
3760
3761         bt_dev_dbg(hdev, "sock %p", sk);
3762
3763         return user_pairing_resp(sk, hdev, &cp->addr,
3764                                  MGMT_OP_USER_PASSKEY_NEG_REPLY,
3765                                  HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3766 }
3767
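     /* If the current advertising instance carries data affected by @flags
      * (for example the local name or appearance), cancel its timeout and
      * schedule the next instance so the updated value gets picked up.
      */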
3768 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3769 {
3770         struct adv_info *adv_instance;
3771
3772         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3773         if (!adv_instance)
3774                 return 0;
3775
3776         /* Stop if the current instance doesn't need to be changed */
3777         if (!(adv_instance->flags & flags))
3778                 return 0;
3779
3780         cancel_adv_timeout(hdev);
3781
3782         adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3783         if (!adv_instance)
3784                 return 0;
3785
3786         hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3787
3788         return 0;
3789 }
3790
3791 static int name_changed_sync(struct hci_dev *hdev, void *data)
3792 {
3793         return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3794 }
3795
3796 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3797 {
3798         struct mgmt_pending_cmd *cmd = data;
3799         struct mgmt_cp_set_local_name *cp = cmd->param;
3800         u8 status = mgmt_status(err);
3801
3802         bt_dev_dbg(hdev, "err %d", err);
3803
3804         if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3805                 return;
3806
3807         if (status) {
3808                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3809                                 status);
3810         } else {
3811                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3812                                   cp, sizeof(*cp));
3813
3814                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3815                         hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3816         }
3817
3818         mgmt_pending_remove(cmd);
3819 }
3820
3821 static int set_name_sync(struct hci_dev *hdev, void *data)
3822 {
3823         if (lmp_bredr_capable(hdev)) {
3824                 hci_update_name_sync(hdev);
3825                 hci_update_eir_sync(hdev);
3826         }
3827
3828         /* The name is stored in the scan response data, so there is
3829          * no need to update the advertising data here.
3830          */
3831         if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3832                 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3833
3834         return 0;
3835 }
3836
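     /* MGMT_OP_SET_LOCAL_NAME handler. An unchanged name is acknowledged
      * immediately, and while powered off only the stored copies are updated;
      * otherwise the controller update (name, EIR and scan response data) is
      * queued via set_name_sync().
      */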
3837 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3838                           u16 len)
3839 {
3840         struct mgmt_cp_set_local_name *cp = data;
3841         struct mgmt_pending_cmd *cmd;
3842         int err;
3843
3844         bt_dev_dbg(hdev, "sock %p", sk);
3845
3846         hci_dev_lock(hdev);
3847
3848         /* If the old values are the same as the new ones, just return a
3849          * direct command complete event.
3850          */
3851         if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3852             !memcmp(hdev->short_name, cp->short_name,
3853                     sizeof(hdev->short_name))) {
3854                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3855                                         data, len);
3856                 goto failed;
3857         }
3858
3859         memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3860
3861         if (!hdev_is_powered(hdev)) {
3862                 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3863
3864                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3865                                         data, len);
3866                 if (err < 0)
3867                         goto failed;
3868
3869                 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3870                                          len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3871                 ext_info_changed(hdev, sk);
3872
3873                 goto failed;
3874         }
3875
3876         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3877         if (!cmd)
3878                 err = -ENOMEM;
3879         else
3880                 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3881                                          set_name_complete);
3882
3883         if (err < 0) {
3884                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3885                                       MGMT_STATUS_FAILED);
3886
3887                 if (cmd)
3888                         mgmt_pending_remove(cmd);
3889
3890                 goto failed;
3891         }
3892
3893         memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3894
3895 failed:
3896         hci_dev_unlock(hdev);
3897         return err;
3898 }
3899
3900 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3901 {
3902         return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3903 }
3904
3905 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3906                           u16 len)
3907 {
3908         struct mgmt_cp_set_appearance *cp = data;
3909         u16 appearance;
3910         int err;
3911
3912         bt_dev_dbg(hdev, "sock %p", sk);
3913
3914         if (!lmp_le_capable(hdev))
3915                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3916                                        MGMT_STATUS_NOT_SUPPORTED);
3917
3918         appearance = le16_to_cpu(cp->appearance);
3919
3920         hci_dev_lock(hdev);
3921
3922         if (hdev->appearance != appearance) {
3923                 hdev->appearance = appearance;
3924
3925                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3926                         hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3927                                            NULL);
3928
3929                 ext_info_changed(hdev, sk);
3930         }
3931
3932         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3933                                 0);
3934
3935         hci_dev_unlock(hdev);
3936
3937         return err;
3938 }
3939
3940 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3941                                  void *data, u16 len)
3942 {
3943         struct mgmt_rp_get_phy_configuration rp;
3944
3945         bt_dev_dbg(hdev, "sock %p", sk);
3946
3947         hci_dev_lock(hdev);
3948
3949         memset(&rp, 0, sizeof(rp));
3950
3951         rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3952         rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3953         rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3954
3955         hci_dev_unlock(hdev);
3956
3957         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3958                                  &rp, sizeof(rp));
3959 }
3960
3961 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3962 {
3963         struct mgmt_ev_phy_configuration_changed ev;
3964
3965         memset(&ev, 0, sizeof(ev));
3966
3967         ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3968
3969         return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3970                           sizeof(ev), skip);
3971 }
3972
3973 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3974 {
3975         struct mgmt_pending_cmd *cmd = data;
3976         struct sk_buff *skb = cmd->skb;
3977         u8 status = mgmt_status(err);
3978
3979         if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3980                 return;
3981
3982         if (!status) {
3983                 if (!skb)
3984                         status = MGMT_STATUS_FAILED;
3985                 else if (IS_ERR(skb))
3986                         status = mgmt_status(PTR_ERR(skb));
3987                 else
3988                         status = mgmt_status(skb->data[0]);
3989         }
3990
3991         bt_dev_dbg(hdev, "status %d", status);
3992
3993         if (status) {
3994                 mgmt_cmd_status(cmd->sk, hdev->id,
3995                                 MGMT_OP_SET_PHY_CONFIGURATION, status);
3996         } else {
3997                 mgmt_cmd_complete(cmd->sk, hdev->id,
3998                                   MGMT_OP_SET_PHY_CONFIGURATION, 0,
3999                                   NULL, 0);
4000
4001                 mgmt_phy_configuration_changed(hdev, cmd->sk);
4002         }
4003
4004         if (skb && !IS_ERR(skb))
4005                 kfree_skb(skb);
4006
4007         mgmt_pending_remove(cmd);
4008 }
4009
4010 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4011 {
4012         struct mgmt_pending_cmd *cmd = data;
4013         struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4014         struct hci_cp_le_set_default_phy cp_phy;
4015         u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4016
4017         memset(&cp_phy, 0, sizeof(cp_phy));
4018
4019         if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4020                 cp_phy.all_phys |= 0x01;
4021
4022         if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4023                 cp_phy.all_phys |= 0x02;
4024
4025         if (selected_phys & MGMT_PHY_LE_1M_TX)
4026                 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4027
4028         if (selected_phys & MGMT_PHY_LE_2M_TX)
4029                 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4030
4031         if (selected_phys & MGMT_PHY_LE_CODED_TX)
4032                 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4033
4034         if (selected_phys & MGMT_PHY_LE_1M_RX)
4035                 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4036
4037         if (selected_phys & MGMT_PHY_LE_2M_RX)
4038                 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4039
4040         if (selected_phys & MGMT_PHY_LE_CODED_RX)
4041                 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4042
4043         cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4044                                    sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4045
4046         return 0;
4047 }
4048
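     /* MGMT_OP_SET_PHY_CONFIGURATION handler. BR/EDR PHY selections are applied
      * by rewriting hdev->pkt_type (the EDR bits are "shall not be used" flags,
      * so a bit is cleared when the corresponding rate/slot combination is
      * selected), while changes to the LE PHY selection are sent to the
      * controller through set_default_phy_sync().
      */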
4049 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4050                                  void *data, u16 len)
4051 {
4052         struct mgmt_cp_set_phy_configuration *cp = data;
4053         struct mgmt_pending_cmd *cmd;
4054         u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4055         u16 pkt_type = (HCI_DH1 | HCI_DM1);
4056         bool changed = false;
4057         int err;
4058
4059         bt_dev_dbg(hdev, "sock %p", sk);
4060
4061         configurable_phys = get_configurable_phys(hdev);
4062         supported_phys = get_supported_phys(hdev);
4063         selected_phys = __le32_to_cpu(cp->selected_phys);
4064
4065         if (selected_phys & ~supported_phys)
4066                 return mgmt_cmd_status(sk, hdev->id,
4067                                        MGMT_OP_SET_PHY_CONFIGURATION,
4068                                        MGMT_STATUS_INVALID_PARAMS);
4069
4070         unconfigure_phys = supported_phys & ~configurable_phys;
4071
4072         if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4073                 return mgmt_cmd_status(sk, hdev->id,
4074                                        MGMT_OP_SET_PHY_CONFIGURATION,
4075                                        MGMT_STATUS_INVALID_PARAMS);
4076
4077         if (selected_phys == get_selected_phys(hdev))
4078                 return mgmt_cmd_complete(sk, hdev->id,
4079                                          MGMT_OP_SET_PHY_CONFIGURATION,
4080                                          0, NULL, 0);
4081
4082         hci_dev_lock(hdev);
4083
4084         if (!hdev_is_powered(hdev)) {
4085                 err = mgmt_cmd_status(sk, hdev->id,
4086                                       MGMT_OP_SET_PHY_CONFIGURATION,
4087                                       MGMT_STATUS_REJECTED);
4088                 goto unlock;
4089         }
4090
4091         if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4092                 err = mgmt_cmd_status(sk, hdev->id,
4093                                       MGMT_OP_SET_PHY_CONFIGURATION,
4094                                       MGMT_STATUS_BUSY);
4095                 goto unlock;
4096         }
4097
4098         if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4099                 pkt_type |= (HCI_DH3 | HCI_DM3);
4100         else
4101                 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4102
4103         if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4104                 pkt_type |= (HCI_DH5 | HCI_DM5);
4105         else
4106                 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4107
4108         if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4109                 pkt_type &= ~HCI_2DH1;
4110         else
4111                 pkt_type |= HCI_2DH1;
4112
4113         if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4114                 pkt_type &= ~HCI_2DH3;
4115         else
4116                 pkt_type |= HCI_2DH3;
4117
4118         if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4119                 pkt_type &= ~HCI_2DH5;
4120         else
4121                 pkt_type |= HCI_2DH5;
4122
4123         if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4124                 pkt_type &= ~HCI_3DH1;
4125         else
4126                 pkt_type |= HCI_3DH1;
4127
4128         if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4129                 pkt_type &= ~HCI_3DH3;
4130         else
4131                 pkt_type |= HCI_3DH3;
4132
4133         if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4134                 pkt_type &= ~HCI_3DH5;
4135         else
4136                 pkt_type |= HCI_3DH5;
4137
4138         if (pkt_type != hdev->pkt_type) {
4139                 hdev->pkt_type = pkt_type;
4140                 changed = true;
4141         }
4142
4143         if ((selected_phys & MGMT_PHY_LE_MASK) ==
4144             (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4145                 if (changed)
4146                         mgmt_phy_configuration_changed(hdev, sk);
4147
4148                 err = mgmt_cmd_complete(sk, hdev->id,
4149                                         MGMT_OP_SET_PHY_CONFIGURATION,
4150                                         0, NULL, 0);
4151
4152                 goto unlock;
4153         }
4154
4155         cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4156                                len);
4157         if (!cmd)
4158                 err = -ENOMEM;
4159         else
4160                 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4161                                          set_default_phy_complete);
4162
4163         if (err < 0) {
4164                 err = mgmt_cmd_status(sk, hdev->id,
4165                                       MGMT_OP_SET_PHY_CONFIGURATION,
4166                                       MGMT_STATUS_FAILED);
4167
4168                 if (cmd)
4169                         mgmt_pending_remove(cmd);
4170         }
4171
4172 unlock:
4173         hci_dev_unlock(hdev);
4174
4175         return err;
4176 }
4177
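     /* MGMT_OP_SET_BLOCKED_KEYS handler: replace the device's blocked-key list
      * with the keys supplied by userspace, after validating that the payload
      * length matches the advertised key count.
      */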
4178 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4179                             u16 len)
4180 {
4181         int err = MGMT_STATUS_SUCCESS;
4182         struct mgmt_cp_set_blocked_keys *keys = data;
4183         const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4184                                    sizeof(struct mgmt_blocked_key_info));
4185         u16 key_count, expected_len;
4186         int i;
4187
4188         bt_dev_dbg(hdev, "sock %p", sk);
4189
4190         key_count = __le16_to_cpu(keys->key_count);
4191         if (key_count > max_key_count) {
4192                 bt_dev_err(hdev, "too big key_count value %u", key_count);
4193                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4194                                        MGMT_STATUS_INVALID_PARAMS);
4195         }
4196
4197         expected_len = struct_size(keys, keys, key_count);
4198         if (expected_len != len) {
4199                 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4200                            expected_len, len);
4201                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4202                                        MGMT_STATUS_INVALID_PARAMS);
4203         }
4204
4205         hci_dev_lock(hdev);
4206
4207         hci_blocked_keys_clear(hdev);
4208
4209         for (i = 0; i < key_count; ++i) {
4210                 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4211
4212                 if (!b) {
4213                         err = MGMT_STATUS_NO_RESOURCES;
4214                         break;
4215                 }
4216
4217                 b->type = keys->keys[i].type;
4218                 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4219                 list_add_rcu(&b->list, &hdev->blocked_keys);
4220         }
4221         hci_dev_unlock(hdev);
4222
4223         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4224                                 err, NULL, 0);
4225 }
4226
4227 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4228                                void *data, u16 len)
4229 {
4230         struct mgmt_mode *cp = data;
4231         int err;
4232         bool changed = false;
4233
4234         bt_dev_dbg(hdev, "sock %p", sk);
4235
4236         if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4237                 return mgmt_cmd_status(sk, hdev->id,
4238                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4239                                        MGMT_STATUS_NOT_SUPPORTED);
4240
4241         if (cp->val != 0x00 && cp->val != 0x01)
4242                 return mgmt_cmd_status(sk, hdev->id,
4243                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4244                                        MGMT_STATUS_INVALID_PARAMS);
4245
4246         hci_dev_lock(hdev);
4247
4248         if (hdev_is_powered(hdev) &&
4249             !!cp->val != hci_dev_test_flag(hdev,
4250                                            HCI_WIDEBAND_SPEECH_ENABLED)) {
4251                 err = mgmt_cmd_status(sk, hdev->id,
4252                                       MGMT_OP_SET_WIDEBAND_SPEECH,
4253                                       MGMT_STATUS_REJECTED);
4254                 goto unlock;
4255         }
4256
4257         if (cp->val)
4258                 changed = !hci_dev_test_and_set_flag(hdev,
4259                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4260         else
4261                 changed = hci_dev_test_and_clear_flag(hdev,
4262                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4263
4264         err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4265         if (err < 0)
4266                 goto unlock;
4267
4268         if (changed)
4269                 err = new_settings(hdev, sk);
4270
4271 unlock:
4272         hci_dev_unlock(hdev);
4273         return err;
4274 }
4275
4276 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4277                                void *data, u16 data_len)
4278 {
4279         char buf[20];
4280         struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4281         u16 cap_len = 0;
4282         u8 flags = 0;
4283         u8 tx_power_range[2];
4284
4285         bt_dev_dbg(hdev, "sock %p", sk);
4286
4287         memset(&buf, 0, sizeof(buf));
4288
4289         hci_dev_lock(hdev);
4290
4291         /* When the Read Simple Pairing Options command is supported,
4292          * remote public key validation is supported.
4293          *
4294          * Alternatively, when Microsoft extensions are available, they can
4295          * indicate support for public key validation as well.
4296          */
4297         if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4298                 flags |= 0x01;  /* Remote public key validation (BR/EDR) */
4299
4300         flags |= 0x02;          /* Remote public key validation (LE) */
4301
4302         /* When the Read Encryption Key Size command is supported, then the
4303          * encryption key size is enforced.
4304          */
4305         if (hdev->commands[20] & 0x10)
4306                 flags |= 0x04;  /* Encryption key size enforcement (BR/EDR) */
4307
4308         flags |= 0x08;          /* Encryption key size enforcement (LE) */
4309
4310         cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4311                                   &flags, 1);
4312
4313         /* When the Read Simple Pairing Options command is supported, the
4314          * maximum encryption key size information is also provided.
4315          */
4316         if (hdev->commands[41] & 0x08)
4317                 cap_len = eir_append_le16(rp->cap, cap_len,
4318                                           MGMT_CAP_MAX_ENC_KEY_SIZE,
4319                                           hdev->max_enc_key_size);
4320
4321         cap_len = eir_append_le16(rp->cap, cap_len,
4322                                   MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4323                                   SMP_MAX_ENC_KEY_SIZE);
4324
4325         /* Append the min/max LE tx power parameters if we were able to fetch
4326          * them from the controller.
4327          */
4328         if (hdev->commands[38] & 0x80) {
4329                 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4330                 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4331                 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4332                                           tx_power_range, 2);
4333         }
4334
4335         rp->cap_len = cpu_to_le16(cap_len);
4336
4337         hci_dev_unlock(hdev);
4338
4339         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4340                                  rp, sizeof(*rp) + cap_len);
4341 }
4342
4343 #ifdef CONFIG_BT_FEATURE_DEBUG
4344 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4345 static const u8 debug_uuid[16] = {
4346         0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4347         0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4348 };
4349 #endif
4350
4351 /* 330859bc-7506-492d-9370-9a6f0614037f */
4352 static const u8 quality_report_uuid[16] = {
4353         0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4354         0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4355 };
4356
4357 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4358 static const u8 offload_codecs_uuid[16] = {
4359         0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4360         0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4361 };
4362
4363 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4364 static const u8 le_simultaneous_roles_uuid[16] = {
4365         0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4366         0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4367 };
4368
4369 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4370 static const u8 rpa_resolution_uuid[16] = {
4371         0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4372         0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4373 };
4374
4375 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4376 static const u8 iso_socket_uuid[16] = {
4377         0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4378         0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4379 };
4380
4381 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4382 static const u8 mgmt_mesh_uuid[16] = {
4383         0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4384         0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4385 };
4386
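     /* MGMT_OP_READ_EXP_FEATURES_INFO handler. Builds the list of experimental
      * feature UUIDs (debug, LE simultaneous roles, LL privacy/RPA resolution,
      * quality report, offload codecs, ISO sockets and mesh) together with
      * their current flags, and opts the socket in to future
      * MGMT_EV_EXP_FEATURE_CHANGED events.
      */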
4387 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4388                                   void *data, u16 data_len)
4389 {
4390         struct mgmt_rp_read_exp_features_info *rp;
4391         size_t len;
4392         u16 idx = 0;
4393         u32 flags;
4394         int status;
4395
4396         bt_dev_dbg(hdev, "sock %p", sk);
4397
4398         /* Enough space for 7 features */
4399         len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4400         rp = kzalloc(len, GFP_KERNEL);
4401         if (!rp)
4402                 return -ENOMEM;
4403
4404 #ifdef CONFIG_BT_FEATURE_DEBUG
4405         if (!hdev) {
4406                 flags = bt_dbg_get() ? BIT(0) : 0;
4407
4408                 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4409                 rp->features[idx].flags = cpu_to_le32(flags);
4410                 idx++;
4411         }
4412 #endif
4413
4414         if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4415                 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4416                         flags = BIT(0);
4417                 else
4418                         flags = 0;
4419
4420                 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4421                 rp->features[idx].flags = cpu_to_le32(flags);
4422                 idx++;
4423         }
4424
4425         if (hdev && ll_privacy_capable(hdev)) {
4426                 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4427                         flags = BIT(0) | BIT(1);
4428                 else
4429                         flags = BIT(1);
4430
4431                 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4432                 rp->features[idx].flags = cpu_to_le32(flags);
4433                 idx++;
4434         }
4435
4436         if (hdev && (aosp_has_quality_report(hdev) ||
4437                      hdev->set_quality_report)) {
4438                 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4439                         flags = BIT(0);
4440                 else
4441                         flags = 0;
4442
4443                 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4444                 rp->features[idx].flags = cpu_to_le32(flags);
4445                 idx++;
4446         }
4447
4448         if (hdev && hdev->get_data_path_id) {
4449                 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4450                         flags = BIT(0);
4451                 else
4452                         flags = 0;
4453
4454                 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4455                 rp->features[idx].flags = cpu_to_le32(flags);
4456                 idx++;
4457         }
4458
4459         if (IS_ENABLED(CONFIG_BT_LE)) {
4460                 flags = iso_enabled() ? BIT(0) : 0;
4461                 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4462                 rp->features[idx].flags = cpu_to_le32(flags);
4463                 idx++;
4464         }
4465
4466         if (hdev && lmp_le_capable(hdev)) {
4467                 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4468                         flags = BIT(0);
4469                 else
4470                         flags = 0;
4471
4472                 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4473                 rp->features[idx].flags = cpu_to_le32(flags);
4474                 idx++;
4475         }
4476
4477         rp->feature_count = cpu_to_le16(idx);
4478
4479         /* After reading the experimental features information, enable
4480          * the events to update the client on any future change.
4481          */
4482         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4483
4484         status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4485                                    MGMT_OP_READ_EXP_FEATURES_INFO,
4486                                    0, rp, sizeof(*rp) + (20 * idx));
4487
4488         kfree(rp);
4489         return status;
4490 }
4491
4492 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4493                                           struct sock *skip)
4494 {
4495         struct mgmt_ev_exp_feature_changed ev;
4496
4497         memset(&ev, 0, sizeof(ev));
4498         memcpy(ev.uuid, rpa_resolution_uuid, 16);
4499         ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4500
4501         /* Do we need to be atomic with the conn_flags? */
4502         if (enabled && privacy_mode_capable(hdev))
4503                 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4504         else
4505                 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4506
4507         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4508                                   &ev, sizeof(ev),
4509                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4510
4511 }
4512
4513 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4514                                bool enabled, struct sock *skip)
4515 {
4516         struct mgmt_ev_exp_feature_changed ev;
4517
4518         memset(&ev, 0, sizeof(ev));
4519         memcpy(ev.uuid, uuid, 16);
4520         ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4521
4522         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4523                                   &ev, sizeof(ev),
4524                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4525 }
4526
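/* EXP_FEAT() describes one entry of the exp_features[] table below,
 * mapping a 16-byte experimental feature UUID to its set handler.
 */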
4527 #define EXP_FEAT(_uuid, _set_func)      \
4528 {                                       \
4529         .uuid = _uuid,                  \
4530         .set_func = _set_func,          \
4531 }
4532
4533 /* The zero key uuid is special. Multiple exp features are set through it. */
4534 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4535                              struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4536 {
4537         struct mgmt_rp_set_exp_feature rp;
4538
4539         memset(rp.uuid, 0, 16);
4540         rp.flags = cpu_to_le32(0);
4541
4542 #ifdef CONFIG_BT_FEATURE_DEBUG
4543         if (!hdev) {
4544                 bool changed = bt_dbg_get();
4545
4546                 bt_dbg_set(false);
4547
4548                 if (changed)
4549                         exp_feature_changed(NULL, ZERO_KEY, false, sk);
4550         }
4551 #endif
4552
4553         if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4554                 bool changed;
4555
4556                 changed = hci_dev_test_and_clear_flag(hdev,
4557                                                       HCI_ENABLE_LL_PRIVACY);
4558                 if (changed)
4559                         exp_feature_changed(hdev, rpa_resolution_uuid, false,
4560                                             sk);
4561         }
4562
4563         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4564
4565         return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4566                                  MGMT_OP_SET_EXP_FEATURE, 0,
4567                                  &rp, sizeof(rp));
4568 }
4569
4570 #ifdef CONFIG_BT_FEATURE_DEBUG
4571 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4572                           struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4573 {
4574         struct mgmt_rp_set_exp_feature rp;
4575
4576         bool val, changed;
4577         int err;
4578
4579         /* Command requires the non-controller index */
4580         if (hdev)
4581                 return mgmt_cmd_status(sk, hdev->id,
4582                                        MGMT_OP_SET_EXP_FEATURE,
4583                                        MGMT_STATUS_INVALID_INDEX);
4584
4585         /* Parameters are limited to a single octet */
4586         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4587                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4588                                        MGMT_OP_SET_EXP_FEATURE,
4589                                        MGMT_STATUS_INVALID_PARAMS);
4590
4591         /* Only boolean on/off is supported */
4592         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4593                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4594                                        MGMT_OP_SET_EXP_FEATURE,
4595                                        MGMT_STATUS_INVALID_PARAMS);
4596
4597         val = !!cp->param[0];
4598         changed = val ? !bt_dbg_get() : bt_dbg_get();
4599         bt_dbg_set(val);
4600
4601         memcpy(rp.uuid, debug_uuid, 16);
4602         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4603
4604         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4605
4606         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4607                                 MGMT_OP_SET_EXP_FEATURE, 0,
4608                                 &rp, sizeof(rp));
4609
4610         if (changed)
4611                 exp_feature_changed(hdev, debug_uuid, val, sk);
4612
4613         return err;
4614 }
4615 #endif
4616
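/* Experimental Mesh support: enabling sets HCI_MESH_EXPERIMENTAL,
 * disabling clears both HCI_MESH and HCI_MESH_EXPERIMENTAL. The reply
 * mirrors the requested value in BIT(0) of the flags field.
 */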
4617 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4618                               struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4619 {
4620         struct mgmt_rp_set_exp_feature rp;
4621         bool val, changed;
4622         int err;
4623
4624         /* Command requires the controller index */
4625         if (!hdev)
4626                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4627                                        MGMT_OP_SET_EXP_FEATURE,
4628                                        MGMT_STATUS_INVALID_INDEX);
4629
4630         /* Parameters are limited to a single octet */
4631         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4632                 return mgmt_cmd_status(sk, hdev->id,
4633                                        MGMT_OP_SET_EXP_FEATURE,
4634                                        MGMT_STATUS_INVALID_PARAMS);
4635
4636         /* Only boolean on/off is supported */
4637         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4638                 return mgmt_cmd_status(sk, hdev->id,
4639                                        MGMT_OP_SET_EXP_FEATURE,
4640                                        MGMT_STATUS_INVALID_PARAMS);
4641
4642         val = !!cp->param[0];
4643
4644         if (val) {
4645                 changed = !hci_dev_test_and_set_flag(hdev,
4646                                                      HCI_MESH_EXPERIMENTAL);
4647         } else {
4648                 hci_dev_clear_flag(hdev, HCI_MESH);
4649                 changed = hci_dev_test_and_clear_flag(hdev,
4650                                                       HCI_MESH_EXPERIMENTAL);
4651         }
4652
4653         memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4654         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4655
4656         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4657
4658         err = mgmt_cmd_complete(sk, hdev->id,
4659                                 MGMT_OP_SET_EXP_FEATURE, 0,
4660                                 &rp, sizeof(rp));
4661
4662         if (changed)
4663                 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4664
4665         return err;
4666 }
4667
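/* Experimental LL privacy (RPA resolution) toggle. Changes are only
 * accepted while the controller is powered down; enabling additionally
 * clears HCI_ADVERTISING. The reply flags carry BIT(0) (enabled) and
 * BIT(1) (supported settings changed).
 */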
4668 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4669                                    struct mgmt_cp_set_exp_feature *cp,
4670                                    u16 data_len)
4671 {
4672         struct mgmt_rp_set_exp_feature rp;
4673         bool val, changed;
4674         int err;
4675         u32 flags;
4676
4677         /* Command requires the controller index */
4678         if (!hdev)
4679                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4680                                        MGMT_OP_SET_EXP_FEATURE,
4681                                        MGMT_STATUS_INVALID_INDEX);
4682
4683         /* Changes can only be made when the controller is powered down */
4684         if (hdev_is_powered(hdev))
4685                 return mgmt_cmd_status(sk, hdev->id,
4686                                        MGMT_OP_SET_EXP_FEATURE,
4687                                        MGMT_STATUS_REJECTED);
4688
4689         /* Parameters are limited to a single octet */
4690         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4691                 return mgmt_cmd_status(sk, hdev->id,
4692                                        MGMT_OP_SET_EXP_FEATURE,
4693                                        MGMT_STATUS_INVALID_PARAMS);
4694
4695         /* Only boolean on/off is supported */
4696         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4697                 return mgmt_cmd_status(sk, hdev->id,
4698                                        MGMT_OP_SET_EXP_FEATURE,
4699                                        MGMT_STATUS_INVALID_PARAMS);
4700
4701         val = !!cp->param[0];
4702
4703         if (val) {
4704                 changed = !hci_dev_test_and_set_flag(hdev,
4705                                                      HCI_ENABLE_LL_PRIVACY);
4706                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4707
4708                 /* Enable LL privacy + supported settings changed */
4709                 flags = BIT(0) | BIT(1);
4710         } else {
4711                 changed = hci_dev_test_and_clear_flag(hdev,
4712                                                       HCI_ENABLE_LL_PRIVACY);
4713
4714                 /* Disable LL privacy + supported settings changed */
4715                 flags = BIT(1);
4716         }
4717
4718         memcpy(rp.uuid, rpa_resolution_uuid, 16);
4719         rp.flags = cpu_to_le32(flags);
4720
4721         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4722
4723         err = mgmt_cmd_complete(sk, hdev->id,
4724                                 MGMT_OP_SET_EXP_FEATURE, 0,
4725                                 &rp, sizeof(rp));
4726
4727         if (changed)
4728                 exp_ll_privacy_feature_changed(val, hdev, sk);
4729
4730         return err;
4731 }
4732
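/* Experimental quality report toggle. A driver-provided
 * set_quality_report callback takes precedence; otherwise the AOSP
 * vendor extension is used. Runs under hci_req_sync_lock() and keeps
 * the HCI_QUALITY_REPORT flag in sync with the requested state.
 */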
4733 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4734                                    struct mgmt_cp_set_exp_feature *cp,
4735                                    u16 data_len)
4736 {
4737         struct mgmt_rp_set_exp_feature rp;
4738         bool val, changed;
4739         int err;
4740
4741         /* Command requires a valid controller index */
4742         if (!hdev)
4743                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4744                                        MGMT_OP_SET_EXP_FEATURE,
4745                                        MGMT_STATUS_INVALID_INDEX);
4746
4747         /* Parameters are limited to a single octet */
4748         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4749                 return mgmt_cmd_status(sk, hdev->id,
4750                                        MGMT_OP_SET_EXP_FEATURE,
4751                                        MGMT_STATUS_INVALID_PARAMS);
4752
4753         /* Only boolean on/off is supported */
4754         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4755                 return mgmt_cmd_status(sk, hdev->id,
4756                                        MGMT_OP_SET_EXP_FEATURE,
4757                                        MGMT_STATUS_INVALID_PARAMS);
4758
4759         hci_req_sync_lock(hdev);
4760
4761         val = !!cp->param[0];
4762         changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4763
4764         if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4765                 err = mgmt_cmd_status(sk, hdev->id,
4766                                       MGMT_OP_SET_EXP_FEATURE,
4767                                       MGMT_STATUS_NOT_SUPPORTED);
4768                 goto unlock_quality_report;
4769         }
4770
4771         if (changed) {
4772                 if (hdev->set_quality_report)
4773                         err = hdev->set_quality_report(hdev, val);
4774                 else
4775                         err = aosp_set_quality_report(hdev, val);
4776
4777                 if (err) {
4778                         err = mgmt_cmd_status(sk, hdev->id,
4779                                               MGMT_OP_SET_EXP_FEATURE,
4780                                               MGMT_STATUS_FAILED);
4781                         goto unlock_quality_report;
4782                 }
4783
4784                 if (val)
4785                         hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4786                 else
4787                         hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4788         }
4789
4790         bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4791
4792         memcpy(rp.uuid, quality_report_uuid, 16);
4793         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4794         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4795
4796         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4797                                 &rp, sizeof(rp));
4798
4799         if (changed)
4800                 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4801
4802 unlock_quality_report:
4803         hci_req_sync_unlock(hdev);
4804         return err;
4805 }
4806
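/* Experimental offload codecs toggle. Only supported when the driver
 * exposes a data path (hdev->get_data_path_id); the requested value is
 * tracked via the HCI_OFFLOAD_CODECS_ENABLED flag.
 */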
4807 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4808                                   struct mgmt_cp_set_exp_feature *cp,
4809                                   u16 data_len)
4810 {
4811         bool val, changed;
4812         int err;
4813         struct mgmt_rp_set_exp_feature rp;
4814
4815         /* Command requires a valid controller index */
4816         if (!hdev)
4817                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4818                                        MGMT_OP_SET_EXP_FEATURE,
4819                                        MGMT_STATUS_INVALID_INDEX);
4820
4821         /* Parameters are limited to a single octet */
4822         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4823                 return mgmt_cmd_status(sk, hdev->id,
4824                                        MGMT_OP_SET_EXP_FEATURE,
4825                                        MGMT_STATUS_INVALID_PARAMS);
4826
4827         /* Only boolean on/off is supported */
4828         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4829                 return mgmt_cmd_status(sk, hdev->id,
4830                                        MGMT_OP_SET_EXP_FEATURE,
4831                                        MGMT_STATUS_INVALID_PARAMS);
4832
4833         val = !!cp->param[0];
4834         changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4835
4836         if (!hdev->get_data_path_id) {
4837                 return mgmt_cmd_status(sk, hdev->id,
4838                                        MGMT_OP_SET_EXP_FEATURE,
4839                                        MGMT_STATUS_NOT_SUPPORTED);
4840         }
4841
4842         if (changed) {
4843                 if (val)
4844                         hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4845                 else
4846                         hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4847         }
4848
4849         bt_dev_info(hdev, "offload codecs enable %d changed %d",
4850                     val, changed);
4851
4852         memcpy(rp.uuid, offload_codecs_uuid, 16);
4853         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4854         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4855         err = mgmt_cmd_complete(sk, hdev->id,
4856                                 MGMT_OP_SET_EXP_FEATURE, 0,
4857                                 &rp, sizeof(rp));
4858
4859         if (changed)
4860                 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4861
4862         return err;
4863 }
4864
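/* Experimental LE simultaneous roles toggle. Only supported when the
 * controller's LE states allow it (hci_dev_le_state_simultaneous());
 * the requested value is tracked via HCI_LE_SIMULTANEOUS_ROLES.
 */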
4865 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4866                                           struct mgmt_cp_set_exp_feature *cp,
4867                                           u16 data_len)
4868 {
4869         bool val, changed;
4870         int err;
4871         struct mgmt_rp_set_exp_feature rp;
4872
4873         /* Command requires a valid controller index */
4874         if (!hdev)
4875                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4876                                        MGMT_OP_SET_EXP_FEATURE,
4877                                        MGMT_STATUS_INVALID_INDEX);
4878
4879         /* Parameters are limited to a single octet */
4880         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4881                 return mgmt_cmd_status(sk, hdev->id,
4882                                        MGMT_OP_SET_EXP_FEATURE,
4883                                        MGMT_STATUS_INVALID_PARAMS);
4884
4885         /* Only boolean on/off is supported */
4886         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4887                 return mgmt_cmd_status(sk, hdev->id,
4888                                        MGMT_OP_SET_EXP_FEATURE,
4889                                        MGMT_STATUS_INVALID_PARAMS);
4890
4891         val = !!cp->param[0];
4892         changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4893
4894         if (!hci_dev_le_state_simultaneous(hdev)) {
4895                 return mgmt_cmd_status(sk, hdev->id,
4896                                        MGMT_OP_SET_EXP_FEATURE,
4897                                        MGMT_STATUS_NOT_SUPPORTED);
4898         }
4899
4900         if (changed) {
4901                 if (val)
4902                         hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4903                 else
4904                         hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4905         }
4906
4907         bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4908                     val, changed);
4909
4910         memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4911         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4912         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4913         err = mgmt_cmd_complete(sk, hdev->id,
4914                                 MGMT_OP_SET_EXP_FEATURE, 0,
4915                                 &rp, sizeof(rp));
4916
4917         if (changed)
4918                 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4919
4920         return err;
4921 }
4922
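/* Experimental ISO socket toggle (CONFIG_BT_LE only). Uses the
 * non-controller index: enabling registers ISO socket support via
 * iso_init(), disabling tears it down via iso_exit().
 */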
4923 #ifdef CONFIG_BT_LE
4924 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4925                                struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4926 {
4927         struct mgmt_rp_set_exp_feature rp;
4928         bool val, changed = false;
4929         int err;
4930
4931         /* Command requires the non-controller index */
4932         if (hdev)
4933                 return mgmt_cmd_status(sk, hdev->id,
4934                                        MGMT_OP_SET_EXP_FEATURE,
4935                                        MGMT_STATUS_INVALID_INDEX);
4936
4937         /* Parameters are limited to a single octet */
4938         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4939                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4940                                        MGMT_OP_SET_EXP_FEATURE,
4941                                        MGMT_STATUS_INVALID_PARAMS);
4942
4943         /* Only boolean on/off is supported */
4944         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4945                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4946                                        MGMT_OP_SET_EXP_FEATURE,
4947                                        MGMT_STATUS_INVALID_PARAMS);
4948
4949         val = !!cp->param[0];
4950         if (val)
4951                 err = iso_init();
4952         else
4953                 err = iso_exit();
4954
4955         if (!err)
4956                 changed = true;
4957
4958         memcpy(rp.uuid, iso_socket_uuid, 16);
4959         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4960
4961         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4962
4963         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4964                                 MGMT_OP_SET_EXP_FEATURE, 0,
4965                                 &rp, sizeof(rp));
4966
4967         if (changed)
4968                 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4969
4970         return err;
4971 }
4972 #endif
4973
4974 static const struct mgmt_exp_feature {
4975         const u8 *uuid;
4976         int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4977                         struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4978 } exp_features[] = {
4979         EXP_FEAT(ZERO_KEY, set_zero_key_func),
4980 #ifdef CONFIG_BT_FEATURE_DEBUG
4981         EXP_FEAT(debug_uuid, set_debug_func),
4982 #endif
4983         EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4984         EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4985         EXP_FEAT(quality_report_uuid, set_quality_report_func),
4986         EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4987         EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4988 #ifdef CONFIG_BT_LE
4989         EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4990 #endif
4991
4992         /* end with a null feature */
4993         EXP_FEAT(NULL, NULL)
4994 };
4995
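/* Dispatch MGMT_OP_SET_EXP_FEATURE to the matching handler by UUID.
 * For the boolean features handled above, the request carries the
 * 16-byte feature UUID followed by a single 0x00/0x01 octet, e.g.:
 *
 *      <16-byte UUID> 0x01  -> enable the feature
 *      <16-byte UUID> 0x00  -> disable the feature
 *
 * An unknown UUID is answered with MGMT_STATUS_NOT_SUPPORTED.
 */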
4996 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4997                            void *data, u16 data_len)
4998 {
4999         struct mgmt_cp_set_exp_feature *cp = data;
5000         size_t i = 0;
5001
5002         bt_dev_dbg(hdev, "sock %p", sk);
5003
5004         for (i = 0; exp_features[i].uuid; i++) {
5005                 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5006                         return exp_features[i].set_func(sk, hdev, cp, data_len);
5007         }
5008
5009         return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5010                                MGMT_OP_SET_EXP_FEATURE,
5011                                MGMT_STATUS_NOT_SUPPORTED);
5012 }
5013
5014 static u32 get_params_flags(struct hci_dev *hdev,
5015                             struct hci_conn_params *params)
5016 {
5017         u32 flags = hdev->conn_flags;
5018
5019         /* Devices using RPAs can only be programmed in the acceptlist if
5020          * LL Privacy has been enabled; otherwise they cannot mark
5021          * HCI_CONN_FLAG_REMOTE_WAKEUP.
5022          */
5023         if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5024             hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5025                 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5026
5027         return flags;
5028 }
5029
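/* Report the supported and current device flags for a single device:
 * BR/EDR addresses are looked up in the accept list, LE addresses in
 * the connection parameters. Unknown devices yield INVALID_PARAMS.
 */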
5030 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5031                             u16 data_len)
5032 {
5033         struct mgmt_cp_get_device_flags *cp = data;
5034         struct mgmt_rp_get_device_flags rp;
5035         struct bdaddr_list_with_flags *br_params;
5036         struct hci_conn_params *params;
5037         u32 supported_flags;
5038         u32 current_flags = 0;
5039         u8 status = MGMT_STATUS_INVALID_PARAMS;
5040
5041         bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5042                    &cp->addr.bdaddr, cp->addr.type);
5043
5044         hci_dev_lock(hdev);
5045
5046         supported_flags = hdev->conn_flags;
5047
5048         memset(&rp, 0, sizeof(rp));
5049
5050         if (cp->addr.type == BDADDR_BREDR) {
5051                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5052                                                               &cp->addr.bdaddr,
5053                                                               cp->addr.type);
5054                 if (!br_params)
5055                         goto done;
5056
5057                 current_flags = br_params->flags;
5058         } else {
5059                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5060                                                 le_addr_type(cp->addr.type));
5061                 if (!params)
5062                         goto done;
5063
5064                 supported_flags = get_params_flags(hdev, params);
5065                 current_flags = params->flags;
5066         }
5067
5068         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5069         rp.addr.type = cp->addr.type;
5070         rp.supported_flags = cpu_to_le32(supported_flags);
5071         rp.current_flags = cpu_to_le32(current_flags);
5072
5073         status = MGMT_STATUS_SUCCESS;
5074
5075 done:
5076         hci_dev_unlock(hdev);
5077
5078         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5079                                 &rp, sizeof(rp));
5080 }
5081
5082 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5083                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5084                                  u32 supported_flags, u32 current_flags)
5085 {
5086         struct mgmt_ev_device_flags_changed ev;
5087
5088         bacpy(&ev.addr.bdaddr, bdaddr);
5089         ev.addr.type = bdaddr_type;
5090         ev.supported_flags = cpu_to_le32(supported_flags);
5091         ev.current_flags = cpu_to_le32(current_flags);
5092
5093         mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5094 }
5095
5096 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5097                             u16 len)
5098 {
5099         struct mgmt_cp_set_device_flags *cp = data;
5100         struct bdaddr_list_with_flags *br_params;
5101         struct hci_conn_params *params;
5102         u8 status = MGMT_STATUS_INVALID_PARAMS;
5103         u32 supported_flags;
5104         u32 current_flags = __le32_to_cpu(cp->current_flags);
5105
5106         bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5107                    &cp->addr.bdaddr, cp->addr.type, current_flags);
5108
5109         /* FIXME: hci_dev_lock() may need to be taken earlier, since conn_flags can change. */
5110         supported_flags = hdev->conn_flags;
5111
5112         if ((supported_flags | current_flags) != supported_flags) {
5113                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5114                             current_flags, supported_flags);
5115                 goto done;
5116         }
5117
5118         hci_dev_lock(hdev);
5119
5120         if (cp->addr.type == BDADDR_BREDR) {
5121                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5122                                                               &cp->addr.bdaddr,
5123                                                               cp->addr.type);
5124
5125                 if (br_params) {
5126                         br_params->flags = current_flags;
5127                         status = MGMT_STATUS_SUCCESS;
5128                 } else {
5129                         bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5130                                     &cp->addr.bdaddr, cp->addr.type);
5131                 }
5132
5133                 goto unlock;
5134         }
5135
5136         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5137                                         le_addr_type(cp->addr.type));
5138         if (!params) {
5139                 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5140                             &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5141                 goto unlock;
5142         }
5143
5144         supported_flags = get_params_flags(hdev, params);
5145
5146         if ((supported_flags | current_flags) != supported_flags) {
5147                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5148                             current_flags, supported_flags);
5149                 goto unlock;
5150         }
5151
5152         params->flags = current_flags;
5153         status = MGMT_STATUS_SUCCESS;
5154
5155         /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5156          * has been set.
5157          */
5158         if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5159                 hci_update_passive_scan(hdev);
5160
5161 unlock:
5162         hci_dev_unlock(hdev);
5163
5164 done:
5165         if (status == MGMT_STATUS_SUCCESS)
5166                 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5167                                      supported_flags, current_flags);
5168
5169         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5170                                  &cp->addr, sizeof(cp->addr));
5171 }
5172
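/* Notify mgmt sockets (other than the originating one) that an
 * advertisement monitor was added.
 */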
5173 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5174                                    u16 handle)
5175 {
5176         struct mgmt_ev_adv_monitor_added ev;
5177
5178         ev.monitor_handle = cpu_to_le16(handle);
5179
5180         mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5181 }
5182
5183 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5184 {
5185         struct mgmt_ev_adv_monitor_removed ev;
5186         struct mgmt_pending_cmd *cmd;
5187         struct sock *sk_skip = NULL;
5188         struct mgmt_cp_remove_adv_monitor *cp;
5189
5190         cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5191         if (cmd) {
5192                 cp = cmd->param;
5193
5194                 if (cp->monitor_handle)
5195                         sk_skip = cmd->sk;
5196         }
5197
5198         ev.monitor_handle = cpu_to_le16(handle);
5199
5200         mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5201 }
5202
5203 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5204                                  void *data, u16 len)
5205 {
5206         struct adv_monitor *monitor = NULL;
5207         struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5208         int handle, err;
5209         size_t rp_size = 0;
5210         __u32 supported = 0;
5211         __u32 enabled = 0;
5212         __u16 num_handles = 0;
5213         __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5214
5215         BT_DBG("request for %s", hdev->name);
5216
5217         hci_dev_lock(hdev);
5218
5219         if (msft_monitor_supported(hdev))
5220                 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5221
5222         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5223                 handles[num_handles++] = monitor->handle;
5224
5225         hci_dev_unlock(hdev);
5226
5227         rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5228         rp = kmalloc(rp_size, GFP_KERNEL);
5229         if (!rp)
5230                 return -ENOMEM;
5231
5232         /* All supported features are currently enabled */
5233         enabled = supported;
5234
5235         rp->supported_features = cpu_to_le32(supported);
5236         rp->enabled_features = cpu_to_le32(enabled);
5237         rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5238         rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5239         rp->num_handles = cpu_to_le16(num_handles);
5240         if (num_handles)
5241                 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5242
5243         err = mgmt_cmd_complete(sk, hdev->id,
5244                                 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5245                                 MGMT_STATUS_SUCCESS, rp, rp_size);
5246
5247         kfree(rp);
5248
5249         return err;
5250 }
5251
5252 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5253                                                    void *data, int status)
5254 {
5255         struct mgmt_rp_add_adv_patterns_monitor rp;
5256         struct mgmt_pending_cmd *cmd = data;
5257         struct adv_monitor *monitor = cmd->user_data;
5258
5259         hci_dev_lock(hdev);
5260
5261         rp.monitor_handle = cpu_to_le16(monitor->handle);
5262
5263         if (!status) {
5264                 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5265                 hdev->adv_monitors_cnt++;
5266                 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5267                         monitor->state = ADV_MONITOR_STATE_REGISTERED;
5268                 hci_update_passive_scan(hdev);
5269         }
5270
5271         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5272                           mgmt_status(status), &rp, sizeof(rp));
5273         mgmt_pending_remove(cmd);
5274
5275         hci_dev_unlock(hdev);
5276         bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5277                    rp.monitor_handle, status);
5278 }
5279
5280 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5281 {
5282         struct mgmt_pending_cmd *cmd = data;
5283         struct adv_monitor *monitor = cmd->user_data;
5284
5285         return hci_add_adv_monitor(hdev, monitor);
5286 }
5287
5288 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5289                                       struct adv_monitor *m, u8 status,
5290                                       void *data, u16 len, u16 op)
5291 {
5292         struct mgmt_pending_cmd *cmd;
5293         int err;
5294
5295         hci_dev_lock(hdev);
5296
5297         if (status)
5298                 goto unlock;
5299
5300         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5301             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5302             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5303             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5304                 status = MGMT_STATUS_BUSY;
5305                 goto unlock;
5306         }
5307
5308         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5309         if (!cmd) {
5310                 status = MGMT_STATUS_NO_RESOURCES;
5311                 goto unlock;
5312         }
5313
5314         cmd->user_data = m;
5315         err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5316                                  mgmt_add_adv_patterns_monitor_complete);
5317         if (err) {
5318                 if (err == -ENOMEM)
5319                         status = MGMT_STATUS_NO_RESOURCES;
5320                 else
5321                         status = MGMT_STATUS_FAILED;
5322
5323                 goto unlock;
5324         }
5325
5326         hci_dev_unlock(hdev);
5327
5328         return 0;
5329
5330 unlock:
5331         hci_free_adv_monitor(hdev, m);
5332         hci_dev_unlock(hdev);
5333         return mgmt_cmd_status(sk, hdev->id, op, status);
5334 }
5335
5336 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5337                                    struct mgmt_adv_rssi_thresholds *rssi)
5338 {
5339         if (rssi) {
5340                 m->rssi.low_threshold = rssi->low_threshold;
5341                 m->rssi.low_threshold_timeout =
5342                     __le16_to_cpu(rssi->low_threshold_timeout);
5343                 m->rssi.high_threshold = rssi->high_threshold;
5344                 m->rssi.high_threshold_timeout =
5345                     __le16_to_cpu(rssi->high_threshold_timeout);
5346                 m->rssi.sampling_period = rssi->sampling_period;
5347         } else {
5348                 /* Default values. These numbers are the least constraining
5349                  * parameters for the MSFT API to work, so it behaves as if
5350                  * there are no RSSI parameters to consider. May need to be
5351                  * changed if other APIs are to be supported.
5352                  */
5353                 m->rssi.low_threshold = -127;
5354                 m->rssi.low_threshold_timeout = 60;
5355                 m->rssi.high_threshold = -127;
5356                 m->rssi.high_threshold_timeout = 0;
5357                 m->rssi.sampling_period = 0;
5358         }
5359 }
5360
5361 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5362                                     struct mgmt_adv_pattern *patterns)
5363 {
5364         u8 offset = 0, length = 0;
5365         struct adv_pattern *p = NULL;
5366         int i;
5367
5368         for (i = 0; i < pattern_count; i++) {
5369                 offset = patterns[i].offset;
5370                 length = patterns[i].length;
5371                 if (offset >= HCI_MAX_AD_LENGTH ||
5372                     length > HCI_MAX_AD_LENGTH ||
5373                     (offset + length) > HCI_MAX_AD_LENGTH)
5374                         return MGMT_STATUS_INVALID_PARAMS;
5375
5376                 p = kmalloc(sizeof(*p), GFP_KERNEL);
5377                 if (!p)
5378                         return MGMT_STATUS_NO_RESOURCES;
5379
5380                 p->ad_type = patterns[i].ad_type;
5381                 p->offset = patterns[i].offset;
5382                 p->length = patterns[i].length;
5383                 memcpy(p->value, patterns[i].value, p->length);
5384
5385                 INIT_LIST_HEAD(&p->list);
5386                 list_add(&p->list, &m->patterns);
5387         }
5388
5389         return MGMT_STATUS_SUCCESS;
5390 }
5391
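/* Add an advertisement monitor without RSSI thresholds. The request
 * must be exactly sizeof(*cp) plus pattern_count entries of struct
 * mgmt_adv_pattern, and every pattern must fit within HCI_MAX_AD_LENGTH
 * (offset + length bound checked in parse_adv_monitor_pattern()).
 */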
5392 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5393                                     void *data, u16 len)
5394 {
5395         struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5396         struct adv_monitor *m = NULL;
5397         u8 status = MGMT_STATUS_SUCCESS;
5398         size_t expected_size = sizeof(*cp);
5399
5400         BT_DBG("request for %s", hdev->name);
5401
5402         if (len <= sizeof(*cp)) {
5403                 status = MGMT_STATUS_INVALID_PARAMS;
5404                 goto done;
5405         }
5406
5407         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5408         if (len != expected_size) {
5409                 status = MGMT_STATUS_INVALID_PARAMS;
5410                 goto done;
5411         }
5412
5413         m = kzalloc(sizeof(*m), GFP_KERNEL);
5414         if (!m) {
5415                 status = MGMT_STATUS_NO_RESOURCES;
5416                 goto done;
5417         }
5418
5419         INIT_LIST_HEAD(&m->patterns);
5420
5421         parse_adv_monitor_rssi(m, NULL);
5422         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5423
5424 done:
5425         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5426                                           MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5427 }
5428
5429 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5430                                          void *data, u16 len)
5431 {
5432         struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5433         struct adv_monitor *m = NULL;
5434         u8 status = MGMT_STATUS_SUCCESS;
5435         size_t expected_size = sizeof(*cp);
5436
5437         BT_DBG("request for %s", hdev->name);
5438
5439         if (len <= sizeof(*cp)) {
5440                 status = MGMT_STATUS_INVALID_PARAMS;
5441                 goto done;
5442         }
5443
5444         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5445         if (len != expected_size) {
5446                 status = MGMT_STATUS_INVALID_PARAMS;
5447                 goto done;
5448         }
5449
5450         m = kzalloc(sizeof(*m), GFP_KERNEL);
5451         if (!m) {
5452                 status = MGMT_STATUS_NO_RESOURCES;
5453                 goto done;
5454         }
5455
5456         INIT_LIST_HEAD(&m->patterns);
5457
5458         parse_adv_monitor_rssi(m, &cp->rssi);
5459         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5460
5461 done:
5462         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5463                                          MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5464 }
5465
5466 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5467                                              void *data, int status)
5468 {
5469         struct mgmt_rp_remove_adv_monitor rp;
5470         struct mgmt_pending_cmd *cmd = data;
5471         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5472
5473         hci_dev_lock(hdev);
5474
5475         rp.monitor_handle = cp->monitor_handle;
5476
5477         if (!status)
5478                 hci_update_passive_scan(hdev);
5479
5480         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5481                           mgmt_status(status), &rp, sizeof(rp));
5482         mgmt_pending_remove(cmd);
5483
5484         hci_dev_unlock(hdev);
5485         bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5486                    rp.monitor_handle, status);
5487 }
5488
5489 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5490 {
5491         struct mgmt_pending_cmd *cmd = data;
5492         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5493         u16 handle = __le16_to_cpu(cp->monitor_handle);
5494
5495         if (!handle)
5496                 return hci_remove_all_adv_monitor(hdev);
5497
5498         return hci_remove_single_adv_monitor(hdev, handle);
5499 }
5500
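/* Remove one or all advertisement monitors: a monitor_handle of 0
 * removes every registered monitor, any other value removes just that
 * handle. Rejected with MGMT_STATUS_BUSY while another monitor or
 * SET_LE operation is pending.
 */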
5501 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5502                               void *data, u16 len)
5503 {
5504         struct mgmt_pending_cmd *cmd;
5505         int err, status;
5506
5507         hci_dev_lock(hdev);
5508
5509         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5510             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5511             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5512             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5513                 status = MGMT_STATUS_BUSY;
5514                 goto unlock;
5515         }
5516
5517         cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5518         if (!cmd) {
5519                 status = MGMT_STATUS_NO_RESOURCES;
5520                 goto unlock;
5521         }
5522
5523         err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5524                                  mgmt_remove_adv_monitor_complete);
5525
5526         if (err) {
5527                 mgmt_pending_remove(cmd);
5528
5529                 if (err == -ENOMEM)
5530                         status = MGMT_STATUS_NO_RESOURCES;
5531                 else
5532                         status = MGMT_STATUS_FAILED;
5533
5534                 goto unlock;
5535         }
5536
5537         hci_dev_unlock(hdev);
5538
5539         return 0;
5540
5541 unlock:
5542         hci_dev_unlock(hdev);
5543         return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5544                                status);
5545 }
5546
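/* Completion handler for MGMT_OP_READ_LOCAL_OOB_DATA. Without BR/EDR
 * Secure Connections only the P-192 hash/randomizer are copied and the
 * reply is shortened by the P-256 fields; with SC both pairs are
 * returned from the extended OOB data.
 */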
5547 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5548 {
5549         struct mgmt_rp_read_local_oob_data mgmt_rp;
5550         size_t rp_size = sizeof(mgmt_rp);
5551         struct mgmt_pending_cmd *cmd = data;
5552         struct sk_buff *skb = cmd->skb;
5553         u8 status = mgmt_status(err);
5554
5555         if (!status) {
5556                 if (!skb)
5557                         status = MGMT_STATUS_FAILED;
5558                 else if (IS_ERR(skb))
5559                         status = mgmt_status(PTR_ERR(skb));
5560                 else
5561                         status = mgmt_status(skb->data[0]);
5562         }
5563
5564         bt_dev_dbg(hdev, "status %d", status);
5565
5566         if (status) {
5567                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5568                 goto remove;
5569         }
5570
5571         memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5572
5573         if (!bredr_sc_enabled(hdev)) {
5574                 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5575
5576                 if (skb->len < sizeof(*rp)) {
5577                         mgmt_cmd_status(cmd->sk, hdev->id,
5578                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5579                                         MGMT_STATUS_FAILED);
5580                         goto remove;
5581                 }
5582
5583                 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5584                 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5585
5586                 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5587         } else {
5588                 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5589
5590                 if (skb->len < sizeof(*rp)) {
5591                         mgmt_cmd_status(cmd->sk, hdev->id,
5592                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5593                                         MGMT_STATUS_FAILED);
5594                         goto remove;
5595                 }
5596
5597                 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5598                 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5599
5600                 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5601                 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5602         }
5603
5604         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5605                           MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5606
5607 remove:
5608         if (skb && !IS_ERR(skb))
5609                 kfree_skb(skb);
5610
5611         mgmt_pending_free(cmd);
5612 }
5613
5614 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5615 {
5616         struct mgmt_pending_cmd *cmd = data;
5617
5618         if (bredr_sc_enabled(hdev))
5619                 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5620         else
5621                 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5622
5623         if (IS_ERR(cmd->skb))
5624                 return PTR_ERR(cmd->skb);
5625         else
5626                 return 0;
5627 }
5628
5629 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5630                                void *data, u16 data_len)
5631 {
5632         struct mgmt_pending_cmd *cmd;
5633         int err;
5634
5635         bt_dev_dbg(hdev, "sock %p", sk);
5636
5637         hci_dev_lock(hdev);
5638
5639         if (!hdev_is_powered(hdev)) {
5640                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5641                                       MGMT_STATUS_NOT_POWERED);
5642                 goto unlock;
5643         }
5644
5645         if (!lmp_ssp_capable(hdev)) {
5646                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5647                                       MGMT_STATUS_NOT_SUPPORTED);
5648                 goto unlock;
5649         }
5650
5651         cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5652         if (!cmd)
5653                 err = -ENOMEM;
5654         else
5655                 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5656                                          read_local_oob_data_complete);
5657
5658         if (err < 0) {
5659                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5660                                       MGMT_STATUS_FAILED);
5661
5662                 if (cmd)
5663                         mgmt_pending_free(cmd);
5664         }
5665
5666 unlock:
5667         hci_dev_unlock(hdev);
5668         return err;
5669 }
5670
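/* Store remote OOB pairing data. Two request sizes are accepted: the
 * legacy form (BR/EDR only) with P-192 hash/randomizer, and the
 * extended form with both P-192 and P-256 values. Zero-valued pairs
 * disable the corresponding OOB data, and LE addresses must carry
 * zeroed P-192 values as long as legacy SMP OOB is not implemented.
 */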
5671 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5672                                void *data, u16 len)
5673 {
5674         struct mgmt_addr_info *addr = data;
5675         int err;
5676
5677         bt_dev_dbg(hdev, "sock %p", sk);
5678
5679         if (!bdaddr_type_is_valid(addr->type))
5680                 return mgmt_cmd_complete(sk, hdev->id,
5681                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
5682                                          MGMT_STATUS_INVALID_PARAMS,
5683                                          addr, sizeof(*addr));
5684
5685         hci_dev_lock(hdev);
5686
5687         if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5688                 struct mgmt_cp_add_remote_oob_data *cp = data;
5689                 u8 status;
5690
5691                 if (cp->addr.type != BDADDR_BREDR) {
5692                         err = mgmt_cmd_complete(sk, hdev->id,
5693                                                 MGMT_OP_ADD_REMOTE_OOB_DATA,
5694                                                 MGMT_STATUS_INVALID_PARAMS,
5695                                                 &cp->addr, sizeof(cp->addr));
5696                         goto unlock;
5697                 }
5698
5699                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5700                                               cp->addr.type, cp->hash,
5701                                               cp->rand, NULL, NULL);
5702                 if (err < 0)
5703                         status = MGMT_STATUS_FAILED;
5704                 else
5705                         status = MGMT_STATUS_SUCCESS;
5706
5707                 err = mgmt_cmd_complete(sk, hdev->id,
5708                                         MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5709                                         &cp->addr, sizeof(cp->addr));
5710         } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5711                 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5712                 u8 *rand192, *hash192, *rand256, *hash256;
5713                 u8 status;
5714
5715                 if (bdaddr_type_is_le(cp->addr.type)) {
5716                         /* Enforce zero-valued 192-bit parameters as
5717                          * long as legacy SMP OOB isn't implemented.
5718                          */
5719                         if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5720                             memcmp(cp->hash192, ZERO_KEY, 16)) {
5721                                 err = mgmt_cmd_complete(sk, hdev->id,
5722                                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5723                                                         MGMT_STATUS_INVALID_PARAMS,
5724                                                         addr, sizeof(*addr));
5725                                 goto unlock;
5726                         }
5727
5728                         rand192 = NULL;
5729                         hash192 = NULL;
5730                 } else {
5731                         /* In case one of the P-192 values is set to zero,
5732                          * then just disable OOB data for P-192.
5733                          */
5734                         if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5735                             !memcmp(cp->hash192, ZERO_KEY, 16)) {
5736                                 rand192 = NULL;
5737                                 hash192 = NULL;
5738                         } else {
5739                                 rand192 = cp->rand192;
5740                                 hash192 = cp->hash192;
5741                         }
5742                 }
5743
5744                 /* In case one of the P-256 values is set to zero, then just
5745                  * disable OOB data for P-256.
5746                  */
5747                 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5748                     !memcmp(cp->hash256, ZERO_KEY, 16)) {
5749                         rand256 = NULL;
5750                         hash256 = NULL;
5751                 } else {
5752                         rand256 = cp->rand256;
5753                         hash256 = cp->hash256;
5754                 }
5755
5756                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5757                                               cp->addr.type, hash192, rand192,
5758                                               hash256, rand256);
5759                 if (err < 0)
5760                         status = MGMT_STATUS_FAILED;
5761                 else
5762                         status = MGMT_STATUS_SUCCESS;
5763
5764                 err = mgmt_cmd_complete(sk, hdev->id,
5765                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5766                                         status, &cp->addr, sizeof(cp->addr));
5767         } else {
5768                 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5769                            len);
5770                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5771                                       MGMT_STATUS_INVALID_PARAMS);
5772         }
5773
5774 unlock:
5775         hci_dev_unlock(hdev);
5776         return err;
5777 }
5778
5779 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5780                                   void *data, u16 len)
5781 {
5782         struct mgmt_cp_remove_remote_oob_data *cp = data;
5783         u8 status;
5784         int err;
5785
5786         bt_dev_dbg(hdev, "sock %p", sk);
5787
5788         if (cp->addr.type != BDADDR_BREDR)
5789                 return mgmt_cmd_complete(sk, hdev->id,
5790                                          MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5791                                          MGMT_STATUS_INVALID_PARAMS,
5792                                          &cp->addr, sizeof(cp->addr));
5793
5794         hci_dev_lock(hdev);
5795
5796         if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5797                 hci_remote_oob_data_clear(hdev);
5798                 status = MGMT_STATUS_SUCCESS;
5799                 goto done;
5800         }
5801
5802         err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5803         if (err < 0)
5804                 status = MGMT_STATUS_INVALID_PARAMS;
5805         else
5806                 status = MGMT_STATUS_SUCCESS;
5807
5808 done:
5809         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5810                                 status, &cp->addr, sizeof(cp->addr));
5811
5812         hci_dev_unlock(hdev);
5813         return err;
5814 }
5815
5816 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5817 {
5818         struct mgmt_pending_cmd *cmd;
5819
5820         bt_dev_dbg(hdev, "status %u", status);
5821
5822         hci_dev_lock(hdev);
5823
5824         cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5825         if (!cmd)
5826                 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5827
5828         if (!cmd)
5829                 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5830
5831         if (cmd) {
5832                 cmd->cmd_complete(cmd, mgmt_status(status));
5833                 mgmt_pending_remove(cmd);
5834         }
5835
5836         hci_dev_unlock(hdev);
5837 }
5838
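/* Validate a discovery type against the controller's capabilities:
 * LE discovery needs LE support, BR/EDR discovery needs BR/EDR support
 * and interleaved discovery needs both. On failure the matching mgmt
 * status code is returned through *mgmt_status.
 */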
5839 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5840                                     uint8_t *mgmt_status)
5841 {
5842         switch (type) {
5843         case DISCOV_TYPE_LE:
5844                 *mgmt_status = mgmt_le_support(hdev);
5845                 if (*mgmt_status)
5846                         return false;
5847                 break;
5848         case DISCOV_TYPE_INTERLEAVED:
5849                 *mgmt_status = mgmt_le_support(hdev);
5850                 if (*mgmt_status)
5851                         return false;
5852                 fallthrough;
5853         case DISCOV_TYPE_BREDR:
5854                 *mgmt_status = mgmt_bredr_support(hdev);
5855                 if (*mgmt_status)
5856                         return false;
5857                 break;
5858         default:
5859                 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5860                 return false;
5861         }
5862
5863         return true;
5864 }
5865
5866 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5867 {
5868         struct mgmt_pending_cmd *cmd = data;
5869
5870         if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5871             cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5872             cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5873                 return;
5874
5875         bt_dev_dbg(hdev, "err %d", err);
5876
5877         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5878                           cmd->param, 1);
5879         mgmt_pending_remove(cmd);
5880
5881         hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5882                                 DISCOVERY_FINDING);
5883 }
5884
5885 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5886 {
5887         return hci_start_discovery_sync(hdev);
5888 }
5889
5890 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
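/* Common implementation for the discovery start commands. The request
 * is rejected if the controller is powered off, a discovery is already
 * running (or periodic inquiry is active), the type is unsupported or
 * discovery is currently paused; otherwise the discovery state moves
 * to DISCOVERY_STARTING and hci_start_discovery_sync() is queued.
 */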
5891                                     u16 op, void *data, u16 len)
5892 {
5893         struct mgmt_cp_start_discovery *cp = data;
5894         struct mgmt_pending_cmd *cmd;
5895         u8 status;
5896         int err;
5897
5898         bt_dev_dbg(hdev, "sock %p", sk);
5899
5900         hci_dev_lock(hdev);
5901
5902         if (!hdev_is_powered(hdev)) {
5903                 err = mgmt_cmd_complete(sk, hdev->id, op,
5904                                         MGMT_STATUS_NOT_POWERED,
5905                                         &cp->type, sizeof(cp->type));
5906                 goto failed;
5907         }
5908
5909         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5910             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5911                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5912                                         &cp->type, sizeof(cp->type));
5913                 goto failed;
5914         }
5915
5916         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5917                 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5918                                         &cp->type, sizeof(cp->type));
5919                 goto failed;
5920         }
5921
5922         /* Can't start discovery when it is paused */
5923         if (hdev->discovery_paused) {
5924                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5925                                         &cp->type, sizeof(cp->type));
5926                 goto failed;
5927         }
5928
5929         /* Clear the discovery filter first to free any previously
5930          * allocated memory for the UUID list.
5931          */
5932         hci_discovery_filter_clear(hdev);
5933
5934         hdev->discovery.type = cp->type;
5935         hdev->discovery.report_invalid_rssi = false;
5936         if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5937                 hdev->discovery.limited = true;
5938         else
5939                 hdev->discovery.limited = false;
5940
5941         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5942         if (!cmd) {
5943                 err = -ENOMEM;
5944                 goto failed;
5945         }
5946
5947         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5948                                  start_discovery_complete);
5949         if (err < 0) {
5950                 mgmt_pending_remove(cmd);
5951                 goto failed;
5952         }
5953
5954         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5955
5956 failed:
5957         hci_dev_unlock(hdev);
5958         return err;
5959 }
5960
5961 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5962                            void *data, u16 len)
5963 {
5964         return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5965                                         data, len);
5966 }
5967
5968 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5969                                    void *data, u16 len)
5970 {
5971         return start_discovery_internal(sk, hdev,
5972                                         MGMT_OP_START_LIMITED_DISCOVERY,
5973                                         data, len);
5974 }
5975
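/* Start Service Discovery additionally carries an RSSI threshold and a
 * list of service UUIDs, which are stored as the result filter before
 * the discovery is queued.
 */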
5976 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5977                                    void *data, u16 len)
5978 {
5979         struct mgmt_cp_start_service_discovery *cp = data;
5980         struct mgmt_pending_cmd *cmd;
5981         const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5982         u16 uuid_count, expected_len;
5983         u8 status;
5984         int err;
5985
5986         bt_dev_dbg(hdev, "sock %p", sk);
5987
5988         hci_dev_lock(hdev);
5989
5990         if (!hdev_is_powered(hdev)) {
5991                 err = mgmt_cmd_complete(sk, hdev->id,
5992                                         MGMT_OP_START_SERVICE_DISCOVERY,
5993                                         MGMT_STATUS_NOT_POWERED,
5994                                         &cp->type, sizeof(cp->type));
5995                 goto failed;
5996         }
5997
5998         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5999             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6000                 err = mgmt_cmd_complete(sk, hdev->id,
6001                                         MGMT_OP_START_SERVICE_DISCOVERY,
6002                                         MGMT_STATUS_BUSY, &cp->type,
6003                                         sizeof(cp->type));
6004                 goto failed;
6005         }
6006
6007         if (hdev->discovery_paused) {
6008                 err = mgmt_cmd_complete(sk, hdev->id,
6009                                         MGMT_OP_START_SERVICE_DISCOVERY,
6010                                         MGMT_STATUS_BUSY, &cp->type,
6011                                         sizeof(cp->type));
6012                 goto failed;
6013         }
6014
6015         uuid_count = __le16_to_cpu(cp->uuid_count);
6016         if (uuid_count > max_uuid_count) {
6017                 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6018                            uuid_count);
6019                 err = mgmt_cmd_complete(sk, hdev->id,
6020                                         MGMT_OP_START_SERVICE_DISCOVERY,
6021                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6022                                         sizeof(cp->type));
6023                 goto failed;
6024         }
6025
6026         expected_len = sizeof(*cp) + uuid_count * 16;
6027         if (expected_len != len) {
6028                 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6029                            expected_len, len);
6030                 err = mgmt_cmd_complete(sk, hdev->id,
6031                                         MGMT_OP_START_SERVICE_DISCOVERY,
6032                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6033                                         sizeof(cp->type));
6034                 goto failed;
6035         }
6036
6037         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6038                 err = mgmt_cmd_complete(sk, hdev->id,
6039                                         MGMT_OP_START_SERVICE_DISCOVERY,
6040                                         status, &cp->type, sizeof(cp->type));
6041                 goto failed;
6042         }
6043
6044         cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6045                                hdev, data, len);
6046         if (!cmd) {
6047                 err = -ENOMEM;
6048                 goto failed;
6049         }
6050
6051         /* Clear the discovery filter first to free any previously
6052          * allocated memory for the UUID list.
6053          */
6054         hci_discovery_filter_clear(hdev);
6055
6056         hdev->discovery.result_filtering = true;
6057         hdev->discovery.type = cp->type;
6058         hdev->discovery.rssi = cp->rssi;
6059         hdev->discovery.uuid_count = uuid_count;
6060
6061         if (uuid_count > 0) {
6062                 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6063                                                 GFP_KERNEL);
6064                 if (!hdev->discovery.uuids) {
6065                         err = mgmt_cmd_complete(sk, hdev->id,
6066                                                 MGMT_OP_START_SERVICE_DISCOVERY,
6067                                                 MGMT_STATUS_FAILED,
6068                                                 &cp->type, sizeof(cp->type));
6069                         mgmt_pending_remove(cmd);
6070                         goto failed;
6071                 }
6072         }
6073
6074         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6075                                  start_discovery_complete);
6076         if (err < 0) {
6077                 mgmt_pending_remove(cmd);
6078                 goto failed;
6079         }
6080
6081         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6082
6083 failed:
6084         hci_dev_unlock(hdev);
6085         return err;
6086 }
6087
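/* Resolve a pending Stop Discovery command once the controller has
 * reported the result of stopping discovery.
 */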
6088 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6089 {
6090         struct mgmt_pending_cmd *cmd;
6091
6092         bt_dev_dbg(hdev, "status %u", status);
6093
6094         hci_dev_lock(hdev);
6095
6096         cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6097         if (cmd) {
6098                 cmd->cmd_complete(cmd, mgmt_status(status));
6099                 mgmt_pending_remove(cmd);
6100         }
6101
6102         hci_dev_unlock(hdev);
6103 }
6104
6105 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6106 {
6107         struct mgmt_pending_cmd *cmd = data;
6108
6109         if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6110                 return;
6111
6112         bt_dev_dbg(hdev, "err %d", err);
6113
6114         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6115                           cmd->param, 1);
6116         mgmt_pending_remove(cmd);
6117
6118         if (!err)
6119                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6120 }
6121
6122 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6123 {
6124         return hci_stop_discovery_sync(hdev);
6125 }
6126
6127 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6128                           u16 len)
6129 {
6130         struct mgmt_cp_stop_discovery *mgmt_cp = data;
6131         struct mgmt_pending_cmd *cmd;
6132         int err;
6133
6134         bt_dev_dbg(hdev, "sock %p", sk);
6135
6136         hci_dev_lock(hdev);
6137
6138         if (!hci_discovery_active(hdev)) {
6139                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6140                                         MGMT_STATUS_REJECTED, &mgmt_cp->type,
6141                                         sizeof(mgmt_cp->type));
6142                 goto unlock;
6143         }
6144
6145         if (hdev->discovery.type != mgmt_cp->type) {
6146                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6147                                         MGMT_STATUS_INVALID_PARAMS,
6148                                         &mgmt_cp->type, sizeof(mgmt_cp->type));
6149                 goto unlock;
6150         }
6151
6152         cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6153         if (!cmd) {
6154                 err = -ENOMEM;
6155                 goto unlock;
6156         }
6157
6158         err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6159                                  stop_discovery_complete);
6160         if (err < 0) {
6161                 mgmt_pending_remove(cmd);
6162                 goto unlock;
6163         }
6164
6165         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6166
6167 unlock:
6168         hci_dev_unlock(hdev);
6169         return err;
6170 }
6171
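/* Confirm Name tells the kernel whether the remote name for an inquiry
 * result is already known; entries with unknown names are queued for
 * name resolution.
 */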
6172 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6173                         u16 len)
6174 {
6175         struct mgmt_cp_confirm_name *cp = data;
6176         struct inquiry_entry *e;
6177         int err;
6178
6179         bt_dev_dbg(hdev, "sock %p", sk);
6180
6181         hci_dev_lock(hdev);
6182
6183         if (!hci_discovery_active(hdev)) {
6184                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6185                                         MGMT_STATUS_FAILED, &cp->addr,
6186                                         sizeof(cp->addr));
6187                 goto failed;
6188         }
6189
6190         e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6191         if (!e) {
6192                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6193                                         MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6194                                         sizeof(cp->addr));
6195                 goto failed;
6196         }
6197
6198         if (cp->name_known) {
6199                 e->name_state = NAME_KNOWN;
6200                 list_del(&e->list);
6201         } else {
6202                 e->name_state = NAME_NEEDED;
6203                 hci_inquiry_cache_update_resolve(hdev, e);
6204         }
6205
6206         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6207                                 &cp->addr, sizeof(cp->addr));
6208
6209 failed:
6210         hci_dev_unlock(hdev);
6211         return err;
6212 }
6213
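/* Block Device adds the address to the controller's reject list and
 * sends a Device Blocked event to the other management sockets.
 */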
6214 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6215                         u16 len)
6216 {
6217         struct mgmt_cp_block_device *cp = data;
6218         u8 status;
6219         int err;
6220
6221         bt_dev_dbg(hdev, "sock %p", sk);
6222
6223         if (!bdaddr_type_is_valid(cp->addr.type))
6224                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6225                                          MGMT_STATUS_INVALID_PARAMS,
6226                                          &cp->addr, sizeof(cp->addr));
6227
6228         hci_dev_lock(hdev);
6229
6230         err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6231                                   cp->addr.type);
6232         if (err < 0) {
6233                 status = MGMT_STATUS_FAILED;
6234                 goto done;
6235         }
6236
6237         mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6238                    sk);
6239         status = MGMT_STATUS_SUCCESS;
6240
6241 done:
6242         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6243                                 &cp->addr, sizeof(cp->addr));
6244
6245         hci_dev_unlock(hdev);
6246
6247         return err;
6248 }
6249
6250 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6251                           u16 len)
6252 {
6253         struct mgmt_cp_unblock_device *cp = data;
6254         u8 status;
6255         int err;
6256
6257         bt_dev_dbg(hdev, "sock %p", sk);
6258
6259         if (!bdaddr_type_is_valid(cp->addr.type))
6260                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6261                                          MGMT_STATUS_INVALID_PARAMS,
6262                                          &cp->addr, sizeof(cp->addr));
6263
6264         hci_dev_lock(hdev);
6265
6266         err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6267                                   cp->addr.type);
6268         if (err < 0) {
6269                 status = MGMT_STATUS_INVALID_PARAMS;
6270                 goto done;
6271         }
6272
6273         mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6274                    sk);
6275         status = MGMT_STATUS_SUCCESS;
6276
6277 done:
6278         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6279                                 &cp->addr, sizeof(cp->addr));
6280
6281         hci_dev_unlock(hdev);
6282
6283         return err;
6284 }
6285
6286 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6287 {
6288         return hci_update_eir_sync(hdev);
6289 }
6290
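/* Set Device ID stores the DI profile source, vendor, product and
 * version values and queues an EIR update to publish them.
 */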
6291 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6292                          u16 len)
6293 {
6294         struct mgmt_cp_set_device_id *cp = data;
6295         int err;
6296         __u16 source;
6297
6298         bt_dev_dbg(hdev, "sock %p", sk);
6299
6300         source = __le16_to_cpu(cp->source);
6301
6302         if (source > 0x0002)
6303                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6304                                        MGMT_STATUS_INVALID_PARAMS);
6305
6306         hci_dev_lock(hdev);
6307
6308         hdev->devid_source = source;
6309         hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6310         hdev->devid_product = __le16_to_cpu(cp->product);
6311         hdev->devid_version = __le16_to_cpu(cp->version);
6312
6313         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6314                                 NULL, 0);
6315
6316         hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6317
6318         hci_dev_unlock(hdev);
6319
6320         return err;
6321 }
6322
6323 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6324 {
6325         if (err)
6326                 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6327         else
6328                 bt_dev_dbg(hdev, "status %d", err);
6329 }
6330
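/* Completion handler for Set Advertising: sync the HCI_ADVERTISING flag
 * with the controller state, notify user space and, if the setting was
 * just disabled, re-enable any configured advertising instances.
 */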
6331 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6332 {
6333         struct cmd_lookup match = { NULL, hdev };
6334         u8 instance;
6335         struct adv_info *adv_instance;
6336         u8 status = mgmt_status(err);
6337
6338         if (status) {
6339                 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6340                                      cmd_status_rsp, &status);
6341                 return;
6342         }
6343
6344         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6345                 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6346         else
6347                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6348
6349         mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6350                              &match);
6351
6352         new_settings(hdev, match.sk);
6353
6354         if (match.sk)
6355                 sock_put(match.sk);
6356
6357         /* If "Set Advertising" was just disabled and instance advertising was
6358          * set up earlier, then re-enable multi-instance advertising.
6359          */
6360         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6361             list_empty(&hdev->adv_instances))
6362                 return;
6363
6364         instance = hdev->cur_adv_instance;
6365         if (!instance) {
6366                 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6367                                                         struct adv_info, list);
6368                 if (!adv_instance)
6369                         return;
6370
6371                 instance = adv_instance->instance;
6372         }
6373
6374         err = hci_schedule_adv_instance_sync(hdev, instance, true);
6375
6376         enable_advertising_instance(hdev, err);
6377 }
6378
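/* Apply the Set Advertising value: 0x02 selects connectable advertising,
 * and enabling switches to instance 0 before (re)starting advertising.
 */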
6379 static int set_adv_sync(struct hci_dev *hdev, void *data)
6380 {
6381         struct mgmt_pending_cmd *cmd = data;
6382         struct mgmt_mode *cp = cmd->param;
6383         u8 val = !!cp->val;
6384
6385         if (cp->val == 0x02)
6386                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6387         else
6388                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6389
6390         cancel_adv_timeout(hdev);
6391
6392         if (val) {
6393                 /* Switch to instance "0" for the Set Advertising setting.
6394                  * We cannot use update_[adv|scan_rsp]_data() here as the
6395                  * HCI_ADVERTISING flag is not yet set.
6396                  */
6397                 hdev->cur_adv_instance = 0x00;
6398
6399                 if (ext_adv_capable(hdev)) {
6400                         hci_start_ext_adv_sync(hdev, 0x00);
6401                 } else {
6402                         hci_update_adv_data_sync(hdev, 0x00);
6403                         hci_update_scan_rsp_data_sync(hdev, 0x00);
6404                         hci_enable_advertising_sync(hdev);
6405                 }
6406         } else {
6407                 hci_disable_advertising_sync(hdev);
6408         }
6409
6410         return 0;
6411 }
6412
6413 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6414                            u16 len)
6415 {
6416         struct mgmt_mode *cp = data;
6417         struct mgmt_pending_cmd *cmd;
6418         u8 val, status;
6419         int err;
6420
6421         bt_dev_dbg(hdev, "sock %p", sk);
6422
6423         status = mgmt_le_support(hdev);
6424         if (status)
6425                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6426                                        status);
6427
6428         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6429                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6430                                        MGMT_STATUS_INVALID_PARAMS);
6431
6432         if (hdev->advertising_paused)
6433                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6434                                        MGMT_STATUS_BUSY);
6435
6436         hci_dev_lock(hdev);
6437
6438         val = !!cp->val;
6439
6440         /* The following conditions mean that we should not do any
6441          * HCI communication but instead directly send a mgmt
6442          * response to user space (after toggling the flag if
6443          * necessary).
6444          */
6445         if (!hdev_is_powered(hdev) ||
6446             (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6447              (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6448             hci_dev_test_flag(hdev, HCI_MESH) ||
6449             hci_conn_num(hdev, LE_LINK) > 0 ||
6450             (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6451              hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6452                 bool changed;
6453
6454                 if (cp->val) {
6455                         hdev->cur_adv_instance = 0x00;
6456                         changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6457                         if (cp->val == 0x02)
6458                                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6459                         else
6460                                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6461                 } else {
6462                         changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6463                         hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6464                 }
6465
6466                 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6467                 if (err < 0)
6468                         goto unlock;
6469
6470                 if (changed)
6471                         err = new_settings(hdev, sk);
6472
6473                 goto unlock;
6474         }
6475
6476         if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6477             pending_find(MGMT_OP_SET_LE, hdev)) {
6478                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6479                                       MGMT_STATUS_BUSY);
6480                 goto unlock;
6481         }
6482
6483         cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6484         if (!cmd)
6485                 err = -ENOMEM;
6486         else
6487                 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6488                                          set_advertising_complete);
6489
6490         if (err < 0 && cmd)
6491                 mgmt_pending_remove(cmd);
6492
6493 unlock:
6494         hci_dev_unlock(hdev);
6495         return err;
6496 }
6497
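/* Set Static Address is only accepted while the controller is powered
 * off; a non-zero address must be a valid static random address, while
 * BDADDR_ANY effectively clears any previously configured address.
 */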
6498 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6499                               void *data, u16 len)
6500 {
6501         struct mgmt_cp_set_static_address *cp = data;
6502         int err;
6503
6504         bt_dev_dbg(hdev, "sock %p", sk);
6505
6506         if (!lmp_le_capable(hdev))
6507                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6508                                        MGMT_STATUS_NOT_SUPPORTED);
6509
6510         if (hdev_is_powered(hdev))
6511                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6512                                        MGMT_STATUS_REJECTED);
6513
6514         if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6515                 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6516                         return mgmt_cmd_status(sk, hdev->id,
6517                                                MGMT_OP_SET_STATIC_ADDRESS,
6518                                                MGMT_STATUS_INVALID_PARAMS);
6519
6520                 /* Two most significant bits shall be set */
6521                 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6522                         return mgmt_cmd_status(sk, hdev->id,
6523                                                MGMT_OP_SET_STATIC_ADDRESS,
6524                                                MGMT_STATUS_INVALID_PARAMS);
6525         }
6526
6527         hci_dev_lock(hdev);
6528
6529         bacpy(&hdev->static_addr, &cp->bdaddr);
6530
6531         err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6532         if (err < 0)
6533                 goto unlock;
6534
6535         err = new_settings(hdev, sk);
6536
6537 unlock:
6538         hci_dev_unlock(hdev);
6539         return err;
6540 }
6541
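/* Set Scan Parameters validates the LE scan interval and window
 * (0x0004-0x4000 in 0.625 ms units, i.e. 2.5 ms to 10.24 s, with
 * window <= interval) and restarts background scanning if it is
 * currently running so the new values take effect.
 */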
6542 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6543                            void *data, u16 len)
6544 {
6545         struct mgmt_cp_set_scan_params *cp = data;
6546         __u16 interval, window;
6547         int err;
6548
6549         bt_dev_dbg(hdev, "sock %p", sk);
6550
6551         if (!lmp_le_capable(hdev))
6552                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6553                                        MGMT_STATUS_NOT_SUPPORTED);
6554
6555         interval = __le16_to_cpu(cp->interval);
6556
6557         if (interval < 0x0004 || interval > 0x4000)
6558                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6559                                        MGMT_STATUS_INVALID_PARAMS);
6560
6561         window = __le16_to_cpu(cp->window);
6562
6563         if (window < 0x0004 || window > 0x4000)
6564                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6565                                        MGMT_STATUS_INVALID_PARAMS);
6566
6567         if (window > interval)
6568                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6569                                        MGMT_STATUS_INVALID_PARAMS);
6570
6571         hci_dev_lock(hdev);
6572
6573         hdev->le_scan_interval = interval;
6574         hdev->le_scan_window = window;
6575
6576         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6577                                 NULL, 0);
6578
6579         /* If background scan is running, restart it so new parameters are
6580          * loaded.
6581          */
6582         if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6583             hdev->discovery.state == DISCOVERY_STOPPED)
6584                 hci_update_passive_scan(hdev);
6585
6586         hci_dev_unlock(hdev);
6587
6588         return err;
6589 }
6590
6591 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6592 {
6593         struct mgmt_pending_cmd *cmd = data;
6594
6595         bt_dev_dbg(hdev, "err %d", err);
6596
6597         if (err) {
6598                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6599                                 mgmt_status(err));
6600         } else {
6601                 struct mgmt_mode *cp = cmd->param;
6602
6603                 if (cp->val)
6604                         hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6605                 else
6606                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6607
6608                 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6609                 new_settings(hdev, cmd->sk);
6610         }
6611
6612         mgmt_pending_free(cmd);
6613 }
6614
6615 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6616 {
6617         struct mgmt_pending_cmd *cmd = data;
6618         struct mgmt_mode *cp = cmd->param;
6619
6620         return hci_write_fast_connectable_sync(hdev, cp->val);
6621 }
6622
6623 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6624                                 void *data, u16 len)
6625 {
6626         struct mgmt_mode *cp = data;
6627         struct mgmt_pending_cmd *cmd;
6628         int err;
6629
6630         bt_dev_dbg(hdev, "sock %p", sk);
6631
6632         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6633             hdev->hci_ver < BLUETOOTH_VER_1_2)
6634                 return mgmt_cmd_status(sk, hdev->id,
6635                                        MGMT_OP_SET_FAST_CONNECTABLE,
6636                                        MGMT_STATUS_NOT_SUPPORTED);
6637
6638         if (cp->val != 0x00 && cp->val != 0x01)
6639                 return mgmt_cmd_status(sk, hdev->id,
6640                                        MGMT_OP_SET_FAST_CONNECTABLE,
6641                                        MGMT_STATUS_INVALID_PARAMS);
6642
6643         hci_dev_lock(hdev);
6644
6645         if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6646                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6647                 goto unlock;
6648         }
6649
6650         if (!hdev_is_powered(hdev)) {
6651                 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6652                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6653                 new_settings(hdev, sk);
6654                 goto unlock;
6655         }
6656
6657         cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6658                                len);
6659         if (!cmd)
6660                 err = -ENOMEM;
6661         else
6662                 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6663                                          fast_connectable_complete);
6664
6665         if (err < 0) {
6666                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6667                                 MGMT_STATUS_FAILED);
6668
6669                 if (cmd)
6670                         mgmt_pending_free(cmd);
6671         }
6672
6673 unlock:
6674         hci_dev_unlock(hdev);
6675
6676         return err;
6677 }
6678
6679 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6680 {
6681         struct mgmt_pending_cmd *cmd = data;
6682
6683         bt_dev_dbg(hdev, "err %d", err);
6684
6685         if (err) {
6686                 u8 mgmt_err = mgmt_status(err);
6687
6688                 /* We need to restore the flag if related HCI commands
6689                  * failed.
6690                  */
6691                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6692
6693                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6694         } else {
6695                 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6696                 new_settings(hdev, cmd->sk);
6697         }
6698
6699         mgmt_pending_free(cmd);
6700 }
6701
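/* Sync work for Set BR/EDR: disable fast connectable mode, update the
 * scan mode and refresh the advertising data so its flags reflect the
 * new BR/EDR setting.
 */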
6702 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6703 {
6704         int status;
6705
6706         status = hci_write_fast_connectable_sync(hdev, false);
6707
6708         if (!status)
6709                 status = hci_update_scan_sync(hdev);
6710
6711         /* Since only the advertising data flags will change, there
6712          * is no need to update the scan response data.
6713          */
6714         if (!status)
6715                 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6716
6717         return status;
6718 }
6719
6720 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6721 {
6722         struct mgmt_mode *cp = data;
6723         struct mgmt_pending_cmd *cmd;
6724         int err;
6725
6726         bt_dev_dbg(hdev, "sock %p", sk);
6727
6728         if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6729                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6730                                        MGMT_STATUS_NOT_SUPPORTED);
6731
6732         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6733                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6734                                        MGMT_STATUS_REJECTED);
6735
6736         if (cp->val != 0x00 && cp->val != 0x01)
6737                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6738                                        MGMT_STATUS_INVALID_PARAMS);
6739
6740         hci_dev_lock(hdev);
6741
6742         if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6743                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6744                 goto unlock;
6745         }
6746
6747         if (!hdev_is_powered(hdev)) {
6748                 if (!cp->val) {
6749                         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6750                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6751                         hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6752                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6753                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6754                 }
6755
6756                 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6757
6758                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6759                 if (err < 0)
6760                         goto unlock;
6761
6762                 err = new_settings(hdev, sk);
6763                 goto unlock;
6764         }
6765
6766         /* Reject disabling when powered on */
6767         if (!cp->val) {
6768                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6769                                       MGMT_STATUS_REJECTED);
6770                 goto unlock;
6771         } else {
6772                 /* When configuring a dual-mode controller to operate
6773                  * with LE only and using a static address, then switching
6774                  * BR/EDR back on is not allowed.
6775                  *
6776                  * Dual-mode controllers shall operate with the public
6777                  * address as their identity address for BR/EDR and LE. So
6778                  * reject the attempt to create an invalid configuration.
6779                  *
6780                  * The same restrictions apply when secure connections
6781                  * has been enabled. For BR/EDR this is a controller feature
6782                  * while for LE it is a host stack feature. This means that
6783                  * switching BR/EDR back on when secure connections has been
6784                  * enabled is not a supported transaction.
6785                  */
6786                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6787                     (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6788                      hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6789                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6790                                               MGMT_STATUS_REJECTED);
6791                         goto unlock;
6792                 }
6793         }
6794
6795         cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6796         if (!cmd)
6797                 err = -ENOMEM;
6798         else
6799                 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6800                                          set_bredr_complete);
6801
6802         if (err < 0) {
6803                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6804                                 MGMT_STATUS_FAILED);
6805                 if (cmd)
6806                         mgmt_pending_free(cmd);
6807
6808                 goto unlock;
6809         }
6810
6811         /* We need to flip the bit already here so that
6812          * hci_update_adv_data_sync generates the correct flags.
6813          */
6814         hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6815
6816 unlock:
6817         hci_dev_unlock(hdev);
6818         return err;
6819 }
6820
6821 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6822 {
6823         struct mgmt_pending_cmd *cmd = data;
6824         struct mgmt_mode *cp;
6825
6826         bt_dev_dbg(hdev, "err %d", err);
6827
6828         if (err) {
6829                 u8 mgmt_err = mgmt_status(err);
6830
6831                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6832                 goto done;
6833         }
6834
6835         cp = cmd->param;
6836
6837         switch (cp->val) {
6838         case 0x00:
6839                 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6840                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6841                 break;
6842         case 0x01:
6843                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6844                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6845                 break;
6846         case 0x02:
6847                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6848                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6849                 break;
6850         }
6851
6852         send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6853         new_settings(hdev, cmd->sk);
6854
6855 done:
6856         mgmt_pending_free(cmd);
6857 }
6858
6859 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6860 {
6861         struct mgmt_pending_cmd *cmd = data;
6862         struct mgmt_mode *cp = cmd->param;
6863         u8 val = !!cp->val;
6864
6865         /* Force write of val */
6866         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6867
6868         return hci_write_sc_support_sync(hdev, val);
6869 }
6870
6871 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6872                            void *data, u16 len)
6873 {
6874         struct mgmt_mode *cp = data;
6875         struct mgmt_pending_cmd *cmd;
6876         u8 val;
6877         int err;
6878
6879         bt_dev_dbg(hdev, "sock %p", sk);
6880
6881         if (!lmp_sc_capable(hdev) &&
6882             !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6883                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6884                                        MGMT_STATUS_NOT_SUPPORTED);
6885
6886         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6887             lmp_sc_capable(hdev) &&
6888             !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6889                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6890                                        MGMT_STATUS_REJECTED);
6891
6892         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6893                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6894                                        MGMT_STATUS_INVALID_PARAMS);
6895
6896         hci_dev_lock(hdev);
6897
6898         if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6899             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6900                 bool changed;
6901
6902                 if (cp->val) {
6903                         changed = !hci_dev_test_and_set_flag(hdev,
6904                                                              HCI_SC_ENABLED);
6905                         if (cp->val == 0x02)
6906                                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6907                         else
6908                                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6909                 } else {
6910                         changed = hci_dev_test_and_clear_flag(hdev,
6911                                                               HCI_SC_ENABLED);
6912                         hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6913                 }
6914
6915                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6916                 if (err < 0)
6917                         goto failed;
6918
6919                 if (changed)
6920                         err = new_settings(hdev, sk);
6921
6922                 goto failed;
6923         }
6924
6925         val = !!cp->val;
6926
6927         if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6928             (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6929                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6930                 goto failed;
6931         }
6932
6933         cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6934         if (!cmd)
6935                 err = -ENOMEM;
6936         else
6937                 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6938                                          set_secure_conn_complete);
6939
6940         if (err < 0) {
6941                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6942                                 MGMT_STATUS_FAILED);
6943                 if (cmd)
6944                         mgmt_pending_free(cmd);
6945         }
6946
6947 failed:
6948         hci_dev_unlock(hdev);
6949         return err;
6950 }
6951
6952 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6953                           void *data, u16 len)
6954 {
6955         struct mgmt_mode *cp = data;
6956         bool changed, use_changed;
6957         int err;
6958
6959         bt_dev_dbg(hdev, "sock %p", sk);
6960
6961         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6962                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6963                                        MGMT_STATUS_INVALID_PARAMS);
6964
6965         hci_dev_lock(hdev);
6966
6967         if (cp->val)
6968                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6969         else
6970                 changed = hci_dev_test_and_clear_flag(hdev,
6971                                                       HCI_KEEP_DEBUG_KEYS);
6972
6973         if (cp->val == 0x02)
6974                 use_changed = !hci_dev_test_and_set_flag(hdev,
6975                                                          HCI_USE_DEBUG_KEYS);
6976         else
6977                 use_changed = hci_dev_test_and_clear_flag(hdev,
6978                                                           HCI_USE_DEBUG_KEYS);
6979
6980         if (hdev_is_powered(hdev) && use_changed &&
6981             hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6982                 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6983                 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6984                              sizeof(mode), &mode);
6985         }
6986
6987         err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6988         if (err < 0)
6989                 goto unlock;
6990
6991         if (changed)
6992                 err = new_settings(hdev, sk);
6993
6994 unlock:
6995         hci_dev_unlock(hdev);
6996         return err;
6997 }
6998
6999 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7000                        u16 len)
7001 {
7002         struct mgmt_cp_set_privacy *cp = cp_data;
7003         bool changed;
7004         int err;
7005
7006         bt_dev_dbg(hdev, "sock %p", sk);
7007
7008         if (!lmp_le_capable(hdev))
7009                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7010                                        MGMT_STATUS_NOT_SUPPORTED);
7011
7012         if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7013                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7014                                        MGMT_STATUS_INVALID_PARAMS);
7015
7016         if (hdev_is_powered(hdev))
7017                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7018                                        MGMT_STATUS_REJECTED);
7019
7020         hci_dev_lock(hdev);
7021
7022         /* If user space supports this command it is also expected to
7023          * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7024          */
7025         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7026
7027         if (cp->privacy) {
7028                 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7029                 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7030                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7031                 hci_adv_instances_set_rpa_expired(hdev, true);
7032                 if (cp->privacy == 0x02)
7033                         hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7034                 else
7035                         hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7036         } else {
7037                 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7038                 memset(hdev->irk, 0, sizeof(hdev->irk));
7039                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7040                 hci_adv_instances_set_rpa_expired(hdev, false);
7041                 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7042         }
7043
7044         err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7045         if (err < 0)
7046                 goto unlock;
7047
7048         if (changed)
7049                 err = new_settings(hdev, sk);
7050
7051 unlock:
7052         hci_dev_unlock(hdev);
7053         return err;
7054 }
7055
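/* An IRK entry must use either a public LE address or a static random
 * address (two most significant bits set).
 */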
7056 static bool irk_is_valid(struct mgmt_irk_info *irk)
7057 {
7058         switch (irk->addr.type) {
7059         case BDADDR_LE_PUBLIC:
7060                 return true;
7061
7062         case BDADDR_LE_RANDOM:
7063                 /* Two most significant bits shall be set */
7064                 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7065                         return false;
7066                 return true;
7067         }
7068
7069         return false;
7070 }
7071
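/* Load IRKs replaces the kernel's IRK store with the list supplied by
 * user space (blocked keys are skipped) and enables RPA resolving.
 */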
7072 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7073                      u16 len)
7074 {
7075         struct mgmt_cp_load_irks *cp = cp_data;
7076         const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7077                                    sizeof(struct mgmt_irk_info));
7078         u16 irk_count, expected_len;
7079         int i, err;
7080
7081         bt_dev_dbg(hdev, "sock %p", sk);
7082
7083         if (!lmp_le_capable(hdev))
7084                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7085                                        MGMT_STATUS_NOT_SUPPORTED);
7086
7087         irk_count = __le16_to_cpu(cp->irk_count);
7088         if (irk_count > max_irk_count) {
7089                 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7090                            irk_count);
7091                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7092                                        MGMT_STATUS_INVALID_PARAMS);
7093         }
7094
7095         expected_len = struct_size(cp, irks, irk_count);
7096         if (expected_len != len) {
7097                 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7098                            expected_len, len);
7099                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7100                                        MGMT_STATUS_INVALID_PARAMS);
7101         }
7102
7103         bt_dev_dbg(hdev, "irk_count %u", irk_count);
7104
7105         for (i = 0; i < irk_count; i++) {
7106                 struct mgmt_irk_info *key = &cp->irks[i];
7107
7108                 if (!irk_is_valid(key))
7109                         return mgmt_cmd_status(sk, hdev->id,
7110                                                MGMT_OP_LOAD_IRKS,
7111                                                MGMT_STATUS_INVALID_PARAMS);
7112         }
7113
7114         hci_dev_lock(hdev);
7115
7116         hci_smp_irks_clear(hdev);
7117
7118         for (i = 0; i < irk_count; i++) {
7119                 struct mgmt_irk_info *irk = &cp->irks[i];
7120
7121                 if (hci_is_blocked_key(hdev,
7122                                        HCI_BLOCKED_KEY_TYPE_IRK,
7123                                        irk->val)) {
7124                         bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7125                                     &irk->addr.bdaddr);
7126                         continue;
7127                 }
7128
7129                 hci_add_irk(hdev, &irk->addr.bdaddr,
7130                             le_addr_type(irk->addr.type), irk->val,
7131                             BDADDR_ANY);
7132         }
7133
7134         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7135
7136         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7137
7138         hci_dev_unlock(hdev);
7139
7140         return err;
7141 }
7142
7143 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7144 {
7145         if (key->initiator != 0x00 && key->initiator != 0x01)
7146                 return false;
7147
7148         switch (key->addr.type) {
7149         case BDADDR_LE_PUBLIC:
7150                 return true;
7151
7152         case BDADDR_LE_RANDOM:
7153                 /* Two most significant bits shall be set */
7154                 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7155                         return false;
7156                 return true;
7157         }
7158
7159         return false;
7160 }
7161
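/* Load Long Term Keys replaces the kernel's LTK store with the list
 * supplied by user space; blocked keys and P-256 debug keys are
 * skipped.
 */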
7162 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7163                                void *cp_data, u16 len)
7164 {
7165         struct mgmt_cp_load_long_term_keys *cp = cp_data;
7166         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7167                                    sizeof(struct mgmt_ltk_info));
7168         u16 key_count, expected_len;
7169         int i, err;
7170
7171         bt_dev_dbg(hdev, "sock %p", sk);
7172
7173         if (!lmp_le_capable(hdev))
7174                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7175                                        MGMT_STATUS_NOT_SUPPORTED);
7176
7177         key_count = __le16_to_cpu(cp->key_count);
7178         if (key_count > max_key_count) {
7179                 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7180                            key_count);
7181                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7182                                        MGMT_STATUS_INVALID_PARAMS);
7183         }
7184
7185         expected_len = struct_size(cp, keys, key_count);
7186         if (expected_len != len) {
7187                 bt_dev_err(hdev, "load_ltks: expected %u bytes, got %u bytes",
7188                            expected_len, len);
7189                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7190                                        MGMT_STATUS_INVALID_PARAMS);
7191         }
7192
7193         bt_dev_dbg(hdev, "key_count %u", key_count);
7194
7195         for (i = 0; i < key_count; i++) {
7196                 struct mgmt_ltk_info *key = &cp->keys[i];
7197
7198                 if (!ltk_is_valid(key))
7199                         return mgmt_cmd_status(sk, hdev->id,
7200                                                MGMT_OP_LOAD_LONG_TERM_KEYS,
7201                                                MGMT_STATUS_INVALID_PARAMS);
7202         }
7203
7204         hci_dev_lock(hdev);
7205
7206         hci_smp_ltks_clear(hdev);
7207
7208         for (i = 0; i < key_count; i++) {
7209                 struct mgmt_ltk_info *key = &cp->keys[i];
7210                 u8 type, authenticated;
7211
7212                 if (hci_is_blocked_key(hdev,
7213                                        HCI_BLOCKED_KEY_TYPE_LTK,
7214                                        key->val)) {
7215                         bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7216                                     &key->addr.bdaddr);
7217                         continue;
7218                 }
7219
7220                 switch (key->type) {
7221                 case MGMT_LTK_UNAUTHENTICATED:
7222                         authenticated = 0x00;
7223                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7224                         break;
7225                 case MGMT_LTK_AUTHENTICATED:
7226                         authenticated = 0x01;
7227                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7228                         break;
7229                 case MGMT_LTK_P256_UNAUTH:
7230                         authenticated = 0x00;
7231                         type = SMP_LTK_P256;
7232                         break;
7233                 case MGMT_LTK_P256_AUTH:
7234                         authenticated = 0x01;
7235                         type = SMP_LTK_P256;
7236                         break;
7237                 case MGMT_LTK_P256_DEBUG:
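                        /* Debug LTKs are deliberately not stored: the
                         * fallthrough below ends up in the default case,
                         * which skips the key.
                         */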
7238                         authenticated = 0x00;
7239                         type = SMP_LTK_P256_DEBUG;
7240                         fallthrough;
7241                 default:
7242                         continue;
7243                 }
7244
7245                 hci_add_ltk(hdev, &key->addr.bdaddr,
7246                             le_addr_type(key->addr.type), type, authenticated,
7247                             key->val, key->enc_size, key->ediv, key->rand);
7248         }
7249
7250         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7251                                 NULL, 0);
7252
7253         hci_dev_unlock(hdev);
7254
7255         return err;
7256 }
7257
7258 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7259 {
7260         struct mgmt_pending_cmd *cmd = data;
7261         struct hci_conn *conn = cmd->user_data;
7262         struct mgmt_cp_get_conn_info *cp = cmd->param;
7263         struct mgmt_rp_get_conn_info rp;
7264         u8 status;
7265
7266         bt_dev_dbg(hdev, "err %d", err);
7267
7268         memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7269
7270         status = mgmt_status(err);
7271         if (status == MGMT_STATUS_SUCCESS) {
7272                 rp.rssi = conn->rssi;
7273                 rp.tx_power = conn->tx_power;
7274                 rp.max_tx_power = conn->max_tx_power;
7275         } else {
7276                 rp.rssi = HCI_RSSI_INVALID;
7277                 rp.tx_power = HCI_TX_POWER_INVALID;
7278                 rp.max_tx_power = HCI_TX_POWER_INVALID;
7279         }
7280
7281         mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7282                           &rp, sizeof(rp));
7283
7284         mgmt_pending_free(cmd);
7285 }
7286
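/* Sync work for Get Connection Information: re-check that the
 * connection is still up, then read the RSSI and any TX power values
 * that have not been cached yet.
 */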
7287 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7288 {
7289         struct mgmt_pending_cmd *cmd = data;
7290         struct mgmt_cp_get_conn_info *cp = cmd->param;
7291         struct hci_conn *conn;
7292         int err;
7293         __le16 handle;
7294
7295         /* Make sure we are still connected */
7296         if (cp->addr.type == BDADDR_BREDR)
7297                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7298                                                &cp->addr.bdaddr);
7299         else
7300                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7301
7302         if (!conn || conn->state != BT_CONNECTED)
7303                 return MGMT_STATUS_NOT_CONNECTED;
7304
7305         cmd->user_data = conn;
7306         handle = cpu_to_le16(conn->handle);
7307
7308         /* Refresh RSSI each time */
7309         err = hci_read_rssi_sync(hdev, handle);
7310
7311         /* For LE links the TX power does not change, so we don't need to
7312          * query it again once the value is known.
7313          */
7314         if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7315                      conn->tx_power == HCI_TX_POWER_INVALID))
7316                 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7317
7318         /* Max TX power needs to be read only once per connection */
7319         if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7320                 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7321
7322         return err;
7323 }
7324
7325 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7326                          u16 len)
7327 {
7328         struct mgmt_cp_get_conn_info *cp = data;
7329         struct mgmt_rp_get_conn_info rp;
7330         struct hci_conn *conn;
7331         unsigned long conn_info_age;
7332         int err = 0;
7333
7334         bt_dev_dbg(hdev, "sock %p", sk);
7335
7336         memset(&rp, 0, sizeof(rp));
7337         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7338         rp.addr.type = cp->addr.type;
7339
7340         if (!bdaddr_type_is_valid(cp->addr.type))
7341                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7342                                          MGMT_STATUS_INVALID_PARAMS,
7343                                          &rp, sizeof(rp));
7344
7345         hci_dev_lock(hdev);
7346
7347         if (!hdev_is_powered(hdev)) {
7348                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7349                                         MGMT_STATUS_NOT_POWERED, &rp,
7350                                         sizeof(rp));
7351                 goto unlock;
7352         }
7353
7354         if (cp->addr.type == BDADDR_BREDR)
7355                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7356                                                &cp->addr.bdaddr);
7357         else
7358                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7359
7360         if (!conn || conn->state != BT_CONNECTED) {
7361                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7362                                         MGMT_STATUS_NOT_CONNECTED, &rp,
7363                                         sizeof(rp));
7364                 goto unlock;
7365         }
7366
7367         /* To avoid the client trying to guess when to poll again, calculate the
7368          * conn info age as a random value between the min/max set in hdev.
7369          */
7370         conn_info_age = hdev->conn_info_min_age +
7371                         prandom_u32_max(hdev->conn_info_max_age -
7372                                         hdev->conn_info_min_age);
7373
7374         /* Query controller to refresh cached values if they are too old or were
7375          * never read.
7376          */
7377         if (time_after(jiffies, conn->conn_info_timestamp +
7378                        msecs_to_jiffies(conn_info_age)) ||
7379             !conn->conn_info_timestamp) {
7380                 struct mgmt_pending_cmd *cmd;
7381
7382                 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7383                                        len);
7384                 if (!cmd) {
7385                         err = -ENOMEM;
7386                 } else {
7387                         err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7388                                                  cmd, get_conn_info_complete);
7389                 }
7390
7391                 if (err < 0) {
7392                         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7393                                           MGMT_STATUS_FAILED, &rp, sizeof(rp));
7394
7395                         if (cmd)
7396                                 mgmt_pending_free(cmd);
7397
7398                         goto unlock;
7399                 }
7400
7401                 conn->conn_info_timestamp = jiffies;
7402         } else {
7403                 /* Cache is valid, just reply with values cached in hci_conn */
7404                 rp.rssi = conn->rssi;
7405                 rp.tx_power = conn->tx_power;
7406                 rp.max_tx_power = conn->max_tx_power;
7407
7408                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7409                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7410         }
7411
7412 unlock:
7413         hci_dev_unlock(hdev);
7414         return err;
7415 }
7416
7417 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7418 {
7419         struct mgmt_pending_cmd *cmd = data;
7420         struct mgmt_cp_get_clock_info *cp = cmd->param;
7421         struct mgmt_rp_get_clock_info rp;
7422         struct hci_conn *conn = cmd->user_data;
7423         u8 status = mgmt_status(err);
7424
7425         bt_dev_dbg(hdev, "err %d", err);
7426
7427         memset(&rp, 0, sizeof(rp));
7428         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7429         rp.addr.type = cp->addr.type;
7430
7431         if (err)
7432                 goto complete;
7433
7434         rp.local_clock = cpu_to_le32(hdev->clock);
7435
7436         if (conn) {
7437                 rp.piconet_clock = cpu_to_le32(conn->clock);
7438                 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7439         }
7440
7441 complete:
7442         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7443                           sizeof(rp));
7444
7445         mgmt_pending_free(cmd);
7446 }
7447
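     /* Runs from the hci_sync command queue: the first hci_read_clock_sync()
      * call (all-zero parameters) reads the local clock, the second reads the
      * piconet clock of the ACL connection given in the command parameters.
      */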
7448 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7449 {
7450         struct mgmt_pending_cmd *cmd = data;
7451         struct mgmt_cp_get_clock_info *cp = cmd->param;
7452         struct hci_cp_read_clock hci_cp;
7453         struct hci_conn *conn;
7454
7455         memset(&hci_cp, 0, sizeof(hci_cp));
7456         hci_read_clock_sync(hdev, &hci_cp);
7457
7458         /* Make sure connection still exists */
7459         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7460         if (!conn || conn->state != BT_CONNECTED)
7461                 return MGMT_STATUS_NOT_CONNECTED;
7462
7463         cmd->user_data = conn;
7464         hci_cp.handle = cpu_to_le16(conn->handle);
7465         hci_cp.which = 0x01; /* Piconet clock */
7466
7467         return hci_read_clock_sync(hdev, &hci_cp);
7468 }
7469
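     /* MGMT_OP_GET_CLOCK_INFO handler: BR/EDR addresses only. When a specific
      * address is given, the connection is validated before queuing
      * get_clock_info_sync() to read the clock values.
      */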
7470 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7471                                                                 u16 len)
7472 {
7473         struct mgmt_cp_get_clock_info *cp = data;
7474         struct mgmt_rp_get_clock_info rp;
7475         struct mgmt_pending_cmd *cmd;
7476         struct hci_conn *conn;
7477         int err;
7478
7479         bt_dev_dbg(hdev, "sock %p", sk);
7480
7481         memset(&rp, 0, sizeof(rp));
7482         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7483         rp.addr.type = cp->addr.type;
7484
7485         if (cp->addr.type != BDADDR_BREDR)
7486                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7487                                          MGMT_STATUS_INVALID_PARAMS,
7488                                          &rp, sizeof(rp));
7489
7490         hci_dev_lock(hdev);
7491
7492         if (!hdev_is_powered(hdev)) {
7493                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7494                                         MGMT_STATUS_NOT_POWERED, &rp,
7495                                         sizeof(rp));
7496                 goto unlock;
7497         }
7498
7499         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7500                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7501                                                &cp->addr.bdaddr);
7502                 if (!conn || conn->state != BT_CONNECTED) {
7503                         err = mgmt_cmd_complete(sk, hdev->id,
7504                                                 MGMT_OP_GET_CLOCK_INFO,
7505                                                 MGMT_STATUS_NOT_CONNECTED,
7506                                                 &rp, sizeof(rp));
7507                         goto unlock;
7508                 }
7509         } else {
7510                 conn = NULL;
7511         }
7512
7513         cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7514         if (!cmd)
7515                 err = -ENOMEM;
7516         else
7517                 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7518                                          get_clock_info_complete);
7519
7520         if (err < 0) {
7521                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7522                                         MGMT_STATUS_FAILED, &rp, sizeof(rp));
7523
7524                 if (cmd)
7525                         mgmt_pending_free(cmd);
7526         }
7527
7529 unlock:
7530         hci_dev_unlock(hdev);
7531         return err;
7532 }
7533
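     /* Return true only if there is an established LE connection to the given
      * address with a matching destination address type.
      */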
7534 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7535 {
7536         struct hci_conn *conn;
7537
7538         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7539         if (!conn)
7540                 return false;
7541
7542         if (conn->dst_type != type)
7543                 return false;
7544
7545         if (conn->state != BT_CONNECTED)
7546                 return false;
7547
7548         return true;
7549 }
7550
7551 /* This function requires the caller holds hdev->lock */
7552 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7553                                u8 addr_type, u8 auto_connect)
7554 {
7555         struct hci_conn_params *params;
7556
7557         params = hci_conn_params_add(hdev, addr, addr_type);
7558         if (!params)
7559                 return -EIO;
7560
7561         if (params->auto_connect == auto_connect)
7562                 return 0;
7563
7564         list_del_init(&params->action);
7565
7566         switch (auto_connect) {
7567         case HCI_AUTO_CONN_DISABLED:
7568         case HCI_AUTO_CONN_LINK_LOSS:
7569                 /* If auto connect is being disabled while we're trying to
7570                  * connect to a device, keep connecting.
7571                  */
7572                 if (params->explicit_connect)
7573                         list_add(&params->action, &hdev->pend_le_conns);
7574                 break;
7575         case HCI_AUTO_CONN_REPORT:
7576                 if (params->explicit_connect)
7577                         list_add(&params->action, &hdev->pend_le_conns);
7578                 else
7579                         list_add(&params->action, &hdev->pend_le_reports);
7580                 break;
7581         case HCI_AUTO_CONN_DIRECT:
7582         case HCI_AUTO_CONN_ALWAYS:
7583                 if (!is_connected(hdev, addr, addr_type))
7584                         list_add(&params->action, &hdev->pend_le_conns);
7585                 break;
7586         }
7587
7588         params->auto_connect = auto_connect;
7589
7590         bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7591                    addr, addr_type, auto_connect);
7592
7593         return 0;
7594 }
7595
7596 static void device_added(struct sock *sk, struct hci_dev *hdev,
7597                          bdaddr_t *bdaddr, u8 type, u8 action)
7598 {
7599         struct mgmt_ev_device_added ev;
7600
7601         bacpy(&ev.addr.bdaddr, bdaddr);
7602         ev.addr.type = type;
7603         ev.action = action;
7604
7605         mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7606 }
7607
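     /* The accept list or connection parameters were just updated by
      * add_device(), so let hci_sync re-program the LE passive scan.
      */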
7608 static int add_device_sync(struct hci_dev *hdev, void *data)
7609 {
7610         return hci_update_passive_scan_sync(hdev);
7611 }
7612
7613 static int add_device(struct sock *sk, struct hci_dev *hdev,
7614                       void *data, u16 len)
7615 {
7616         struct mgmt_cp_add_device *cp = data;
7617         u8 auto_conn, addr_type;
7618         struct hci_conn_params *params;
7619         int err;
7620         u32 current_flags = 0;
7621         u32 supported_flags;
7622
7623         bt_dev_dbg(hdev, "sock %p", sk);
7624
7625         if (!bdaddr_type_is_valid(cp->addr.type) ||
7626             !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7627                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7628                                          MGMT_STATUS_INVALID_PARAMS,
7629                                          &cp->addr, sizeof(cp->addr));
7630
7631         if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7632                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7633                                          MGMT_STATUS_INVALID_PARAMS,
7634                                          &cp->addr, sizeof(cp->addr));
7635
7636         hci_dev_lock(hdev);
7637
7638         if (cp->addr.type == BDADDR_BREDR) {
7639                 /* Only the incoming connection action (0x01) is supported for now */
7640                 if (cp->action != 0x01) {
7641                         err = mgmt_cmd_complete(sk, hdev->id,
7642                                                 MGMT_OP_ADD_DEVICE,
7643                                                 MGMT_STATUS_INVALID_PARAMS,
7644                                                 &cp->addr, sizeof(cp->addr));
7645                         goto unlock;
7646                 }
7647
7648                 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7649                                                      &cp->addr.bdaddr,
7650                                                      cp->addr.type, 0);
7651                 if (err)
7652                         goto unlock;
7653
7654                 hci_update_scan(hdev);
7655
7656                 goto added;
7657         }
7658
7659         addr_type = le_addr_type(cp->addr.type);
7660
7661         if (cp->action == 0x02)
7662                 auto_conn = HCI_AUTO_CONN_ALWAYS;
7663         else if (cp->action == 0x01)
7664                 auto_conn = HCI_AUTO_CONN_DIRECT;
7665         else
7666                 auto_conn = HCI_AUTO_CONN_REPORT;
7667
7668         /* The kernel internally uses conn_params with resolvable private
7669          * addresses, but Add Device allows only identity addresses.
7670          * Make sure this is enforced before calling
7671          * hci_conn_params_lookup.
7672          */
7673         if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7674                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7675                                         MGMT_STATUS_INVALID_PARAMS,
7676                                         &cp->addr, sizeof(cp->addr));
7677                 goto unlock;
7678         }
7679
7680         /* If the connection parameters don't exist for this device,
7681          * they will be created and configured with defaults.
7682          */
7683         if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7684                                 auto_conn) < 0) {
7685                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7686                                         MGMT_STATUS_FAILED, &cp->addr,
7687                                         sizeof(cp->addr));
7688                 goto unlock;
7689         } else {
7690                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7691                                                 addr_type);
7692                 if (params)
7693                         current_flags = params->flags;
7694         }
7695
7696         err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7697         if (err < 0)
7698                 goto unlock;
7699
7700 added:
7701         device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7702         supported_flags = hdev->conn_flags;
7703         device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7704                              supported_flags, current_flags);
7705
7706         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7707                                 MGMT_STATUS_SUCCESS, &cp->addr,
7708                                 sizeof(cp->addr));
7709
7710 unlock:
7711         hci_dev_unlock(hdev);
7712         return err;
7713 }
7714
7715 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7716                            bdaddr_t *bdaddr, u8 type)
7717 {
7718         struct mgmt_ev_device_removed ev;
7719
7720         bacpy(&ev.addr.bdaddr, bdaddr);
7721         ev.addr.type = type;
7722
7723         mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7724 }
7725
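     /* Counterpart of add_device_sync(): refresh the LE passive scan after
      * device entries have been removed.
      */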
7726 static int remove_device_sync(struct hci_dev *hdev, void *data)
7727 {
7728         return hci_update_passive_scan_sync(hdev);
7729 }
7730
7731 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7732                          void *data, u16 len)
7733 {
7734         struct mgmt_cp_remove_device *cp = data;
7735         int err;
7736
7737         bt_dev_dbg(hdev, "sock %p", sk);
7738
7739         hci_dev_lock(hdev);
7740
7741         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7742                 struct hci_conn_params *params;
7743                 u8 addr_type;
7744
7745                 if (!bdaddr_type_is_valid(cp->addr.type)) {
7746                         err = mgmt_cmd_complete(sk, hdev->id,
7747                                                 MGMT_OP_REMOVE_DEVICE,
7748                                                 MGMT_STATUS_INVALID_PARAMS,
7749                                                 &cp->addr, sizeof(cp->addr));
7750                         goto unlock;
7751                 }
7752
7753                 if (cp->addr.type == BDADDR_BREDR) {
7754                         err = hci_bdaddr_list_del(&hdev->accept_list,
7755                                                   &cp->addr.bdaddr,
7756                                                   cp->addr.type);
7757                         if (err) {
7758                                 err = mgmt_cmd_complete(sk, hdev->id,
7759                                                         MGMT_OP_REMOVE_DEVICE,
7760                                                         MGMT_STATUS_INVALID_PARAMS,
7761                                                         &cp->addr,
7762                                                         sizeof(cp->addr));
7763                                 goto unlock;
7764                         }
7765
7766                         hci_update_scan(hdev);
7767
7768                         device_removed(sk, hdev, &cp->addr.bdaddr,
7769                                        cp->addr.type);
7770                         goto complete;
7771                 }
7772
7773                 addr_type = le_addr_type(cp->addr.type);
7774
7775                 /* The kernel internally uses conn_params with resolvable private
7776                  * addresses, but Remove Device allows only identity addresses.
7777                  * Make sure this is enforced before calling
7778                  * hci_conn_params_lookup.
7779                  */
7780                 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7781                         err = mgmt_cmd_complete(sk, hdev->id,
7782                                                 MGMT_OP_REMOVE_DEVICE,
7783                                                 MGMT_STATUS_INVALID_PARAMS,
7784                                                 &cp->addr, sizeof(cp->addr));
7785                         goto unlock;
7786                 }
7787
7788                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7789                                                 addr_type);
7790                 if (!params) {
7791                         err = mgmt_cmd_complete(sk, hdev->id,
7792                                                 MGMT_OP_REMOVE_DEVICE,
7793                                                 MGMT_STATUS_INVALID_PARAMS,
7794                                                 &cp->addr, sizeof(cp->addr));
7795                         goto unlock;
7796                 }
7797
7798                 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7799                     params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7800                         err = mgmt_cmd_complete(sk, hdev->id,
7801                                                 MGMT_OP_REMOVE_DEVICE,
7802                                                 MGMT_STATUS_INVALID_PARAMS,
7803                                                 &cp->addr, sizeof(cp->addr));
7804                         goto unlock;
7805                 }
7806
7807                 list_del(&params->action);
7808                 list_del(&params->list);
7809                 kfree(params);
7810
7811                 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7812         } else {
7813                 struct hci_conn_params *p, *tmp;
7814                 struct bdaddr_list *b, *btmp;
7815
7816                 if (cp->addr.type) {
7817                         err = mgmt_cmd_complete(sk, hdev->id,
7818                                                 MGMT_OP_REMOVE_DEVICE,
7819                                                 MGMT_STATUS_INVALID_PARAMS,
7820                                                 &cp->addr, sizeof(cp->addr));
7821                         goto unlock;
7822                 }
7823
7824                 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7825                         device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7826                         list_del(&b->list);
7827                         kfree(b);
7828                 }
7829
7830                 hci_update_scan(hdev);
7831
7832                 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7833                         if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7834                                 continue;
7835                         device_removed(sk, hdev, &p->addr, p->addr_type);
7836                         if (p->explicit_connect) {
7837                                 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7838                                 continue;
7839                         }
7840                         list_del(&p->action);
7841                         list_del(&p->list);
7842                         kfree(p);
7843                 }
7844
7845                 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7846         }
7847
7848         hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7849
7850 complete:
7851         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7852                                 MGMT_STATUS_SUCCESS, &cp->addr,
7853                                 sizeof(cp->addr));
7854 unlock:
7855         hci_dev_unlock(hdev);
7856         return err;
7857 }
7858
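     /* MGMT_OP_LOAD_CONN_PARAM handler: validate that param_count matches the
      * payload length, clear out entries whose auto-connect is disabled, and
      * store each valid entry as the default LE connection parameters for its
      * address.
      */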
7859 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7860                            u16 len)
7861 {
7862         struct mgmt_cp_load_conn_param *cp = data;
7863         const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7864                                      sizeof(struct mgmt_conn_param));
7865         u16 param_count, expected_len;
7866         int i;
7867
7868         if (!lmp_le_capable(hdev))
7869                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7870                                        MGMT_STATUS_NOT_SUPPORTED);
7871
7872         param_count = __le16_to_cpu(cp->param_count);
7873         if (param_count > max_param_count) {
7874                 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7875                            param_count);
7876                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7877                                        MGMT_STATUS_INVALID_PARAMS);
7878         }
7879
7880         expected_len = struct_size(cp, params, param_count);
7881         if (expected_len != len) {
7882                 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7883                            expected_len, len);
7884                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7885                                        MGMT_STATUS_INVALID_PARAMS);
7886         }
7887
7888         bt_dev_dbg(hdev, "param_count %u", param_count);
7889
7890         hci_dev_lock(hdev);
7891
7892         hci_conn_params_clear_disabled(hdev);
7893
7894         for (i = 0; i < param_count; i++) {
7895                 struct mgmt_conn_param *param = &cp->params[i];
7896                 struct hci_conn_params *hci_param;
7897                 u16 min, max, latency, timeout;
7898                 u8 addr_type;
7899
7900                 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7901                            param->addr.type);
7902
7903                 if (param->addr.type == BDADDR_LE_PUBLIC) {
7904                         addr_type = ADDR_LE_DEV_PUBLIC;
7905                 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7906                         addr_type = ADDR_LE_DEV_RANDOM;
7907                 } else {
7908                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7909                         continue;
7910                 }
7911
7912                 min = le16_to_cpu(param->min_interval);
7913                 max = le16_to_cpu(param->max_interval);
7914                 latency = le16_to_cpu(param->latency);
7915                 timeout = le16_to_cpu(param->timeout);
7916
7917                 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7918                            min, max, latency, timeout);
7919
7920                 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7921                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7922                         continue;
7923                 }
7924
7925                 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7926                                                 addr_type);
7927                 if (!hci_param) {
7928                         bt_dev_err(hdev, "failed to add connection parameters");
7929                         continue;
7930                 }
7931
7932                 hci_param->conn_min_interval = min;
7933                 hci_param->conn_max_interval = max;
7934                 hci_param->conn_latency = latency;
7935                 hci_param->supervision_timeout = timeout;
7936         }
7937
7938         hci_dev_unlock(hdev);
7939
7940         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7941                                  NULL, 0);
7942 }
7943
7944 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7945                                void *data, u16 len)
7946 {
7947         struct mgmt_cp_set_external_config *cp = data;
7948         bool changed;
7949         int err;
7950
7951         bt_dev_dbg(hdev, "sock %p", sk);
7952
7953         if (hdev_is_powered(hdev))
7954                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7955                                        MGMT_STATUS_REJECTED);
7956
7957         if (cp->config != 0x00 && cp->config != 0x01)
7958                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7959                                        MGMT_STATUS_INVALID_PARAMS);
7960
7961         if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7962                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7963                                        MGMT_STATUS_NOT_SUPPORTED);
7964
7965         hci_dev_lock(hdev);
7966
7967         if (cp->config)
7968                 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7969         else
7970                 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7971
7972         err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7973         if (err < 0)
7974                 goto unlock;
7975
7976         if (!changed)
7977                 goto unlock;
7978
7979         err = new_options(hdev, sk);
7980
7981         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7982                 mgmt_index_removed(hdev);
7983
7984                 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7985                         hci_dev_set_flag(hdev, HCI_CONFIG);
7986                         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7987
7988                         queue_work(hdev->req_workqueue, &hdev->power_on);
7989                 } else {
7990                         set_bit(HCI_RAW, &hdev->flags);
7991                         mgmt_index_added(hdev);
7992                 }
7993         }
7994
7995 unlock:
7996         hci_dev_unlock(hdev);
7997         return err;
7998 }
7999
8000 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8001                               void *data, u16 len)
8002 {
8003         struct mgmt_cp_set_public_address *cp = data;
8004         bool changed;
8005         int err;
8006
8007         bt_dev_dbg(hdev, "sock %p", sk);
8008
8009         if (hdev_is_powered(hdev))
8010                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8011                                        MGMT_STATUS_REJECTED);
8012
8013         if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8014                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8015                                        MGMT_STATUS_INVALID_PARAMS);
8016
8017         if (!hdev->set_bdaddr)
8018                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8019                                        MGMT_STATUS_NOT_SUPPORTED);
8020
8021         hci_dev_lock(hdev);
8022
8023         changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8024         bacpy(&hdev->public_addr, &cp->bdaddr);
8025
8026         err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8027         if (err < 0)
8028                 goto unlock;
8029
8030         if (!changed)
8031                 goto unlock;
8032
8033         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8034                 err = new_options(hdev, sk);
8035
8036         if (is_configured(hdev)) {
8037                 mgmt_index_removed(hdev);
8038
8039                 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8040
8041                 hci_dev_set_flag(hdev, HCI_CONFIG);
8042                 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8043
8044                 queue_work(hdev->req_workqueue, &hdev->power_on);
8045         }
8046
8047 unlock:
8048         hci_dev_unlock(hdev);
8049         return err;
8050 }
8051
8052 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8053                                              int err)
8054 {
8055         const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8056         struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8057         u8 *h192, *r192, *h256, *r256;
8058         struct mgmt_pending_cmd *cmd = data;
8059         struct sk_buff *skb = cmd->skb;
8060         u8 status = mgmt_status(err);
8061         u16 eir_len;
8062
8063         if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8064                 return;
8065
8066         if (!status) {
8067                 if (!skb)
8068                         status = MGMT_STATUS_FAILED;
8069                 else if (IS_ERR(skb))
8070                         status = mgmt_status(PTR_ERR(skb));
8071                 else
8072                         status = mgmt_status(skb->data[0]);
8073         }
8074
8075         bt_dev_dbg(hdev, "status %u", status);
8076
8077         mgmt_cp = cmd->param;
8078
8079         if (status) {
8080                 status = mgmt_status(status);
8081                 eir_len = 0;
8082
8083                 h192 = NULL;
8084                 r192 = NULL;
8085                 h256 = NULL;
8086                 r256 = NULL;
8087         } else if (!bredr_sc_enabled(hdev)) {
8088                 struct hci_rp_read_local_oob_data *rp;
8089
8090                 if (skb->len != sizeof(*rp)) {
8091                         status = MGMT_STATUS_FAILED;
8092                         eir_len = 0;
8093                 } else {
8094                         status = MGMT_STATUS_SUCCESS;
8095                         rp = (void *)skb->data;
8096
8097                         eir_len = 5 + 18 + 18;
8098                         h192 = rp->hash;
8099                         r192 = rp->rand;
8100                         h256 = NULL;
8101                         r256 = NULL;
8102                 }
8103         } else {
8104                 struct hci_rp_read_local_oob_ext_data *rp;
8105
8106                 if (skb->len != sizeof(*rp)) {
8107                         status = MGMT_STATUS_FAILED;
8108                         eir_len = 0;
8109                 } else {
8110                         status = MGMT_STATUS_SUCCESS;
8111                         rp = (void *)skb->data;
8112
8113                         if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8114                                 eir_len = 5 + 18 + 18;
8115                                 h192 = NULL;
8116                                 r192 = NULL;
8117                         } else {
8118                                 eir_len = 5 + 18 + 18 + 18 + 18;
8119                                 h192 = rp->hash192;
8120                                 r192 = rp->rand192;
8121                         }
8122
8123                         h256 = rp->hash256;
8124                         r256 = rp->rand256;
8125                 }
8126         }
8127
8128         mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8129         if (!mgmt_rp)
8130                 goto done;
8131
8132         if (eir_len == 0)
8133                 goto send_rsp;
8134
8135         eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8136                                   hdev->dev_class, 3);
8137
8138         if (h192 && r192) {
8139                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8140                                           EIR_SSP_HASH_C192, h192, 16);
8141                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8142                                           EIR_SSP_RAND_R192, r192, 16);
8143         }
8144
8145         if (h256 && r256) {
8146                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8147                                           EIR_SSP_HASH_C256, h256, 16);
8148                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8149                                           EIR_SSP_RAND_R256, r256, 16);
8150         }
8151
8152 send_rsp:
8153         mgmt_rp->type = mgmt_cp->type;
8154         mgmt_rp->eir_len = cpu_to_le16(eir_len);
8155
8156         err = mgmt_cmd_complete(cmd->sk, hdev->id,
8157                                 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8158                                 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8159         if (err < 0 || status)
8160                 goto done;
8161
8162         hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8163
8164         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8165                                  mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8166                                  HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8167 done:
8168         if (skb && !IS_ERR(skb))
8169                 kfree_skb(skb);
8170
8171         kfree(mgmt_rp);
8172         mgmt_pending_remove(cmd);
8173 }
8174
8175 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8176                                   struct mgmt_cp_read_local_oob_ext_data *cp)
8177 {
8178         struct mgmt_pending_cmd *cmd;
8179         int err;
8180
8181         cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8182                                cp, sizeof(*cp));
8183         if (!cmd)
8184                 return -ENOMEM;
8185
8186         err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8187                                  read_local_oob_ext_data_complete);
8188
8189         if (err < 0) {
8190                 mgmt_pending_remove(cmd);
8191                 return err;
8192         }
8193
8194         return 0;
8195 }
8196
8197 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8198                                    void *data, u16 data_len)
8199 {
8200         struct mgmt_cp_read_local_oob_ext_data *cp = data;
8201         struct mgmt_rp_read_local_oob_ext_data *rp;
8202         size_t rp_len;
8203         u16 eir_len;
8204         u8 status, flags, role, addr[7], hash[16], rand[16];
8205         int err;
8206
8207         bt_dev_dbg(hdev, "sock %p", sk);
8208
8209         if (hdev_is_powered(hdev)) {
8210                 switch (cp->type) {
8211                 case BIT(BDADDR_BREDR):
8212                         status = mgmt_bredr_support(hdev);
8213                         if (status)
8214                                 eir_len = 0;
8215                         else
8216                                 eir_len = 5;
8217                         break;
8218                 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8219                         status = mgmt_le_support(hdev);
8220                         if (status)
8221                                 eir_len = 0;
8222                         else
8223                                 eir_len = 9 + 3 + 18 + 18 + 3;
8224                         break;
8225                 default:
8226                         status = MGMT_STATUS_INVALID_PARAMS;
8227                         eir_len = 0;
8228                         break;
8229                 }
8230         } else {
8231                 status = MGMT_STATUS_NOT_POWERED;
8232                 eir_len = 0;
8233         }
8234
8235         rp_len = sizeof(*rp) + eir_len;
8236         rp = kmalloc(rp_len, GFP_ATOMIC);
8237         if (!rp)
8238                 return -ENOMEM;
8239
8240         if (!status && !lmp_ssp_capable(hdev)) {
8241                 status = MGMT_STATUS_NOT_SUPPORTED;
8242                 eir_len = 0;
8243         }
8244
8245         if (status)
8246                 goto complete;
8247
8248         hci_dev_lock(hdev);
8249
8250         eir_len = 0;
8251         switch (cp->type) {
8252         case BIT(BDADDR_BREDR):
8253                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8254                         err = read_local_ssp_oob_req(hdev, sk, cp);
8255                         hci_dev_unlock(hdev);
8256                         if (!err)
8257                                 goto done;
8258
8259                         status = MGMT_STATUS_FAILED;
8260                         goto complete;
8261                 } else {
8262                         eir_len = eir_append_data(rp->eir, eir_len,
8263                                                   EIR_CLASS_OF_DEV,
8264                                                   hdev->dev_class, 3);
8265                 }
8266                 break;
8267         case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8268                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8269                     smp_generate_oob(hdev, hash, rand) < 0) {
8270                         hci_dev_unlock(hdev);
8271                         status = MGMT_STATUS_FAILED;
8272                         goto complete;
8273                 }
8274
8275                 /* This should return the active RPA, but since the RPA
8276                  * is only programmed on demand, it is really hard to fill
8277                  * this in at the moment. For now disallow retrieving
8278                  * local out-of-band data when privacy is in use.
8279                  *
8280                  * Returning the identity address will not help here since
8281                  * pairing happens before the identity resolving key is
8282                  * known and thus the connection establishment happens
8283                  * based on the RPA and not the identity address.
8284                  */
8285                 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8286                         hci_dev_unlock(hdev);
8287                         status = MGMT_STATUS_REJECTED;
8288                         goto complete;
8289                 }
8290
8291                 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8292                    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8293                    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8294                     bacmp(&hdev->static_addr, BDADDR_ANY))) {
8295                         memcpy(addr, &hdev->static_addr, 6);
8296                         addr[6] = 0x01;
8297                 } else {
8298                         memcpy(addr, &hdev->bdaddr, 6);
8299                         addr[6] = 0x00;
8300                 }
8301
8302                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8303                                           addr, sizeof(addr));
8304
8305                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8306                         role = 0x02;
8307                 else
8308                         role = 0x01;
8309
8310                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8311                                           &role, sizeof(role));
8312
8313                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8314                         eir_len = eir_append_data(rp->eir, eir_len,
8315                                                   EIR_LE_SC_CONFIRM,
8316                                                   hash, sizeof(hash));
8317
8318                         eir_len = eir_append_data(rp->eir, eir_len,
8319                                                   EIR_LE_SC_RANDOM,
8320                                                   rand, sizeof(rand));
8321                 }
8322
8323                 flags = mgmt_get_adv_discov_flags(hdev);
8324
8325                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8326                         flags |= LE_AD_NO_BREDR;
8327
8328                 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8329                                           &flags, sizeof(flags));
8330                 break;
8331         }
8332
8333         hci_dev_unlock(hdev);
8334
8335         hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8336
8337         status = MGMT_STATUS_SUCCESS;
8338
8339 complete:
8340         rp->type = cp->type;
8341         rp->eir_len = cpu_to_le16(eir_len);
8342
8343         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8344                                 status, rp, sizeof(*rp) + eir_len);
8345         if (err < 0 || status)
8346                 goto done;
8347
8348         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8349                                  rp, sizeof(*rp) + eir_len,
8350                                  HCI_MGMT_OOB_DATA_EVENTS, sk);
8351
8352 done:
8353         kfree(rp);
8354
8355         return err;
8356 }
8357
8358 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8359 {
8360         u32 flags = 0;
8361
8362         flags |= MGMT_ADV_FLAG_CONNECTABLE;
8363         flags |= MGMT_ADV_FLAG_DISCOV;
8364         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8365         flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8366         flags |= MGMT_ADV_FLAG_APPEARANCE;
8367         flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8368         flags |= MGMT_ADV_PARAM_DURATION;
8369         flags |= MGMT_ADV_PARAM_TIMEOUT;
8370         flags |= MGMT_ADV_PARAM_INTERVALS;
8371         flags |= MGMT_ADV_PARAM_TX_POWER;
8372         flags |= MGMT_ADV_PARAM_SCAN_RSP;
8373
8374         /* With extended advertising, the TX_POWER returned from Set Adv Param
8375          * will always be valid.
8376          */
8377         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8378                 flags |= MGMT_ADV_FLAG_TX_POWER;
8379
8380         if (ext_adv_capable(hdev)) {
8381                 flags |= MGMT_ADV_FLAG_SEC_1M;
8382                 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8383                 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8384
8385                 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8386                         flags |= MGMT_ADV_FLAG_SEC_2M;
8387
8388                 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8389                         flags |= MGMT_ADV_FLAG_SEC_CODED;
8390         }
8391
8392         return flags;
8393 }
8394
8395 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8396                              void *data, u16 data_len)
8397 {
8398         struct mgmt_rp_read_adv_features *rp;
8399         size_t rp_len;
8400         int err;
8401         struct adv_info *adv_instance;
8402         u32 supported_flags;
8403         u8 *instance;
8404
8405         bt_dev_dbg(hdev, "sock %p", sk);
8406
8407         if (!lmp_le_capable(hdev))
8408                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8409                                        MGMT_STATUS_REJECTED);
8410
8411         hci_dev_lock(hdev);
8412
8413         rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8414         rp = kmalloc(rp_len, GFP_ATOMIC);
8415         if (!rp) {
8416                 hci_dev_unlock(hdev);
8417                 return -ENOMEM;
8418         }
8419
8420         supported_flags = get_supported_adv_flags(hdev);
8421
8422         rp->supported_flags = cpu_to_le32(supported_flags);
8423         rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
8424         rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
8425         rp->max_instances = hdev->le_num_of_adv_sets;
8426         rp->num_instances = hdev->adv_instance_cnt;
8427
8428         instance = rp->instance;
8429         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8430                 /* Only instances 1-le_num_of_adv_sets are externally visible */
8431                 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8432                         *instance = adv_instance->instance;
8433                         instance++;
8434                 } else {
8435                         rp->num_instances--;
8436                         rp_len--;
8437                 }
8438         }
8439
8440         hci_dev_unlock(hdev);
8441
8442         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8443                                 MGMT_STATUS_SUCCESS, rp, rp_len);
8444
8445         kfree(rp);
8446
8447         return err;
8448 }
8449
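     /* Space that eir_append_local_name() would consume for the current name. */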
8450 static u8 calculate_name_len(struct hci_dev *hdev)
8451 {
8452         u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8453
8454         return eir_append_local_name(hdev, buf, 0);
8455 }
8456
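     /* Remaining advertising or scan response payload once room is reserved
      * for the fields the kernel manages itself (flags, TX power, local name,
      * appearance), depending on the requested adv_flags.
      */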
8457 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8458                            bool is_adv_data)
8459 {
8460         u8 max_len = HCI_MAX_AD_LENGTH;
8461
8462         if (is_adv_data) {
8463                 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8464                                  MGMT_ADV_FLAG_LIMITED_DISCOV |
8465                                  MGMT_ADV_FLAG_MANAGED_FLAGS))
8466                         max_len -= 3;
8467
8468                 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8469                         max_len -= 3;
8470         } else {
8471                 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8472                         max_len -= calculate_name_len(hdev);
8473
8474                 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8475                         max_len -= 4;
8476         }
8477
8478         return max_len;
8479 }
8480
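     /* Helpers telling whether a given field is generated by the kernel for
      * this instance (based on the requested adv_flags) and therefore must not
      * appear in the user-supplied advertising or scan response data.
      */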
8481 static bool flags_managed(u32 adv_flags)
8482 {
8483         return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8484                             MGMT_ADV_FLAG_LIMITED_DISCOV |
8485                             MGMT_ADV_FLAG_MANAGED_FLAGS);
8486 }
8487
8488 static bool tx_power_managed(u32 adv_flags)
8489 {
8490         return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8491 }
8492
8493 static bool name_managed(u32 adv_flags)
8494 {
8495         return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8496 }
8497
8498 static bool appearance_managed(u32 adv_flags)
8499 {
8500         return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8501 }
8502
8503 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8504                               u8 len, bool is_adv_data)
8505 {
8506         int i, cur_len;
8507         u8 max_len;
8508
8509         max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8510
8511         if (len > max_len)
8512                 return false;
8513
8514         /* Make sure that the data is correctly formatted. */
8515         for (i = 0; i < len; i += (cur_len + 1)) {
8516                 cur_len = data[i];
8517
8518                 if (!cur_len)
8519                         continue;
8520
8521                 if (data[i + 1] == EIR_FLAGS &&
8522                     (!is_adv_data || flags_managed(adv_flags)))
8523                         return false;
8524
8525                 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8526                         return false;
8527
8528                 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8529                         return false;
8530
8531                 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8532                         return false;
8533
8534                 if (data[i + 1] == EIR_APPEARANCE &&
8535                     appearance_managed(adv_flags))
8536                         return false;
8537
8538                 /* If the current field length would exceed the total data
8539                  * length, then it's invalid.
8540                  */
8541                 if (i + cur_len >= len)
8542                         return false;
8543         }
8544
8545         return true;
8546 }
8547
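     /* Validate the flags requested for an advertising instance: anything
      * outside the supported set is rejected, and at most one secondary PHY
      * flag may be set (phy_flags & -phy_flags isolates the lowest set bit,
      * so the XOR is non-zero whenever more than one SEC bit is requested).
      */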
8548 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8549 {
8550         u32 supported_flags, phy_flags;
8551
8552         /* The current implementation only supports a subset of the specified
8553          * flags. We also need to check that at most one of the sec flags is set.
8554          */
8555         supported_flags = get_supported_adv_flags(hdev);
8556         phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8557         if (adv_flags & ~supported_flags ||
8558             ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8559                 return false;
8560
8561         return true;
8562 }
8563
8564 static bool adv_busy(struct hci_dev *hdev)
8565 {
8566         return pending_find(MGMT_OP_SET_LE, hdev);
8567 }
8568
8569 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8570                              int err)
8571 {
8572         struct adv_info *adv, *n;
8573
8574         bt_dev_dbg(hdev, "err %d", err);
8575
8576         hci_dev_lock(hdev);
8577
8578         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8579                 u8 instance;
8580
8581                 if (!adv->pending)
8582                         continue;
8583
8584                 if (!err) {
8585                         adv->pending = false;
8586                         continue;
8587                 }
8588
8589                 instance = adv->instance;
8590
8591                 if (hdev->cur_adv_instance == instance)
8592                         cancel_adv_timeout(hdev);
8593
8594                 hci_remove_adv_instance(hdev, instance);
8595                 mgmt_advertising_removed(sk, hdev, instance);
8596         }
8597
8598         hci_dev_unlock(hdev);
8599 }
8600
8601 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8602 {
8603         struct mgmt_pending_cmd *cmd = data;
8604         struct mgmt_cp_add_advertising *cp = cmd->param;
8605         struct mgmt_rp_add_advertising rp;
8606
8607         memset(&rp, 0, sizeof(rp));
8608
8609         rp.instance = cp->instance;
8610
8611         if (err)
8612                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8613                                 mgmt_status(err));
8614         else
8615                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8616                                   mgmt_status(err), &rp, sizeof(rp));
8617
8618         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8619
8620         mgmt_pending_free(cmd);
8621 }
8622
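     /* Runs from the hci_sync command queue: (re)schedule the instance chosen
      * by add_advertising(), forcing it to become the advertised instance.
      */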
8623 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8624 {
8625         struct mgmt_pending_cmd *cmd = data;
8626         struct mgmt_cp_add_advertising *cp = cmd->param;
8627
8628         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8629 }
8630
8631 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8632                            void *data, u16 data_len)
8633 {
8634         struct mgmt_cp_add_advertising *cp = data;
8635         struct mgmt_rp_add_advertising rp;
8636         u32 flags;
8637         u8 status;
8638         u16 timeout, duration;
8639         unsigned int prev_instance_cnt;
8640         u8 schedule_instance = 0;
8641         struct adv_info *adv, *next_instance;
8642         int err;
8643         struct mgmt_pending_cmd *cmd;
8644
8645         bt_dev_dbg(hdev, "sock %p", sk);
8646
8647         status = mgmt_le_support(hdev);
8648         if (status)
8649                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8650                                        status);
8651
8652         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8653                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8654                                        MGMT_STATUS_INVALID_PARAMS);
8655
8656         if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8657                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8658                                        MGMT_STATUS_INVALID_PARAMS);
8659
8660         flags = __le32_to_cpu(cp->flags);
8661         timeout = __le16_to_cpu(cp->timeout);
8662         duration = __le16_to_cpu(cp->duration);
8663
8664         if (!requested_adv_flags_are_valid(hdev, flags))
8665                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8666                                        MGMT_STATUS_INVALID_PARAMS);
8667
8668         hci_dev_lock(hdev);
8669
8670         if (timeout && !hdev_is_powered(hdev)) {
8671                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8672                                       MGMT_STATUS_REJECTED);
8673                 goto unlock;
8674         }
8675
8676         if (adv_busy(hdev)) {
8677                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8678                                       MGMT_STATUS_BUSY);
8679                 goto unlock;
8680         }
8681
8682         if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8683             !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8684                                cp->scan_rsp_len, false)) {
8685                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8686                                       MGMT_STATUS_INVALID_PARAMS);
8687                 goto unlock;
8688         }
8689
8690         prev_instance_cnt = hdev->adv_instance_cnt;
8691
8692         adv = hci_add_adv_instance(hdev, cp->instance, flags,
8693                                    cp->adv_data_len, cp->data,
8694                                    cp->scan_rsp_len,
8695                                    cp->data + cp->adv_data_len,
8696                                    timeout, duration,
8697                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
8698                                    hdev->le_adv_min_interval,
8699                                    hdev->le_adv_max_interval, 0);
8700         if (IS_ERR(adv)) {
8701                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8702                                       MGMT_STATUS_FAILED);
8703                 goto unlock;
8704         }
8705
8706         /* Only trigger an advertising added event if a new instance was
8707          * actually added.
8708          */
8709         if (hdev->adv_instance_cnt > prev_instance_cnt)
8710                 mgmt_advertising_added(sk, hdev, cp->instance);
8711
8712         if (hdev->cur_adv_instance == cp->instance) {
8713                 /* If the currently advertised instance is being changed then
8714                  * cancel the current advertising and schedule the next
8715                  * instance. If there is only one instance then the overridden
8716                  * advertising data will be visible right away.
8717                  */
8718                 cancel_adv_timeout(hdev);
8719
8720                 next_instance = hci_get_next_instance(hdev, cp->instance);
8721                 if (next_instance)
8722                         schedule_instance = next_instance->instance;
8723         } else if (!hdev->adv_instance_timeout) {
8724                 /* Immediately advertise the new instance if no other
8725                  * instance is currently being advertised.
8726                  */
8727                 schedule_instance = cp->instance;
8728         }
8729
8730         /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8731          * there is no instance to be advertised then we have no HCI
8732          * communication to make. Simply return.
8733          */
8734         if (!hdev_is_powered(hdev) ||
8735             hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8736             !schedule_instance) {
8737                 rp.instance = cp->instance;
8738                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8739                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8740                 goto unlock;
8741         }
8742
8743         /* We're good to go, update advertising data, parameters, and start
8744          * advertising.
8745          */
8746         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8747                                data_len);
8748         if (!cmd) {
8749                 err = -ENOMEM;
8750                 goto unlock;
8751         }
8752
8753         cp->instance = schedule_instance;
8754
8755         err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8756                                  add_advertising_complete);
8757         if (err < 0)
8758                 mgmt_pending_free(cmd);
8759
8760 unlock:
8761         hci_dev_unlock(hdev);
8762
8763         return err;
8764 }
8765
8766 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8767                                         int err)
8768 {
8769         struct mgmt_pending_cmd *cmd = data;
8770         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8771         struct mgmt_rp_add_ext_adv_params rp;
8772         struct adv_info *adv;
8773         u32 flags;
8774
8775         BT_DBG("%s", hdev->name);
8776
8777         hci_dev_lock(hdev);
8778
8779         adv = hci_find_adv_instance(hdev, cp->instance);
8780         if (!adv)
8781                 goto unlock;
8782
8783         rp.instance = cp->instance;
8784         rp.tx_power = adv->tx_power;
8785
8786         /* While we're at it, inform userspace of the available space for this
8787          * advertisement, given the flags that will be used.
8788          */
8789         flags = __le32_to_cpu(cp->flags);
8790         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8791         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8792
8793         if (err) {
8794                 /* If this advertisement was previously advertising and we
8795                  * failed to update it, signal that it has been removed and
8796                  * delete its structure.
8797                  */
8798                 if (!adv->pending)
8799                         mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8800
8801                 hci_remove_adv_instance(hdev, cp->instance);
8802
8803                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8804                                 mgmt_status(err));
8805         } else {
8806                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8807                                   mgmt_status(err), &rp, sizeof(rp));
8808         }
8809
8810 unlock:
8811         if (cmd)
8812                 mgmt_pending_free(cmd);
8813
8814         hci_dev_unlock(hdev);
8815 }
8816
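/* hci_cmd_sync work callback: program the controller with the extended
 * advertising parameters of this instance via hci_setup_ext_adv_instance_sync().
 */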
8817 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8818 {
8819         struct mgmt_pending_cmd *cmd = data;
8820         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8821
8822         return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8823 }
8824
8825 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8826                               void *data, u16 data_len)
8827 {
8828         struct mgmt_cp_add_ext_adv_params *cp = data;
8829         struct mgmt_rp_add_ext_adv_params rp;
8830         struct mgmt_pending_cmd *cmd = NULL;
8831         struct adv_info *adv;
8832         u32 flags, min_interval, max_interval;
8833         u16 timeout, duration;
8834         u8 status;
8835         s8 tx_power;
8836         int err;
8837
8838         BT_DBG("%s", hdev->name);
8839
8840         status = mgmt_le_support(hdev);
8841         if (status)
8842                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8843                                        status);
8844
8845         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8846                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8847                                        MGMT_STATUS_INVALID_PARAMS);
8848
8849         /* The purpose of breaking add_advertising into two separate MGMT calls
8850          * for params and data is to allow more parameters to be added to this
8851          * structure in the future. For this reason, we only verify the bare
8852          * minimum structure that was known when the interface was defined. Any
8853          * extra parameters we don't know about will be ignored in this request.
8854          */
8855         if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8856                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8857                                        MGMT_STATUS_INVALID_PARAMS);
8858
8859         flags = __le32_to_cpu(cp->flags);
8860
8861         if (!requested_adv_flags_are_valid(hdev, flags))
8862                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8863                                        MGMT_STATUS_INVALID_PARAMS);
8864
8865         hci_dev_lock(hdev);
8866
8867         /* In the new interface, we require the controller to be powered to register */
8868         if (!hdev_is_powered(hdev)) {
8869                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8870                                       MGMT_STATUS_REJECTED);
8871                 goto unlock;
8872         }
8873
8874         if (adv_busy(hdev)) {
8875                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8876                                       MGMT_STATUS_BUSY);
8877                 goto unlock;
8878         }
8879
8880         /* Parse defined parameters from request, use defaults otherwise */
8881         timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8882                   __le16_to_cpu(cp->timeout) : 0;
8883
8884         duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8885                    __le16_to_cpu(cp->duration) :
8886                    hdev->def_multi_adv_rotation_duration;
8887
8888         min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8889                        __le32_to_cpu(cp->min_interval) :
8890                        hdev->le_adv_min_interval;
8891
8892         max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8893                        __le32_to_cpu(cp->max_interval) :
8894                        hdev->le_adv_max_interval;
8895
8896         tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8897                    cp->tx_power :
8898                    HCI_ADV_TX_POWER_NO_PREFERENCE;
8899
8900         /* Create advertising instance with no advertising or response data */
8901         adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8902                                    timeout, duration, tx_power, min_interval,
8903                                    max_interval, 0);
8904
8905         if (IS_ERR(adv)) {
8906                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8907                                       MGMT_STATUS_FAILED);
8908                 goto unlock;
8909         }
8910
8911         /* Submit request for advertising params if ext adv available */
8912         if (ext_adv_capable(hdev)) {
8913                 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8914                                        data, data_len);
8915                 if (!cmd) {
8916                         err = -ENOMEM;
8917                         hci_remove_adv_instance(hdev, cp->instance);
8918                         goto unlock;
8919                 }
8920
8921                 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8922                                          add_ext_adv_params_complete);
8923                 if (err < 0)
8924                         mgmt_pending_free(cmd);
8925         } else {
8926                 rp.instance = cp->instance;
8927                 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8928                 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8929                 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8930                 err = mgmt_cmd_complete(sk, hdev->id,
8931                                         MGMT_OP_ADD_EXT_ADV_PARAMS,
8932                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8933         }
8934
8935 unlock:
8936         hci_dev_unlock(hdev);
8937
8938         return err;
8939 }
8940
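/* Completion callback for MGMT_OP_ADD_EXT_ADV_DATA: forward the result to
 * add_adv_complete() and then reply to the caller with the instance number.
 */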
8941 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8942 {
8943         struct mgmt_pending_cmd *cmd = data;
8944         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8945         struct mgmt_rp_add_advertising rp;
8946
8947         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8948
8949         memset(&rp, 0, sizeof(rp));
8950
8951         rp.instance = cp->instance;
8952
8953         if (err)
8954                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8955                                 mgmt_status(err));
8956         else
8957                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8958                                   mgmt_status(err), &rp, sizeof(rp));
8959
8960         mgmt_pending_free(cmd);
8961 }
8962
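/* hci_cmd_sync work callback: for extended advertising, update the advertising
 * and scan response data and enable the instance; otherwise fall back to
 * scheduling the legacy advertising instance.
 */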
8963 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8964 {
8965         struct mgmt_pending_cmd *cmd = data;
8966         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8967         int err;
8968
8969         if (ext_adv_capable(hdev)) {
8970                 err = hci_update_adv_data_sync(hdev, cp->instance);
8971                 if (err)
8972                         return err;
8973
8974                 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8975                 if (err)
8976                         return err;
8977
8978                 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8979         }
8980
8981         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8982 }
8983
8984 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8985                             u16 data_len)
8986 {
8987         struct mgmt_cp_add_ext_adv_data *cp = data;
8988         struct mgmt_rp_add_ext_adv_data rp;
8989         u8 schedule_instance = 0;
8990         struct adv_info *next_instance;
8991         struct adv_info *adv_instance;
8992         int err = 0;
8993         struct mgmt_pending_cmd *cmd;
8994
8995         BT_DBG("%s", hdev->name);
8996
8997         hci_dev_lock(hdev);
8998
8999         adv_instance = hci_find_adv_instance(hdev, cp->instance);
9000
9001         if (!adv_instance) {
9002                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9003                                       MGMT_STATUS_INVALID_PARAMS);
9004                 goto unlock;
9005         }
9006
9007         /* In the new interface, we require the controller to be powered to register */
9008         if (!hdev_is_powered(hdev)) {
9009                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9010                                       MGMT_STATUS_REJECTED);
9011                 goto clear_new_instance;
9012         }
9013
9014         if (adv_busy(hdev)) {
9015                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9016                                       MGMT_STATUS_BUSY);
9017                 goto clear_new_instance;
9018         }
9019
9020         /* Validate new data */
9021         if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9022                                cp->adv_data_len, true) ||
9023             !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9024                                cp->adv_data_len, cp->scan_rsp_len, false)) {
9025                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9026                                       MGMT_STATUS_INVALID_PARAMS);
9027                 goto clear_new_instance;
9028         }
9029
9030         /* Set the data in the advertising instance */
9031         hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9032                                   cp->data, cp->scan_rsp_len,
9033                                   cp->data + cp->adv_data_len);
9034
9035         /* If using software rotation, determine next instance to use */
9036         if (hdev->cur_adv_instance == cp->instance) {
9037                 /* If the currently advertised instance is being changed,
9038                  * then cancel the current advertising and schedule the
9039                  * next instance. If there is only one instance then the
9040                  * overridden advertising data will be visible right
9041                  * away.
9042                  */
9043                 cancel_adv_timeout(hdev);
9044
9045                 next_instance = hci_get_next_instance(hdev, cp->instance);
9046                 if (next_instance)
9047                         schedule_instance = next_instance->instance;
9048         } else if (!hdev->adv_instance_timeout) {
9049                 /* Immediately advertise the new instance if no other
9050                  * instance is currently being advertised.
9051                  */
9052                 schedule_instance = cp->instance;
9053         }
9054
9055         /* If the HCI_ADVERTISING flag is set or there is no instance to
9056          * be advertised, then there is no HCI communication to make.
9057          * Simply return.
9058          */
9059         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9060                 if (adv_instance->pending) {
9061                         mgmt_advertising_added(sk, hdev, cp->instance);
9062                         adv_instance->pending = false;
9063                 }
9064                 rp.instance = cp->instance;
9065                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9066                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9067                 goto unlock;
9068         }
9069
9070         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9071                                data_len);
9072         if (!cmd) {
9073                 err = -ENOMEM;
9074                 goto clear_new_instance;
9075         }
9076
9077         err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9078                                  add_ext_adv_data_complete);
9079         if (err < 0) {
9080                 mgmt_pending_free(cmd);
9081                 goto clear_new_instance;
9082         }
9083
9084         /* We were successful in updating the data, so trigger an
9085          * advertising_added event if this instance wasn't previously
9086          * advertising. If a failure occurs in the requests we initiated,
9087          * the instance will be removed again in add_ext_adv_data_complete().
9088          */
9089         if (adv_instance->pending)
9090                 mgmt_advertising_added(sk, hdev, cp->instance);
9091
9092         goto unlock;
9093
9094 clear_new_instance:
9095         hci_remove_adv_instance(hdev, cp->instance);
9096
9097 unlock:
9098         hci_dev_unlock(hdev);
9099
9100         return err;
9101 }
9102
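/* Completion callback for MGMT_OP_REMOVE_ADVERTISING: reply with the removed
 * instance number on success, or a mapped error status on failure.
 */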
9103 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9104                                         int err)
9105 {
9106         struct mgmt_pending_cmd *cmd = data;
9107         struct mgmt_cp_remove_advertising *cp = cmd->param;
9108         struct mgmt_rp_remove_advertising rp;
9109
9110         bt_dev_dbg(hdev, "err %d", err);
9111
9112         memset(&rp, 0, sizeof(rp));
9113         rp.instance = cp->instance;
9114
9115         if (err)
9116                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9117                                 mgmt_status(err));
9118         else
9119                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9120                                   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9121
9122         mgmt_pending_free(cmd);
9123 }
9124
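/* hci_cmd_sync work callback: remove the requested instance (0 means all
 * instances) and, if no instances remain, disable advertising altogether.
 */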
9125 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9126 {
9127         struct mgmt_pending_cmd *cmd = data;
9128         struct mgmt_cp_remove_advertising *cp = cmd->param;
9129         int err;
9130
9131         err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9132         if (err)
9133                 return err;
9134
9135         if (list_empty(&hdev->adv_instances))
9136                 err = hci_disable_advertising_sync(hdev);
9137
9138         return err;
9139 }
9140
9141 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9142                               void *data, u16 data_len)
9143 {
9144         struct mgmt_cp_remove_advertising *cp = data;
9145         struct mgmt_pending_cmd *cmd;
9146         int err;
9147
9148         bt_dev_dbg(hdev, "sock %p", sk);
9149
9150         hci_dev_lock(hdev);
9151
9152         if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9153                 err = mgmt_cmd_status(sk, hdev->id,
9154                                       MGMT_OP_REMOVE_ADVERTISING,
9155                                       MGMT_STATUS_INVALID_PARAMS);
9156                 goto unlock;
9157         }
9158
9159         if (pending_find(MGMT_OP_SET_LE, hdev)) {
9160                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9161                                       MGMT_STATUS_BUSY);
9162                 goto unlock;
9163         }
9164
9165         if (list_empty(&hdev->adv_instances)) {
9166                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9167                                       MGMT_STATUS_INVALID_PARAMS);
9168                 goto unlock;
9169         }
9170
9171         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9172                                data_len);
9173         if (!cmd) {
9174                 err = -ENOMEM;
9175                 goto unlock;
9176         }
9177
9178         err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9179                                  remove_advertising_complete);
9180         if (err < 0)
9181                 mgmt_pending_free(cmd);
9182
9183 unlock:
9184         hci_dev_unlock(hdev);
9185
9186         return err;
9187 }
9188
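/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: report the maximum advertising and
 * scan response data lengths for the requested instance and flags without
 * touching controller state.
 */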
9189 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9190                              void *data, u16 data_len)
9191 {
9192         struct mgmt_cp_get_adv_size_info *cp = data;
9193         struct mgmt_rp_get_adv_size_info rp;
9194         u32 flags, supported_flags;
9195
9196         bt_dev_dbg(hdev, "sock %p", sk);
9197
9198         if (!lmp_le_capable(hdev))
9199                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9200                                        MGMT_STATUS_REJECTED);
9201
9202         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9203                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9204                                        MGMT_STATUS_INVALID_PARAMS);
9205
9206         flags = __le32_to_cpu(cp->flags);
9207
9208         /* The current implementation only supports a subset of the specified
9209          * flags.
9210          */
9211         supported_flags = get_supported_adv_flags(hdev);
9212         if (flags & ~supported_flags)
9213                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9214                                        MGMT_STATUS_INVALID_PARAMS);
9215
9216         rp.instance = cp->instance;
9217         rp.flags = cp->flags;
9218         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9219         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9220
9221         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9222                                  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9223 }
9224
9225 static const struct hci_mgmt_handler mgmt_handlers[] = {
9226         { NULL }, /* 0x0000 (no command) */
9227         { read_version,            MGMT_READ_VERSION_SIZE,
9228                                                 HCI_MGMT_NO_HDEV |
9229                                                 HCI_MGMT_UNTRUSTED },
9230         { read_commands,           MGMT_READ_COMMANDS_SIZE,
9231                                                 HCI_MGMT_NO_HDEV |
9232                                                 HCI_MGMT_UNTRUSTED },
9233         { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
9234                                                 HCI_MGMT_NO_HDEV |
9235                                                 HCI_MGMT_UNTRUSTED },
9236         { read_controller_info,    MGMT_READ_INFO_SIZE,
9237                                                 HCI_MGMT_UNTRUSTED },
9238         { set_powered,             MGMT_SETTING_SIZE },
9239         { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
9240         { set_connectable,         MGMT_SETTING_SIZE },
9241         { set_fast_connectable,    MGMT_SETTING_SIZE },
9242         { set_bondable,            MGMT_SETTING_SIZE },
9243         { set_link_security,       MGMT_SETTING_SIZE },
9244         { set_ssp,                 MGMT_SETTING_SIZE },
9245         { set_hs,                  MGMT_SETTING_SIZE },
9246         { set_le,                  MGMT_SETTING_SIZE },
9247         { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
9248         { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
9249         { add_uuid,                MGMT_ADD_UUID_SIZE },
9250         { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
9251         { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
9252                                                 HCI_MGMT_VAR_LEN },
9253         { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9254                                                 HCI_MGMT_VAR_LEN },
9255         { disconnect,              MGMT_DISCONNECT_SIZE },
9256         { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
9257         { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
9258         { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
9259         { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
9260         { pair_device,             MGMT_PAIR_DEVICE_SIZE },
9261         { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
9262         { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
9263         { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
9264         { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9265         { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
9266         { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9267         { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
9268         { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9269                                                 HCI_MGMT_VAR_LEN },
9270         { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9271         { start_discovery,         MGMT_START_DISCOVERY_SIZE },
9272         { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
9273         { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
9274         { block_device,            MGMT_BLOCK_DEVICE_SIZE },
9275         { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
9276         { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
9277         { set_advertising,         MGMT_SETTING_SIZE },
9278         { set_bredr,               MGMT_SETTING_SIZE },
9279         { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
9280         { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
9281         { set_secure_conn,         MGMT_SETTING_SIZE },
9282         { set_debug_keys,          MGMT_SETTING_SIZE },
9283         { set_privacy,             MGMT_SET_PRIVACY_SIZE },
9284         { load_irks,               MGMT_LOAD_IRKS_SIZE,
9285                                                 HCI_MGMT_VAR_LEN },
9286         { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
9287         { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
9288         { add_device,              MGMT_ADD_DEVICE_SIZE },
9289         { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
9290         { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
9291                                                 HCI_MGMT_VAR_LEN },
9292         { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9293                                                 HCI_MGMT_NO_HDEV |
9294                                                 HCI_MGMT_UNTRUSTED },
9295         { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
9296                                                 HCI_MGMT_UNCONFIGURED |
9297                                                 HCI_MGMT_UNTRUSTED },
9298         { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
9299                                                 HCI_MGMT_UNCONFIGURED },
9300         { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
9301                                                 HCI_MGMT_UNCONFIGURED },
9302         { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9303                                                 HCI_MGMT_VAR_LEN },
9304         { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9305         { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
9306                                                 HCI_MGMT_NO_HDEV |
9307                                                 HCI_MGMT_UNTRUSTED },
9308         { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
9309         { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
9310                                                 HCI_MGMT_VAR_LEN },
9311         { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
9312         { get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
9313         { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9314         { read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
9315                                                 HCI_MGMT_UNTRUSTED },
9316         { set_appearance,          MGMT_SET_APPEARANCE_SIZE },
9317         { get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
9318         { set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
9319         { set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9320                                                 HCI_MGMT_VAR_LEN },
9321         { set_wideband_speech,     MGMT_SETTING_SIZE },
9322         { read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
9323                                                 HCI_MGMT_UNTRUSTED },
9324         { read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
9325                                                 HCI_MGMT_UNTRUSTED |
9326                                                 HCI_MGMT_HDEV_OPTIONAL },
9327         { set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
9328                                                 HCI_MGMT_VAR_LEN |
9329                                                 HCI_MGMT_HDEV_OPTIONAL },
9330         { read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9331                                                 HCI_MGMT_UNTRUSTED },
9332         { set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9333                                                 HCI_MGMT_VAR_LEN },
9334         { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9335                                                 HCI_MGMT_UNTRUSTED },
9336         { set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9337                                                 HCI_MGMT_VAR_LEN },
9338         { get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
9339         { set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
9340         { read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9341         { add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9342                                                 HCI_MGMT_VAR_LEN },
9343         { remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
9344         { add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9345                                                 HCI_MGMT_VAR_LEN },
9346         { add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
9347                                                 HCI_MGMT_VAR_LEN },
9348         { add_adv_patterns_monitor_rssi,
9349                                    MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE },
9350         { set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
9351                                                 HCI_MGMT_VAR_LEN },
9352         { mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
9353         { mesh_send,               MGMT_MESH_SEND_SIZE,
9354                                                 HCI_MGMT_VAR_LEN },
9355         { mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
9356 };
9357
9358 void mgmt_index_added(struct hci_dev *hdev)
9359 {
9360         struct mgmt_ev_ext_index ev;
9361
9362         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9363                 return;
9364
9365         switch (hdev->dev_type) {
9366         case HCI_PRIMARY:
9367                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9368                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9369                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9370                         ev.type = 0x01;
9371                 } else {
9372                         mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9373                                          HCI_MGMT_INDEX_EVENTS);
9374                         ev.type = 0x00;
9375                 }
9376                 break;
9377         case HCI_AMP:
9378                 ev.type = 0x02;
9379                 break;
9380         default:
9381                 return;
9382         }
9383
9384         ev.bus = hdev->bus;
9385
9386         mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9387                          HCI_MGMT_EXT_INDEX_EVENTS);
9388 }
9389
9390 void mgmt_index_removed(struct hci_dev *hdev)
9391 {
9392         struct mgmt_ev_ext_index ev;
9393         u8 status = MGMT_STATUS_INVALID_INDEX;
9394
9395         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9396                 return;
9397
9398         switch (hdev->dev_type) {
9399         case HCI_PRIMARY:
9400                 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9401
9402                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9403                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9404                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9405                         ev.type = 0x01;
9406                 } else {
9407                         mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9408                                          HCI_MGMT_INDEX_EVENTS);
9409                         ev.type = 0x00;
9410                 }
9411                 break;
9412         case HCI_AMP:
9413                 ev.type = 0x02;
9414                 break;
9415         default:
9416                 return;
9417         }
9418
9419         ev.bus = hdev->bus;
9420
9421         mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9422                          HCI_MGMT_EXT_INDEX_EVENTS);
9423
9424         /* Cancel any remaining timed work */
9425         if (!hci_dev_test_flag(hdev, HCI_MGMT))
9426                 return;
9427         cancel_delayed_work_sync(&hdev->discov_off);
9428         cancel_delayed_work_sync(&hdev->service_cache);
9429         cancel_delayed_work_sync(&hdev->rpa_expired);
9430 }
9431
9432 void mgmt_power_on(struct hci_dev *hdev, int err)
9433 {
9434         struct cmd_lookup match = { NULL, hdev };
9435
9436         bt_dev_dbg(hdev, "err %d", err);
9437
9438         hci_dev_lock(hdev);
9439
9440         if (!err) {
9441                 restart_le_actions(hdev);
9442                 hci_update_passive_scan(hdev);
9443         }
9444
9445         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9446
9447         new_settings(hdev, match.sk);
9448
9449         if (match.sk)
9450                 sock_put(match.sk);
9451
9452         hci_dev_unlock(hdev);
9453 }
9454
9455 void __mgmt_power_off(struct hci_dev *hdev)
9456 {
9457         struct cmd_lookup match = { NULL, hdev };
9458         u8 status, zero_cod[] = { 0, 0, 0 };
9459
9460         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9461
9462         /* If the power off is because of hdev unregistration, let us
9463          * use the appropriate INVALID_INDEX status. Otherwise use
9464          * NOT_POWERED. We cover both scenarios here since later in
9465          * mgmt_index_removed() any hci_conn callbacks will have already
9466          * been triggered, potentially causing misleading DISCONNECTED
9467          * status responses.
9468          */
9469         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9470                 status = MGMT_STATUS_INVALID_INDEX;
9471         else
9472                 status = MGMT_STATUS_NOT_POWERED;
9473
9474         mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9475
9476         if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9477                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9478                                    zero_cod, sizeof(zero_cod),
9479                                    HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9480                 ext_info_changed(hdev, NULL);
9481         }
9482
9483         new_settings(hdev, match.sk);
9484
9485         if (match.sk)
9486                 sock_put(match.sk);
9487 }
9488
9489 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9490 {
9491         struct mgmt_pending_cmd *cmd;
9492         u8 status;
9493
9494         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9495         if (!cmd)
9496                 return;
9497
9498         if (err == -ERFKILL)
9499                 status = MGMT_STATUS_RFKILLED;
9500         else
9501                 status = MGMT_STATUS_FAILED;
9502
9503         mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9504
9505         mgmt_pending_remove(cmd);
9506 }
9507
9508 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9509                        bool persistent)
9510 {
9511         struct mgmt_ev_new_link_key ev;
9512
9513         memset(&ev, 0, sizeof(ev));
9514
9515         ev.store_hint = persistent;
9516         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9517         ev.key.addr.type = BDADDR_BREDR;
9518         ev.key.type = key->type;
9519         memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9520         ev.key.pin_len = key->pin_len;
9521
9522         mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9523 }
9524
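/* Map an SMP long term key to the corresponding MGMT key type, distinguishing
 * authenticated from unauthenticated and P-256 from legacy keys.
 */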
9525 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9526 {
9527         switch (ltk->type) {
9528         case SMP_LTK:
9529         case SMP_LTK_RESPONDER:
9530                 if (ltk->authenticated)
9531                         return MGMT_LTK_AUTHENTICATED;
9532                 return MGMT_LTK_UNAUTHENTICATED;
9533         case SMP_LTK_P256:
9534                 if (ltk->authenticated)
9535                         return MGMT_LTK_P256_AUTH;
9536                 return MGMT_LTK_P256_UNAUTH;
9537         case SMP_LTK_P256_DEBUG:
9538                 return MGMT_LTK_P256_DEBUG;
9539         }
9540
9541         return MGMT_LTK_UNAUTHENTICATED;
9542 }
9543
9544 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9545 {
9546         struct mgmt_ev_new_long_term_key ev;
9547
9548         memset(&ev, 0, sizeof(ev));
9549
9550         /* Devices using resolvable or non-resolvable random addresses
9551          * without providing an identity resolving key don't need their
9552          * long term keys stored. Their addresses will change the
9553          * next time around.
9554          *
9555          * Only store the long term key when the remote device provides
9556          * an identity address. If the remote identity is known, the
9557          * long term keys are internally mapped to the identity
9558          * address, so allow static random and public addresses
9559          * here.
9560          */
9561         if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9562             (key->bdaddr.b[5] & 0xc0) != 0xc0)
9563                 ev.store_hint = 0x00;
9564         else
9565                 ev.store_hint = persistent;
9566
9567         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9568         ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9569         ev.key.type = mgmt_ltk_type(key);
9570         ev.key.enc_size = key->enc_size;
9571         ev.key.ediv = key->ediv;
9572         ev.key.rand = key->rand;
9573
9574         if (key->type == SMP_LTK)
9575                 ev.key.initiator = 1;
9576
9577         /* Make sure we copy only the significant bytes based on the
9578          * encryption key size, and set the rest of the value to zeroes.
9579          */
9580         memcpy(ev.key.val, key->val, key->enc_size);
9581         memset(ev.key.val + key->enc_size, 0,
9582                sizeof(ev.key.val) - key->enc_size);
9583
9584         mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9585 }
9586
9587 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9588 {
9589         struct mgmt_ev_new_irk ev;
9590
9591         memset(&ev, 0, sizeof(ev));
9592
9593         ev.store_hint = persistent;
9594
9595         bacpy(&ev.rpa, &irk->rpa);
9596         bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9597         ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9598         memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9599
9600         mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9601 }
9602
9603 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9604                    bool persistent)
9605 {
9606         struct mgmt_ev_new_csrk ev;
9607
9608         memset(&ev, 0, sizeof(ev));
9609
9610         /* Devices using resolvable or non-resolvable random addresses
9611          * without providing an identity resolving key don't need their
9612          * signature resolving keys stored. Their addresses will change
9613          * the next time around.
9614          *
9615          * Only store the signature resolving key when the remote device
9616          * provides an identity address. So allow static random and
9617          * public addresses here.
9618          */
9619         if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9620             (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9621                 ev.store_hint = 0x00;
9622         else
9623                 ev.store_hint = persistent;
9624
9625         bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9626         ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9627         ev.key.type = csrk->type;
9628         memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9629
9630         mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9631 }
9632
9633 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9634                          u8 bdaddr_type, u8 store_hint, u16 min_interval,
9635                          u16 max_interval, u16 latency, u16 timeout)
9636 {
9637         struct mgmt_ev_new_conn_param ev;
9638
9639         if (!hci_is_identity_address(bdaddr, bdaddr_type))
9640                 return;
9641
9642         memset(&ev, 0, sizeof(ev));
9643         bacpy(&ev.addr.bdaddr, bdaddr);
9644         ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9645         ev.store_hint = store_hint;
9646         ev.min_interval = cpu_to_le16(min_interval);
9647         ev.max_interval = cpu_to_le16(max_interval);
9648         ev.latency = cpu_to_le16(latency);
9649         ev.timeout = cpu_to_le16(timeout);
9650
9651         mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9652 }
9653
9654 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9655                            u8 *name, u8 name_len)
9656 {
9657         struct sk_buff *skb;
9658         struct mgmt_ev_device_connected *ev;
9659         u16 eir_len = 0;
9660         u32 flags = 0;
9661
9662         /* Allocate a buffer for the LE advertising data or the BR/EDR EIR fields */
9663         if (conn->le_adv_data_len > 0)
9664                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9665                                      sizeof(*ev) + conn->le_adv_data_len);
9666         else
9667                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9668                                      sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9669                                      eir_precalc_len(sizeof(conn->dev_class)));
9670
9671         ev = skb_put(skb, sizeof(*ev));
9672         bacpy(&ev->addr.bdaddr, &conn->dst);
9673         ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9674
9675         if (conn->out)
9676                 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9677
9678         ev->flags = __cpu_to_le32(flags);
9679
9680         /* We must ensure that the EIR Data fields are ordered and
9681          * unique. Keep it simple for now and avoid the problem by not
9682          * adding any BR/EDR data to the LE adv.
9683          */
9684         if (conn->le_adv_data_len > 0) {
9685                 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9686                 eir_len = conn->le_adv_data_len;
9687         } else {
9688                 if (name)
9689                         eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9690
9691                 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9692                         eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9693                                                     conn->dev_class, sizeof(conn->dev_class));
9694         }
9695
9696         ev->eir_len = cpu_to_le16(eir_len);
9697
9698         mgmt_event_skb(skb, NULL);
9699 }
9700
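/* mgmt_pending_foreach() callback: complete a pending Disconnect command and
 * hand back a referenced copy of its socket so the Device Disconnected event
 * can be sent with that socket as the one to skip.
 */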
9701 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9702 {
9703         struct sock **sk = data;
9704
9705         cmd->cmd_complete(cmd, 0);
9706
9707         *sk = cmd->sk;
9708         sock_hold(*sk);
9709
9710         mgmt_pending_remove(cmd);
9711 }
9712
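/* mgmt_pending_foreach() callback: emit Device Unpaired for the address in a
 * pending Unpair Device command and complete that command.
 */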
9713 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9714 {
9715         struct hci_dev *hdev = data;
9716         struct mgmt_cp_unpair_device *cp = cmd->param;
9717
9718         device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9719
9720         cmd->cmd_complete(cmd, 0);
9721         mgmt_pending_remove(cmd);
9722 }
9723
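/* Return true if a Set Powered (off) command is pending, i.e. the controller
 * is in the process of being powered down through MGMT.
 */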
9724 bool mgmt_powering_down(struct hci_dev *hdev)
9725 {
9726         struct mgmt_pending_cmd *cmd;
9727         struct mgmt_mode *cp;
9728
9729         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9730         if (!cmd)
9731                 return false;
9732
9733         cp = cmd->param;
9734         if (!cp->val)
9735                 return true;
9736
9737         return false;
9738 }
9739
9740 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9741                               u8 link_type, u8 addr_type, u8 reason,
9742                               bool mgmt_connected)
9743 {
9744         struct mgmt_ev_device_disconnected ev;
9745         struct sock *sk = NULL;
9746
9747         /* The connection is still in hci_conn_hash so test for 1
9748          * instead of 0 to know if this is the last one.
9749          */
9750         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9751                 cancel_delayed_work(&hdev->power_off);
9752                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9753         }
9754
9755         if (!mgmt_connected)
9756                 return;
9757
9758         if (link_type != ACL_LINK && link_type != LE_LINK)
9759                 return;
9760
9761         mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9762
9763         bacpy(&ev.addr.bdaddr, bdaddr);
9764         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9765         ev.reason = reason;
9766
9767         /* Report disconnects due to suspend */
9768         if (hdev->suspended)
9769                 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9770
9771         mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9772
9773         if (sk)
9774                 sock_put(sk);
9775
9776         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9777                              hdev);
9778 }
9779
9780 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9781                             u8 link_type, u8 addr_type, u8 status)
9782 {
9783         u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9784         struct mgmt_cp_disconnect *cp;
9785         struct mgmt_pending_cmd *cmd;
9786
9787         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9788                              hdev);
9789
9790         cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9791         if (!cmd)
9792                 return;
9793
9794         cp = cmd->param;
9795
9796         if (bacmp(bdaddr, &cp->addr.bdaddr))
9797                 return;
9798
9799         if (cp->addr.type != bdaddr_type)
9800                 return;
9801
9802         cmd->cmd_complete(cmd, mgmt_status(status));
9803         mgmt_pending_remove(cmd);
9804 }
9805
9806 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9807                          u8 addr_type, u8 status)
9808 {
9809         struct mgmt_ev_connect_failed ev;
9810
9811         /* The connection is still in hci_conn_hash so test for 1
9812          * instead of 0 to know if this is the last one.
9813          */
9814         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9815                 cancel_delayed_work(&hdev->power_off);
9816                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9817         }
9818
9819         bacpy(&ev.addr.bdaddr, bdaddr);
9820         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9821         ev.status = mgmt_status(status);
9822
9823         mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9824 }
9825
9826 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9827 {
9828         struct mgmt_ev_pin_code_request ev;
9829
9830         bacpy(&ev.addr.bdaddr, bdaddr);
9831         ev.addr.type = BDADDR_BREDR;
9832         ev.secure = secure;
9833
9834         mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9835 }
9836
9837 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9838                                   u8 status)
9839 {
9840         struct mgmt_pending_cmd *cmd;
9841
9842         cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9843         if (!cmd)
9844                 return;
9845
9846         cmd->cmd_complete(cmd, mgmt_status(status));
9847         mgmt_pending_remove(cmd);
9848 }
9849
9850 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9851                                       u8 status)
9852 {
9853         struct mgmt_pending_cmd *cmd;
9854
9855         cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9856         if (!cmd)
9857                 return;
9858
9859         cmd->cmd_complete(cmd, mgmt_status(status));
9860         mgmt_pending_remove(cmd);
9861 }
9862
9863 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9864                               u8 link_type, u8 addr_type, u32 value,
9865                               u8 confirm_hint)
9866 {
9867         struct mgmt_ev_user_confirm_request ev;
9868
9869         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9870
9871         bacpy(&ev.addr.bdaddr, bdaddr);
9872         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9873         ev.confirm_hint = confirm_hint;
9874         ev.value = cpu_to_le32(value);
9875
9876         return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9877                           NULL);
9878 }
9879
9880 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9881                               u8 link_type, u8 addr_type)
9882 {
9883         struct mgmt_ev_user_passkey_request ev;
9884
9885         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9886
9887         bacpy(&ev.addr.bdaddr, bdaddr);
9888         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9889
9890         return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9891                           NULL);
9892 }
9893
9894 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9895                                       u8 link_type, u8 addr_type, u8 status,
9896                                       u8 opcode)
9897 {
9898         struct mgmt_pending_cmd *cmd;
9899
9900         cmd = pending_find(opcode, hdev);
9901         if (!cmd)
9902                 return -ENOENT;
9903
9904         cmd->cmd_complete(cmd, mgmt_status(status));
9905         mgmt_pending_remove(cmd);
9906
9907         return 0;
9908 }
9909
9910 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9911                                      u8 link_type, u8 addr_type, u8 status)
9912 {
9913         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9914                                           status, MGMT_OP_USER_CONFIRM_REPLY);
9915 }
9916
9917 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9918                                          u8 link_type, u8 addr_type, u8 status)
9919 {
9920         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9921                                           status,
9922                                           MGMT_OP_USER_CONFIRM_NEG_REPLY);
9923 }
9924
9925 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9926                                      u8 link_type, u8 addr_type, u8 status)
9927 {
9928         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9929                                           status, MGMT_OP_USER_PASSKEY_REPLY);
9930 }
9931
9932 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9933                                          u8 link_type, u8 addr_type, u8 status)
9934 {
9935         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9936                                           status,
9937                                           MGMT_OP_USER_PASSKEY_NEG_REPLY);
9938 }
9939
9940 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9941                              u8 link_type, u8 addr_type, u32 passkey,
9942                              u8 entered)
9943 {
9944         struct mgmt_ev_passkey_notify ev;
9945
9946         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9947
9948         bacpy(&ev.addr.bdaddr, bdaddr);
9949         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9950         ev.passkey = __cpu_to_le32(passkey);
9951         ev.entered = entered;
9952
9953         return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9954 }
9955
9956 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9957 {
9958         struct mgmt_ev_auth_failed ev;
9959         struct mgmt_pending_cmd *cmd;
9960         u8 status = mgmt_status(hci_status);
9961
9962         bacpy(&ev.addr.bdaddr, &conn->dst);
9963         ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9964         ev.status = status;
9965
9966         cmd = find_pairing(conn);
9967
9968         mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9969                     cmd ? cmd->sk : NULL);
9970
9971         if (cmd) {
9972                 cmd->cmd_complete(cmd, status);
9973                 mgmt_pending_remove(cmd);
9974         }
9975 }
9976
9977 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9978 {
9979         struct cmd_lookup match = { NULL, hdev };
9980         bool changed;
9981
9982         if (status) {
9983                 u8 mgmt_err = mgmt_status(status);
9984                 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9985                                      cmd_status_rsp, &mgmt_err);
9986                 return;
9987         }
9988
9989         if (test_bit(HCI_AUTH, &hdev->flags))
9990                 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9991         else
9992                 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9993
9994         mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9995                              &match);
9996
9997         if (changed)
9998                 new_settings(hdev, match.sk);
9999
10000         if (match.sk)
10001                 sock_put(match.sk);
10002 }
10003
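/* cmd_lookup helper: remember (and take a reference to) the socket of the
 * first matching pending command so later events can be attributed to it.
 */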
10004 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10005 {
10006         struct cmd_lookup *match = data;
10007
10008         if (match->sk == NULL) {
10009                 match->sk = cmd->sk;
10010                 sock_hold(match->sk);
10011         }
10012 }
10013
10014 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10015                                     u8 status)
10016 {
10017         struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10018
10019         mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10020         mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10021         mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10022
10023         if (!status) {
10024                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10025                                    3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10026                 ext_info_changed(hdev, NULL);
10027         }
10028
10029         if (match.sk)
10030                 sock_put(match.sk);
10031 }
10032
10033 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10034 {
10035         struct mgmt_cp_set_local_name ev;
10036         struct mgmt_pending_cmd *cmd;
10037
10038         if (status)
10039                 return;
10040
10041         memset(&ev, 0, sizeof(ev));
10042         memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10043         memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10044
10045         cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10046         if (!cmd) {
10047                 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10048
10049                 /* If this is a HCI command related to powering on the
10050                  * HCI dev don't send any mgmt signals.
10051                  */
10052                 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10053                         return;
10054         }
10055
10056         mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10057                            HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10058         ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10059 }
10060
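/* Return true if the given 128-bit UUID is present in the caller-supplied
 * UUID filter list.
 */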
10061 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10062 {
10063         int i;
10064
10065         for (i = 0; i < uuid_count; i++) {
10066                 if (!memcmp(uuid, uuids[i], 16))
10067                         return true;
10068         }
10069
10070         return false;
10071 }
10072
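/* Walk the EIR/advertising data structures and return true if any 16-bit,
 * 32-bit or 128-bit Service UUID field matches the filter list; shorter UUIDs
 * are expanded against the Bluetooth Base UUID before comparison.
 */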
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
        u16 parsed = 0;

        while (parsed < eir_len) {
                u8 field_len = eir[0];
                u8 uuid[16];
                int i;

                if (field_len == 0)
                        break;

                if (eir_len - parsed < field_len + 1)
                        break;

                switch (eir[1]) {
                case EIR_UUID16_ALL:
                case EIR_UUID16_SOME:
                        for (i = 0; i + 3 <= field_len; i += 2) {
                                memcpy(uuid, bluetooth_base_uuid, 16);
                                uuid[13] = eir[i + 3];
                                uuid[12] = eir[i + 2];
                                if (has_uuid(uuid, uuid_count, uuids))
                                        return true;
                        }
                        break;
                case EIR_UUID32_ALL:
                case EIR_UUID32_SOME:
                        for (i = 0; i + 5 <= field_len; i += 4) {
                                memcpy(uuid, bluetooth_base_uuid, 16);
                                uuid[15] = eir[i + 5];
                                uuid[14] = eir[i + 4];
                                uuid[13] = eir[i + 3];
                                uuid[12] = eir[i + 2];
                                if (has_uuid(uuid, uuid_count, uuids))
                                        return true;
                        }
                        break;
                case EIR_UUID128_ALL:
                case EIR_UUID128_SOME:
                        for (i = 0; i + 17 <= field_len; i += 16) {
                                memcpy(uuid, eir + i + 2, 16);
                                if (has_uuid(uuid, uuid_count, uuids))
                                        return true;
                        }
                        break;
                }

                parsed += field_len + 1;
                eir += field_len + 1;
        }

        return false;
}

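/* Queue the delayed LE scan restart work, unless the controller is not
 * scanning or the current scan window would end before the restart delay
 * elapses.
 */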
static void restart_le_scan(struct hci_dev *hdev)
{
        /* If controller is not scanning we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
                       hdev->discovery.scan_start +
                       hdev->discovery.scan_duration))
                return;

        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
                           DISCOV_LE_RESTART_DELAY);
}

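/* Apply the service discovery filters (RSSI threshold and UUID list) to a
 * discovery result and return whether it should be reported.
 */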
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
                            u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
        /* If an RSSI threshold has been specified, and
         * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
         * an RSSI smaller than the RSSI threshold will be dropped. If the
         * quirk is set, let it through for further processing, as we might
         * need to restart the scan.
         *
         * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
         * the results are also dropped.
         */
        if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
            (rssi == HCI_RSSI_INVALID ||
            (rssi < hdev->discovery.rssi &&
             !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
                return false;

        if (hdev->discovery.uuid_count != 0) {
                /* If a list of UUIDs is provided in filter, results with no
                 * matching UUID should be dropped.
                 */
                if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
                                   hdev->discovery.uuids) &&
                    !eir_has_uuids(scan_rsp, scan_rsp_len,
                                   hdev->discovery.uuid_count,
                                   hdev->discovery.uuids))
                        return false;
        }

        /* If duplicate filtering does not report RSSI changes, then restart
         * scanning to ensure updated result with updated RSSI values.
         */
        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
                restart_le_scan(hdev);

                /* Validate RSSI value against the RSSI threshold once more. */
                if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
                    rssi < hdev->discovery.rssi)
                        return false;
        }

        return true;
}

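/* Notify userspace that a device previously matched by the Advertisement
 * Monitor with the given handle is no longer being tracked.
 */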
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
                                  bdaddr_t *bdaddr, u8 addr_type)
{
        struct mgmt_ev_adv_monitor_device_lost ev;

        ev.monitor_handle = cpu_to_le16(handle);
        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = addr_type;

        mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
                   NULL);
}

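/* Turn an already built DEVICE_FOUND skb into an ADV_MONITOR_DEVICE_FOUND
 * event by prepending the handle of the matched monitor, then send it to all
 * management sockets except skip_sk.
 */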
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
                                               struct sk_buff *skb,
                                               struct sock *skip_sk,
                                               u16 handle)
{
        struct sk_buff *advmon_skb;
        size_t advmon_skb_len;
        __le16 *monitor_handle;

        if (!skb)
                return;

        advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
                          sizeof(struct mgmt_ev_device_found)) + skb->len;
        advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
                                    advmon_skb_len);
        if (!advmon_skb)
                return;

        /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
         * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
         * store monitor_handle of the matched monitor.
         */
        monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
        *monitor_handle = cpu_to_le16(handle);
        skb_put_data(advmon_skb, skb->data, skb->len);

        mgmt_event_skb(advmon_skb, skip_sk);
}

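/* Decide how a received advertisement is reported: as a regular DEVICE_FOUND
 * event, as an ADV_MONITOR_DEVICE_FOUND event for the matching monitor(s), or
 * both. The per-device 'notified' flag and hdev->advmon_pend_notify track
 * monitored devices that still need their first notification.
 */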
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, bool report_device,
                                          struct sk_buff *skb,
                                          struct sock *skip_sk)
{
        struct monitored_device *dev, *tmp;
        bool matched = false;
        bool notified = false;

        /* We have received the Advertisement Report because:
         * 1. the kernel has initiated active discovery
         * 2. if not, we have pend_le_reports > 0 in which case we are doing
         *    passive scanning
         * 3. if none of the above is true, we have one or more active
         *    Advertisement Monitors
         *
         * For cases 1 and 2, report all advertisements via
         * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
         * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
         *
         * For case 3, since we are not actively scanning and all
         * advertisements received are due to a matched Advertisement Monitor,
         * report all advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND
         * event.
         */
        if (report_device && !hdev->advmon_pend_notify) {
                mgmt_event_skb(skb, skip_sk);
                return;
        }

        hdev->advmon_pend_notify = false;

        list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
                if (!bacmp(&dev->bdaddr, bdaddr)) {
                        matched = true;

                        if (!dev->notified) {
                                mgmt_send_adv_monitor_device_found(hdev, skb,
                                                                   skip_sk,
                                                                   dev->handle);
                                notified = true;
                                dev->notified = true;
                        }
                }

                if (!dev->notified)
                        hdev->advmon_pend_notify = true;
        }

        if (!report_device &&
            ((matched && !notified) || !msft_monitor_supported(hdev))) {
                /* Handle 0 indicates that we are not actively scanning and
                 * this is a subsequent advertisement report for an already
                 * matched Advertisement Monitor or the controller offloading
                 * support is not available.
                 */
                mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
        }

        if (report_device)
                mgmt_event_skb(skb, skip_sk);
        else
                kfree_skb(skb);
}

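/* Forward an LE advertisement to the mesh interface. If a list of interesting
 * AD types has been configured in hdev->mesh_ad_types, only reports whose
 * advertising or scan response data contain at least one of those AD types
 * are sent as MGMT_EV_MESH_DEVICE_FOUND.
 */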
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
                              u8 addr_type, s8 rssi, u32 flags, u8 *eir,
                              u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
                              u64 instant)
{
        struct sk_buff *skb;
        struct mgmt_ev_mesh_device_found *ev;
        int i, j;

        if (!hdev->mesh_ad_types[0])
                goto accepted;

        /* Scan for requested AD types */
        if (eir_len > 0) {
                for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
                        for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
                                if (!hdev->mesh_ad_types[j])
                                        break;

                                if (hdev->mesh_ad_types[j] == eir[i + 1])
                                        goto accepted;
                        }
                }
        }

        if (scan_rsp_len > 0) {
                for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
                        for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
                                if (!hdev->mesh_ad_types[j])
                                        break;

                                if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
                                        goto accepted;
                        }
                }
        }

        return;

accepted:
        skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
                             sizeof(*ev) + eir_len + scan_rsp_len);
        if (!skb)
                return;

        ev = skb_put(skb, sizeof(*ev));

        bacpy(&ev->addr.bdaddr, bdaddr);
        ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
        ev->rssi = rssi;
        ev->flags = cpu_to_le32(flags);
        ev->instant = cpu_to_le64(instant);

        if (eir_len > 0)
                /* Copy EIR or advertising data into event */
                skb_put_data(skb, eir, eir_len);

        if (scan_rsp_len > 0)
                /* Append scan response data to event */
                skb_put_data(skb, scan_rsp, scan_rsp_len);

        ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

        mgmt_event_skb(skb, NULL);
}

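/* Main entry point for reporting a discovered device to userspace. The result
 * may also be forwarded to the mesh interface, may be dropped by the service
 * discovery filters or the limited discovery check, and is finally delivered
 * as DEVICE_FOUND and/or ADV_MONITOR_DEVICE_FOUND events.
 */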
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
                       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
                       u64 instant)
{
        struct sk_buff *skb;
        struct mgmt_ev_device_found *ev;
        bool report_device = hci_discovery_active(hdev);

        if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
                mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
                                  eir, eir_len, scan_rsp, scan_rsp_len,
                                  instant);

        /* Don't send events for a discovery that was not initiated by the
         * kernel. For LE, the one exception is if we have pend_le_reports > 0,
         * in which case we're doing passive scanning and want these events.
         */
        if (!hci_discovery_active(hdev)) {
                if (link_type == ACL_LINK)
                        return;
                if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
                        report_device = true;
                else if (!hci_is_adv_monitoring(hdev))
                        return;
        }

        if (hdev->discovery.result_filtering) {
                /* We are using service discovery */
                if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
                                     scan_rsp_len))
                        return;
        }

        if (hdev->discovery.limited) {
                /* Check for limited discoverable bit */
                if (dev_class) {
                        if (!(dev_class[1] & 0x20))
                                return;
                } else {
                        u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
                        if (!flags || !(flags[0] & LE_AD_LIMITED))
                                return;
                }
        }

        /* Allocate skb. The 5 extra bytes are for the potential CoD field */
        skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
                             sizeof(*ev) + eir_len + scan_rsp_len + 5);
        if (!skb)
                return;

        ev = skb_put(skb, sizeof(*ev));

        /* In case of device discovery with BR/EDR devices (pre 1.2), the
         * RSSI value was reported as 0 when not available. This behavior
         * is kept when using device discovery. This is required for full
         * backwards compatibility with the API.
         *
         * However, when using service discovery, the value 127 will be
         * returned when the RSSI is not available.
         */
        if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
            link_type == ACL_LINK)
                rssi = 0;

        bacpy(&ev->addr.bdaddr, bdaddr);
        ev->addr.type = link_to_bdaddr(link_type, addr_type);
        ev->rssi = rssi;
        ev->flags = cpu_to_le32(flags);

        if (eir_len > 0)
                /* Copy EIR or advertising data into event */
                skb_put_data(skb, eir, eir_len);

        if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
                u8 eir_cod[5];

                eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
                                           dev_class, 3);
                skb_put_data(skb, eir_cod, sizeof(eir_cod));
        }

        if (scan_rsp_len > 0)
                /* Append scan response data to event */
                skb_put_data(skb, scan_rsp, scan_rsp_len);

        ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

        mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

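/* Report the outcome of a remote name request as a DEVICE_FOUND event, either
 * carrying an EIR Complete Name field or, if no name was resolved, the
 * MGMT_DEV_FOUND_NAME_REQUEST_FAILED flag.
 */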
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
        struct sk_buff *skb;
        struct mgmt_ev_device_found *ev;
        u16 eir_len = 0;
        u32 flags = 0;

        skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
                             sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
        if (!skb)
                return;

        ev = skb_put(skb, sizeof(*ev));
        bacpy(&ev->addr.bdaddr, bdaddr);
        ev->addr.type = link_to_bdaddr(link_type, addr_type);
        ev->rssi = rssi;

        if (name)
                eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
        else
                flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

        ev->eir_len = cpu_to_le16(eir_len);
        ev->flags = cpu_to_le32(flags);

        mgmt_event_skb(skb, NULL);
}

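/* Notify userspace that discovery of the current type has started or stopped. */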
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
        struct mgmt_ev_discovering ev;

        bt_dev_dbg(hdev, "discovering %u", discovering);

        memset(&ev, 0, sizeof(ev));
        ev.type = hdev->discovery.type;
        ev.discovering = discovering;

        mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
        struct mgmt_ev_controller_suspend ev;

        ev.suspend_state = state;
        mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
                   u8 addr_type)
{
        struct mgmt_ev_controller_resume ev;

        ev.wake_reason = reason;
        if (bdaddr) {
                bacpy(&ev.addr.bdaddr, bdaddr);
                ev.addr.type = addr_type;
        } else {
                memset(&ev.addr, 0, sizeof(ev.addr));
        }

        mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

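/* Registration data for the management interface on the HCI control channel.
 * The mgmt_handlers table defined earlier in this file provides the per-opcode
 * command handlers.
 */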
static struct hci_mgmt_chan chan = {
        .channel        = HCI_CHANNEL_CONTROL,
        .handler_count  = ARRAY_SIZE(mgmt_handlers),
        .handlers       = mgmt_handlers,
        .hdev_init      = mgmt_init_hdev,
};

int mgmt_init(void)
{
        return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
        hci_mgmt_chan_unregister(&chan);
}

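/* Called when a management socket is closed: cancel any mesh transmissions
 * that the closing socket still has queued on any controller.
 */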
void mgmt_cleanup(struct sock *sk)
{
        struct mgmt_mesh_tx *mesh_tx;
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                do {
                        mesh_tx = mgmt_mesh_next(hdev, sk);

                        if (mesh_tx)
                                mesh_send_complete(hdev, mesh_tx, true);
                } while (mesh_tx);
        }

        read_unlock(&hci_dev_list_lock);
}