/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION    1
#define MGMT_REVISION   22

static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
        MGMT_OP_READ_INFO,
        MGMT_OP_SET_POWERED,
        MGMT_OP_SET_DISCOVERABLE,
        MGMT_OP_SET_CONNECTABLE,
        MGMT_OP_SET_FAST_CONNECTABLE,
        MGMT_OP_SET_BONDABLE,
        MGMT_OP_SET_LINK_SECURITY,
        MGMT_OP_SET_SSP,
        MGMT_OP_SET_HS,
        MGMT_OP_SET_LE,
        MGMT_OP_SET_DEV_CLASS,
        MGMT_OP_SET_LOCAL_NAME,
        MGMT_OP_ADD_UUID,
        MGMT_OP_REMOVE_UUID,
        MGMT_OP_LOAD_LINK_KEYS,
        MGMT_OP_LOAD_LONG_TERM_KEYS,
        MGMT_OP_DISCONNECT,
        MGMT_OP_GET_CONNECTIONS,
        MGMT_OP_PIN_CODE_REPLY,
        MGMT_OP_PIN_CODE_NEG_REPLY,
        MGMT_OP_SET_IO_CAPABILITY,
        MGMT_OP_PAIR_DEVICE,
        MGMT_OP_CANCEL_PAIR_DEVICE,
        MGMT_OP_UNPAIR_DEVICE,
        MGMT_OP_USER_CONFIRM_REPLY,
        MGMT_OP_USER_CONFIRM_NEG_REPLY,
        MGMT_OP_USER_PASSKEY_REPLY,
        MGMT_OP_USER_PASSKEY_NEG_REPLY,
        MGMT_OP_READ_LOCAL_OOB_DATA,
        MGMT_OP_ADD_REMOTE_OOB_DATA,
        MGMT_OP_REMOVE_REMOTE_OOB_DATA,
        MGMT_OP_START_DISCOVERY,
        MGMT_OP_STOP_DISCOVERY,
        MGMT_OP_CONFIRM_NAME,
        MGMT_OP_BLOCK_DEVICE,
        MGMT_OP_UNBLOCK_DEVICE,
        MGMT_OP_SET_DEVICE_ID,
        MGMT_OP_SET_ADVERTISING,
        MGMT_OP_SET_BREDR,
        MGMT_OP_SET_STATIC_ADDRESS,
        MGMT_OP_SET_SCAN_PARAMS,
        MGMT_OP_SET_SECURE_CONN,
        MGMT_OP_SET_DEBUG_KEYS,
        MGMT_OP_SET_PRIVACY,
        MGMT_OP_LOAD_IRKS,
        MGMT_OP_GET_CONN_INFO,
        MGMT_OP_GET_CLOCK_INFO,
        MGMT_OP_ADD_DEVICE,
        MGMT_OP_REMOVE_DEVICE,
        MGMT_OP_LOAD_CONN_PARAM,
        MGMT_OP_READ_UNCONF_INDEX_LIST,
        MGMT_OP_READ_CONFIG_INFO,
        MGMT_OP_SET_EXTERNAL_CONFIG,
        MGMT_OP_SET_PUBLIC_ADDRESS,
        MGMT_OP_START_SERVICE_DISCOVERY,
        MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
        MGMT_OP_READ_EXT_INDEX_LIST,
        MGMT_OP_READ_ADV_FEATURES,
        MGMT_OP_ADD_ADVERTISING,
        MGMT_OP_REMOVE_ADVERTISING,
        MGMT_OP_GET_ADV_SIZE_INFO,
        MGMT_OP_START_LIMITED_DISCOVERY,
        MGMT_OP_READ_EXT_INFO,
        MGMT_OP_SET_APPEARANCE,
        MGMT_OP_GET_PHY_CONFIGURATION,
        MGMT_OP_SET_PHY_CONFIGURATION,
        MGMT_OP_SET_BLOCKED_KEYS,
        MGMT_OP_SET_WIDEBAND_SPEECH,
        MGMT_OP_READ_CONTROLLER_CAP,
        MGMT_OP_READ_EXP_FEATURES_INFO,
        MGMT_OP_SET_EXP_FEATURE,
        MGMT_OP_READ_DEF_SYSTEM_CONFIG,
        MGMT_OP_SET_DEF_SYSTEM_CONFIG,
        MGMT_OP_READ_DEF_RUNTIME_CONFIG,
        MGMT_OP_SET_DEF_RUNTIME_CONFIG,
        MGMT_OP_GET_DEVICE_FLAGS,
        MGMT_OP_SET_DEVICE_FLAGS,
        MGMT_OP_READ_ADV_MONITOR_FEATURES,
        MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
        MGMT_OP_REMOVE_ADV_MONITOR,
        MGMT_OP_ADD_EXT_ADV_PARAMS,
        MGMT_OP_ADD_EXT_ADV_DATA,
        MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
        MGMT_OP_SET_MESH_RECEIVER,
        MGMT_OP_MESH_READ_FEATURES,
        MGMT_OP_MESH_SEND,
        MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
        MGMT_EV_CONTROLLER_ERROR,
        MGMT_EV_INDEX_ADDED,
        MGMT_EV_INDEX_REMOVED,
        MGMT_EV_NEW_SETTINGS,
        MGMT_EV_CLASS_OF_DEV_CHANGED,
        MGMT_EV_LOCAL_NAME_CHANGED,
        MGMT_EV_NEW_LINK_KEY,
        MGMT_EV_NEW_LONG_TERM_KEY,
        MGMT_EV_DEVICE_CONNECTED,
        MGMT_EV_DEVICE_DISCONNECTED,
        MGMT_EV_CONNECT_FAILED,
        MGMT_EV_PIN_CODE_REQUEST,
        MGMT_EV_USER_CONFIRM_REQUEST,
        MGMT_EV_USER_PASSKEY_REQUEST,
        MGMT_EV_AUTH_FAILED,
        MGMT_EV_DEVICE_FOUND,
        MGMT_EV_DISCOVERING,
        MGMT_EV_DEVICE_BLOCKED,
        MGMT_EV_DEVICE_UNBLOCKED,
        MGMT_EV_DEVICE_UNPAIRED,
        MGMT_EV_PASSKEY_NOTIFY,
        MGMT_EV_NEW_IRK,
        MGMT_EV_NEW_CSRK,
        MGMT_EV_DEVICE_ADDED,
        MGMT_EV_DEVICE_REMOVED,
        MGMT_EV_NEW_CONN_PARAM,
        MGMT_EV_UNCONF_INDEX_ADDED,
        MGMT_EV_UNCONF_INDEX_REMOVED,
        MGMT_EV_NEW_CONFIG_OPTIONS,
        MGMT_EV_EXT_INDEX_ADDED,
        MGMT_EV_EXT_INDEX_REMOVED,
        MGMT_EV_LOCAL_OOB_DATA_UPDATED,
        MGMT_EV_ADVERTISING_ADDED,
        MGMT_EV_ADVERTISING_REMOVED,
        MGMT_EV_EXT_INFO_CHANGED,
        MGMT_EV_PHY_CONFIGURATION_CHANGED,
        MGMT_EV_EXP_FEATURE_CHANGED,
        MGMT_EV_DEVICE_FLAGS_CHANGED,
        MGMT_EV_ADV_MONITOR_ADDED,
        MGMT_EV_ADV_MONITOR_REMOVED,
        MGMT_EV_CONTROLLER_SUSPEND,
        MGMT_EV_CONTROLLER_RESUME,
        MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
        MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
        MGMT_OP_READ_INFO,
        MGMT_OP_READ_UNCONF_INDEX_LIST,
        MGMT_OP_READ_CONFIG_INFO,
        MGMT_OP_READ_EXT_INDEX_LIST,
        MGMT_OP_READ_EXT_INFO,
        MGMT_OP_READ_CONTROLLER_CAP,
        MGMT_OP_READ_EXP_FEATURES_INFO,
        MGMT_OP_READ_DEF_SYSTEM_CONFIG,
        MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
        MGMT_EV_INDEX_ADDED,
        MGMT_EV_INDEX_REMOVED,
        MGMT_EV_NEW_SETTINGS,
        MGMT_EV_CLASS_OF_DEV_CHANGED,
        MGMT_EV_LOCAL_NAME_CHANGED,
        MGMT_EV_UNCONF_INDEX_ADDED,
        MGMT_EV_UNCONF_INDEX_REMOVED,
        MGMT_EV_NEW_CONFIG_OPTIONS,
        MGMT_EV_EXT_INDEX_ADDED,
        MGMT_EV_EXT_INDEX_REMOVED,
        MGMT_EV_EXT_INFO_CHANGED,
        MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT   msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
                 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
        MGMT_STATUS_SUCCESS,
        MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
        MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
        MGMT_STATUS_FAILED,             /* Hardware Failure */
        MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
        MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
        MGMT_STATUS_AUTH_FAILED,        /* PIN or Key Missing */
        MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
        MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
        MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
        MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
        MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
        MGMT_STATUS_BUSY,               /* Command Disallowed */
        MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
        MGMT_STATUS_REJECTED,           /* Rejected Security */
        MGMT_STATUS_REJECTED,           /* Rejected Personal */
        MGMT_STATUS_TIMEOUT,            /* Host Timeout */
        MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
        MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
        MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
        MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
        MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
        MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
        MGMT_STATUS_BUSY,               /* Repeated Attempts */
        MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
        MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
        MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
        MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
        MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
        MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
        MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
        MGMT_STATUS_FAILED,             /* Unspecified Error */
        MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
        MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
        MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
        MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
        MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
        MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
        MGMT_STATUS_FAILED,             /* Unit Link Key Used */
        MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
        MGMT_STATUS_TIMEOUT,            /* Instant Passed */
        MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
        MGMT_STATUS_FAILED,             /* Transaction Collision */
        MGMT_STATUS_FAILED,             /* Reserved for future use */
        MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
        MGMT_STATUS_REJECTED,           /* QoS Rejected */
        MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
        MGMT_STATUS_REJECTED,           /* Insufficient Security */
        MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
        MGMT_STATUS_FAILED,             /* Reserved for future use */
        MGMT_STATUS_BUSY,               /* Role Switch Pending */
        MGMT_STATUS_FAILED,             /* Reserved for future use */
        MGMT_STATUS_FAILED,             /* Slot Violation */
        MGMT_STATUS_FAILED,             /* Role Switch Failed */
        MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
        MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
        MGMT_STATUS_BUSY,               /* Host Busy Pairing */
        MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
        MGMT_STATUS_BUSY,               /* Controller Busy */
        MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
        MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
        MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
        MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
        MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
};

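/* Map a negative errno value onto the closest MGMT status code; anything
 * unrecognized falls back to MGMT_STATUS_FAILED.
 */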
static u8 mgmt_errno_status(int err)
{
        switch (err) {
        case 0:
                return MGMT_STATUS_SUCCESS;
        case -EPERM:
                return MGMT_STATUS_REJECTED;
        case -EINVAL:
                return MGMT_STATUS_INVALID_PARAMS;
        case -EOPNOTSUPP:
                return MGMT_STATUS_NOT_SUPPORTED;
        case -EBUSY:
                return MGMT_STATUS_BUSY;
        case -ETIMEDOUT:
                return MGMT_STATUS_AUTH_FAILED;
        case -ENOMEM:
                return MGMT_STATUS_NO_RESOURCES;
        case -EISCONN:
                return MGMT_STATUS_ALREADY_CONNECTED;
        case -ENOTCONN:
                return MGMT_STATUS_DISCONNECTED;
        }

        return MGMT_STATUS_FAILED;
}

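/* Convert an HCI status code (or a negative errno) into a MGMT status,
 * using the conversion table above for HCI status values.
 */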
static u8 mgmt_status(int err)
{
        if (err < 0)
                return mgmt_errno_status(err);

        if (err < ARRAY_SIZE(mgmt_status_table))
                return mgmt_status_table[err];

        return MGMT_STATUS_FAILED;
}

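/* Thin wrappers around mgmt_send_event() for the HCI control channel.
 * The "index" and "limited" variants let the caller gate delivery on a
 * socket flag, while mgmt_event() goes to all trusted sockets except
 * the optional skip_sk.
 */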
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
                            u16 len, int flag)
{
        return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
                               flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
                              u16 len, int flag, struct sock *skip_sk)
{
        return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
                               flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
                      struct sock *skip_sk)
{
        return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
                               HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
        return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
                                   skip_sk);
}

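/* Translate a MGMT address type into the corresponding HCI LE address type. */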
static u8 le_addr_type(u8 mgmt_addr_type)
{
        if (mgmt_addr_type == BDADDR_LE_PUBLIC)
                return ADDR_LE_DEV_PUBLIC;
        else
                return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
        struct mgmt_rp_read_version *rp = ver;

        rp->version = MGMT_VERSION;
        rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
                        u16 data_len)
{
        struct mgmt_rp_read_version rp;

        bt_dev_dbg(hdev, "sock %p", sk);

        mgmt_fill_version_info(&rp);

        return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
                                 &rp, sizeof(rp));
}

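/* Reply with the supported command and event lists; untrusted sockets
 * only get the read-only subsets defined above.
 */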
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
                         u16 data_len)
{
        struct mgmt_rp_read_commands *rp;
        u16 num_commands, num_events;
        size_t rp_size;
        int i, err;

        bt_dev_dbg(hdev, "sock %p", sk);

        if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
                num_commands = ARRAY_SIZE(mgmt_commands);
                num_events = ARRAY_SIZE(mgmt_events);
        } else {
                num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
                num_events = ARRAY_SIZE(mgmt_untrusted_events);
        }

        rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

        rp = kmalloc(rp_size, GFP_KERNEL);
        if (!rp)
                return -ENOMEM;

        rp->num_commands = cpu_to_le16(num_commands);
        rp->num_events = cpu_to_le16(num_events);

        if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
                __le16 *opcode = rp->opcodes;

                for (i = 0; i < num_commands; i++, opcode++)
                        put_unaligned_le16(mgmt_commands[i], opcode);

                for (i = 0; i < num_events; i++, opcode++)
                        put_unaligned_le16(mgmt_events[i], opcode);
        } else {
                __le16 *opcode = rp->opcodes;

                for (i = 0; i < num_commands; i++, opcode++)
                        put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

                for (i = 0; i < num_events; i++, opcode++)
                        put_unaligned_le16(mgmt_untrusted_events[i], opcode);
        }

        err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
                                rp, rp_size);
        kfree(rp);

        return err;
}

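/* Build the list of configured primary controller indexes. Controllers
 * that are still in setup or config, bound to a user channel, or marked
 * raw-only are skipped.
 */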
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 data_len)
{
        struct mgmt_rp_read_index_list *rp;
        struct hci_dev *d;
        size_t rp_len;
        u16 count;
        int err;

        bt_dev_dbg(hdev, "sock %p", sk);

        read_lock(&hci_dev_list_lock);

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->dev_type == HCI_PRIMARY &&
                    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
                        count++;
        }

        rp_len = sizeof(*rp) + (2 * count);
        rp = kmalloc(rp_len, GFP_ATOMIC);
        if (!rp) {
                read_unlock(&hci_dev_list_lock);
                return -ENOMEM;
        }

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (hci_dev_test_flag(d, HCI_SETUP) ||
                    hci_dev_test_flag(d, HCI_CONFIG) ||
                    hci_dev_test_flag(d, HCI_USER_CHANNEL))
                        continue;

                /* Devices marked as raw-only are neither configured
                 * nor unconfigured controllers.
                 */
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
                        continue;

                if (d->dev_type == HCI_PRIMARY &&
                    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
                        rp->index[count++] = cpu_to_le16(d->id);
                        bt_dev_dbg(hdev, "Added hci%u", d->id);
                }
        }

        rp->num_controllers = cpu_to_le16(count);
        rp_len = sizeof(*rp) + (2 * count);

        read_unlock(&hci_dev_list_lock);

        err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
                                0, rp, rp_len);

        kfree(rp);

        return err;
}

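/* Same as read_index_list(), but reporting only the unconfigured
 * controllers.
 */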
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
                                  void *data, u16 data_len)
{
        struct mgmt_rp_read_unconf_index_list *rp;
        struct hci_dev *d;
        size_t rp_len;
        u16 count;
        int err;

        bt_dev_dbg(hdev, "sock %p", sk);

        read_lock(&hci_dev_list_lock);

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->dev_type == HCI_PRIMARY &&
                    hci_dev_test_flag(d, HCI_UNCONFIGURED))
                        count++;
        }

        rp_len = sizeof(*rp) + (2 * count);
        rp = kmalloc(rp_len, GFP_ATOMIC);
        if (!rp) {
                read_unlock(&hci_dev_list_lock);
                return -ENOMEM;
        }

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (hci_dev_test_flag(d, HCI_SETUP) ||
                    hci_dev_test_flag(d, HCI_CONFIG) ||
                    hci_dev_test_flag(d, HCI_USER_CHANNEL))
                        continue;

                /* Devices marked as raw-only are neither configured
                 * nor unconfigured controllers.
                 */
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
                        continue;

                if (d->dev_type == HCI_PRIMARY &&
                    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
                        rp->index[count++] = cpu_to_le16(d->id);
                        bt_dev_dbg(hdev, "Added hci%u", d->id);
                }
        }

        rp->num_controllers = cpu_to_le16(count);
        rp_len = sizeof(*rp) + (2 * count);

        read_unlock(&hci_dev_list_lock);

        err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
                                MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

        kfree(rp);

        return err;
}

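/* Extended index list: reports configured, unconfigured and AMP
 * controllers together with their type and bus.
 */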
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
                               void *data, u16 data_len)
{
        struct mgmt_rp_read_ext_index_list *rp;
        struct hci_dev *d;
        u16 count;
        int err;

        bt_dev_dbg(hdev, "sock %p", sk);

        read_lock(&hci_dev_list_lock);

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
                        count++;
        }

        rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
        if (!rp) {
                read_unlock(&hci_dev_list_lock);
                return -ENOMEM;
        }

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (hci_dev_test_flag(d, HCI_SETUP) ||
                    hci_dev_test_flag(d, HCI_CONFIG) ||
                    hci_dev_test_flag(d, HCI_USER_CHANNEL))
                        continue;

                /* Devices marked as raw-only are neither configured
                 * nor unconfigured controllers.
                 */
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
                        continue;

                if (d->dev_type == HCI_PRIMARY) {
                        if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
                                rp->entry[count].type = 0x01;
                        else
                                rp->entry[count].type = 0x00;
                } else if (d->dev_type == HCI_AMP) {
                        rp->entry[count].type = 0x02;
                } else {
                        continue;
                }

                rp->entry[count].bus = d->bus;
                rp->entry[count++].index = cpu_to_le16(d->id);
                bt_dev_dbg(hdev, "Added hci%u", d->id);
        }

        rp->num_controllers = cpu_to_le16(count);

        read_unlock(&hci_dev_list_lock);

        /* If this command is called at least once, then all the
         * default index and unconfigured index events are disabled
         * and from now on only extended index events are used.
         */
        hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
        hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
        hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

        err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
                                MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
                                struct_size(rp, entry, count));

        kfree(rp);

        return err;
}

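/* A controller counts as configured once any required external
 * configuration has completed and, for controllers with an invalid or
 * property-provided BD_ADDR, a public address has been set.
 */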
static bool is_configured(struct hci_dev *hdev)
{
        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
                return false;

        if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
             test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
            !bacmp(&hdev->public_addr, BDADDR_ANY))
                return false;

        return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
        u32 options = 0;

        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
                options |= MGMT_OPTION_EXTERNAL_CONFIG;

        if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
             test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
            !bacmp(&hdev->public_addr, BDADDR_ANY))
                options |= MGMT_OPTION_PUBLIC_ADDRESS;

        return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
        __le32 options = get_missing_options(hdev);

        return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
                                  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
        __le32 options = get_missing_options(hdev);

        return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
                                 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
                            void *data, u16 data_len)
{
        struct mgmt_rp_read_config_info rp;
        u32 options = 0;

        bt_dev_dbg(hdev, "sock %p", sk);

        hci_dev_lock(hdev);

        memset(&rp, 0, sizeof(rp));
        rp.manufacturer = cpu_to_le16(hdev->manufacturer);

        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
                options |= MGMT_OPTION_EXTERNAL_CONFIG;

        if (hdev->set_bdaddr)
                options |= MGMT_OPTION_PUBLIC_ADDRESS;

        rp.supported_options = cpu_to_le32(options);
        rp.missing_options = get_missing_options(hdev);

        hci_dev_unlock(hdev);

        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
                                 &rp, sizeof(rp));
}

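/* Report every PHY/packet-type combination the controller can support,
 * derived from its BR/EDR LMP features and LE features.
 */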
static u32 get_supported_phys(struct hci_dev *hdev)
{
        u32 supported_phys = 0;

        if (lmp_bredr_capable(hdev)) {
                supported_phys |= MGMT_PHY_BR_1M_1SLOT;

                if (hdev->features[0][0] & LMP_3SLOT)
                        supported_phys |= MGMT_PHY_BR_1M_3SLOT;

                if (hdev->features[0][0] & LMP_5SLOT)
                        supported_phys |= MGMT_PHY_BR_1M_5SLOT;

                if (lmp_edr_2m_capable(hdev)) {
                        supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

                        if (lmp_edr_3slot_capable(hdev))
                                supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

                        if (lmp_edr_5slot_capable(hdev))
                                supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

                        if (lmp_edr_3m_capable(hdev)) {
                                supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

                                if (lmp_edr_3slot_capable(hdev))
                                        supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

                                if (lmp_edr_5slot_capable(hdev))
                                        supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
                        }
                }
        }

        if (lmp_le_capable(hdev)) {
                supported_phys |= MGMT_PHY_LE_1M_TX;
                supported_phys |= MGMT_PHY_LE_1M_RX;

                if (hdev->le_features[1] & HCI_LE_PHY_2M) {
                        supported_phys |= MGMT_PHY_LE_2M_TX;
                        supported_phys |= MGMT_PHY_LE_2M_RX;
                }

                if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
                        supported_phys |= MGMT_PHY_LE_CODED_TX;
                        supported_phys |= MGMT_PHY_LE_CODED_RX;
                }
        }

        return supported_phys;
}

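/* Report the PHYs currently in use, based on the BR/EDR packet-type
 * mask and the default LE TX/RX PHY preferences.
 */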
static u32 get_selected_phys(struct hci_dev *hdev)
{
        u32 selected_phys = 0;

        if (lmp_bredr_capable(hdev)) {
                selected_phys |= MGMT_PHY_BR_1M_1SLOT;

                if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
                        selected_phys |= MGMT_PHY_BR_1M_3SLOT;

                if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
                        selected_phys |= MGMT_PHY_BR_1M_5SLOT;

                if (lmp_edr_2m_capable(hdev)) {
                        if (!(hdev->pkt_type & HCI_2DH1))
                                selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

                        if (lmp_edr_3slot_capable(hdev) &&
                            !(hdev->pkt_type & HCI_2DH3))
                                selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

                        if (lmp_edr_5slot_capable(hdev) &&
                            !(hdev->pkt_type & HCI_2DH5))
                                selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

                        if (lmp_edr_3m_capable(hdev)) {
                                if (!(hdev->pkt_type & HCI_3DH1))
                                        selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

                                if (lmp_edr_3slot_capable(hdev) &&
                                    !(hdev->pkt_type & HCI_3DH3))
                                        selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

                                if (lmp_edr_5slot_capable(hdev) &&
                                    !(hdev->pkt_type & HCI_3DH5))
                                        selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
                        }
                }
        }

        if (lmp_le_capable(hdev)) {
                if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
                        selected_phys |= MGMT_PHY_LE_1M_TX;

                if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
                        selected_phys |= MGMT_PHY_LE_1M_RX;

                if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
                        selected_phys |= MGMT_PHY_LE_2M_TX;

                if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
                        selected_phys |= MGMT_PHY_LE_2M_RX;

                if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
                        selected_phys |= MGMT_PHY_LE_CODED_TX;

                if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
                        selected_phys |= MGMT_PHY_LE_CODED_RX;
        }

        return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
        return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
                ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

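/* Settings that this controller could support, regardless of whether
 * they are currently enabled.
 */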
static u32 get_supported_settings(struct hci_dev *hdev)
{
        u32 settings = 0;

        settings |= MGMT_SETTING_POWERED;
        settings |= MGMT_SETTING_BONDABLE;
        settings |= MGMT_SETTING_DEBUG_KEYS;
        settings |= MGMT_SETTING_CONNECTABLE;
        settings |= MGMT_SETTING_DISCOVERABLE;

        if (lmp_bredr_capable(hdev)) {
                if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
                        settings |= MGMT_SETTING_FAST_CONNECTABLE;
                settings |= MGMT_SETTING_BREDR;
                settings |= MGMT_SETTING_LINK_SECURITY;

                if (lmp_ssp_capable(hdev)) {
                        settings |= MGMT_SETTING_SSP;
                        if (IS_ENABLED(CONFIG_BT_HS))
                                settings |= MGMT_SETTING_HS;
                }

                if (lmp_sc_capable(hdev))
                        settings |= MGMT_SETTING_SECURE_CONN;

                if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
                             &hdev->quirks))
                        settings |= MGMT_SETTING_WIDEBAND_SPEECH;
        }

        if (lmp_le_capable(hdev)) {
                settings |= MGMT_SETTING_LE;
                settings |= MGMT_SETTING_SECURE_CONN;
                settings |= MGMT_SETTING_PRIVACY;
                settings |= MGMT_SETTING_STATIC_ADDRESS;
                settings |= MGMT_SETTING_ADVERTISING;
        }

        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
            hdev->set_bdaddr)
                settings |= MGMT_SETTING_CONFIGURATION;

        if (cis_central_capable(hdev))
                settings |= MGMT_SETTING_CIS_CENTRAL;

        if (cis_peripheral_capable(hdev))
                settings |= MGMT_SETTING_CIS_PERIPHERAL;

        settings |= MGMT_SETTING_PHY_CONFIGURATION;

        return settings;
}

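/* Settings that are currently active, derived from the hdev flags. */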
static u32 get_current_settings(struct hci_dev *hdev)
{
        u32 settings = 0;

        if (hdev_is_powered(hdev))
                settings |= MGMT_SETTING_POWERED;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
                settings |= MGMT_SETTING_CONNECTABLE;

        if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                settings |= MGMT_SETTING_FAST_CONNECTABLE;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                settings |= MGMT_SETTING_DISCOVERABLE;

        if (hci_dev_test_flag(hdev, HCI_BONDABLE))
                settings |= MGMT_SETTING_BONDABLE;

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                settings |= MGMT_SETTING_BREDR;

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                settings |= MGMT_SETTING_LE;

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
                settings |= MGMT_SETTING_LINK_SECURITY;

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                settings |= MGMT_SETTING_SSP;

        if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
                settings |= MGMT_SETTING_HS;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                settings |= MGMT_SETTING_ADVERTISING;

        if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
                settings |= MGMT_SETTING_SECURE_CONN;

        if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
                settings |= MGMT_SETTING_DEBUG_KEYS;

        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                settings |= MGMT_SETTING_PRIVACY;

        /* The current setting for static address has two purposes. The
         * first is to indicate if the static address will be used and
         * the second is to indicate if it is actually set.
         *
         * This means if the static address is not configured, this flag
         * will never be set. If the address is configured, then whether
         * the address is actually used determines if the flag is set.
         *
         * For single-mode LE-only controllers and dual-mode controllers
         * with BR/EDR disabled, the existence of the static address will
         * be evaluated.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
                if (bacmp(&hdev->static_addr, BDADDR_ANY))
                        settings |= MGMT_SETTING_STATIC_ADDRESS;
        }

        if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
                settings |= MGMT_SETTING_WIDEBAND_SPEECH;

        if (cis_central_capable(hdev))
                settings |= MGMT_SETTING_CIS_CENTRAL;

        if (cis_peripheral_capable(hdev))
                settings |= MGMT_SETTING_CIS_PERIPHERAL;

        return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
        return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
        struct mgmt_pending_cmd *cmd;

        /* If there's a pending mgmt command the flags will not yet have
         * their final values, so check for this first.
         */
        cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
        if (cmd) {
                struct mgmt_mode *cp = cmd->param;
                if (cp->val == 0x01)
                        return LE_AD_GENERAL;
                else if (cp->val == 0x02)
                        return LE_AD_LIMITED;
        } else {
                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        return LE_AD_LIMITED;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        return LE_AD_GENERAL;
        }

        return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
        struct mgmt_pending_cmd *cmd;

        /* If there's a pending mgmt command the flag will not yet have
         * its final value, so check for this first.
         */
        cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
        if (cmd) {
                struct mgmt_mode *cp = cmd->param;

                return cp->val;
        }

        return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

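/* When the service cache timer expires, push the buffered EIR and
 * class of device updates to the controller.
 */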
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
        hci_update_eir_sync(hdev);
        hci_update_class_sync(hdev);

        return 0;
}

static void service_cache_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            service_cache.work);

        if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
                return;

        hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
        /* The generation of a new RPA and programming it into the
         * controller happens in the hci_req_enable_advertising()
         * function.
         */
        if (ext_adv_capable(hdev))
                return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
        else
                return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            rpa_expired.work);

        bt_dev_dbg(hdev, "");

        hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static void discov_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_off.work);

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        /* When the discoverable timeout triggers, just make sure
         * the limited discoverable flag is cleared. Even in the case
         * of a timeout triggered from general discoverable, it is
         * safe to unconditionally clear the flag.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;

        hci_update_discoverable(hdev);

        mgmt_new_settings(hdev);

        hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

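/* Mesh transmit bookkeeping: report completion of an outstanding mesh
 * packet to user space (unless silent), and kick off the next queued
 * transmission once advertising has been stopped.
 */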
static void mesh_send_complete(struct hci_dev *hdev,
                               struct mgmt_mesh_tx *mesh_tx, bool silent)
{
        u8 handle = mesh_tx->handle;

        if (!silent)
                mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
                           sizeof(handle), NULL);

        mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
        struct mgmt_mesh_tx *mesh_tx;

        hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
        hci_disable_advertising_sync(hdev);
        mesh_tx = mgmt_mesh_next(hdev, NULL);

        if (mesh_tx)
                mesh_send_complete(hdev, mesh_tx, false);

        return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
        struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

        if (!mesh_tx)
                return;

        err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
                                 mesh_send_start_complete);

        if (err < 0)
                mesh_send_complete(hdev, mesh_tx, false);
        else
                hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            mesh_send_done.work);

        if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
                return;

        hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

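/* First management access to a controller: set up the mgmt related
 * delayed work and switch the device into mgmt-controlled mode.
 */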
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
        if (hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
        INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
        INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

        /* Non-mgmt controlled devices get this bit set implicitly
         * so that pairing works for them; for mgmt, however, we
         * require user-space to enable it explicitly.
         */
        hci_dev_clear_flag(hdev, HCI_BONDABLE);

        hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
                                void *data, u16 data_len)
{
        struct mgmt_rp_read_info rp;

        bt_dev_dbg(hdev, "sock %p", sk);

        hci_dev_lock(hdev);

        memset(&rp, 0, sizeof(rp));

        bacpy(&rp.bdaddr, &hdev->bdaddr);

        rp.version = hdev->hci_ver;
        rp.manufacturer = cpu_to_le16(hdev->manufacturer);

        rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
        rp.current_settings = cpu_to_le32(get_current_settings(hdev));

        memcpy(rp.dev_class, hdev->dev_class, 3);

        memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
        memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

        hci_dev_unlock(hdev);

        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
                                 sizeof(rp));
}

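/* Append class of device, appearance and local name fields to an EIR
 * buffer, depending on which transports are enabled. Returns the
 * resulting EIR length.
 */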
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
        u16 eir_len = 0;
        size_t name_len;

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
                                          hdev->dev_class, 3);

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
                                          hdev->appearance);

        name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
        eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
                                  hdev->dev_name, name_len);

        name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
        eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
                                  hdev->short_name, name_len);

        return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
                                    void *data, u16 data_len)
{
        char buf[512];
        struct mgmt_rp_read_ext_info *rp = (void *)buf;
        u16 eir_len;

        bt_dev_dbg(hdev, "sock %p", sk);

        memset(&buf, 0, sizeof(buf));

        hci_dev_lock(hdev);

        bacpy(&rp->bdaddr, &hdev->bdaddr);

        rp->version = hdev->hci_ver;
        rp->manufacturer = cpu_to_le16(hdev->manufacturer);

        rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
        rp->current_settings = cpu_to_le32(get_current_settings(hdev));

        eir_len = append_eir_data_to_buf(hdev, rp->eir);
        rp->eir_len = cpu_to_le16(eir_len);

        hci_dev_unlock(hdev);

        /* If this command is called at least once, then the events
         * for class of device and local name changes are disabled
         * and only the new extended controller information event
         * is used.
         */
        hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
        hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
        hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
                                 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
        char buf[512];
        struct mgmt_ev_ext_info_changed *ev = (void *)buf;
        u16 eir_len;

        memset(buf, 0, sizeof(buf));

        eir_len = append_eir_data_to_buf(hdev, ev->eir);
        ev->eir_len = cpu_to_le16(eir_len);

        return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
                                  sizeof(*ev) + eir_len,
                                  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
        __le32 settings = cpu_to_le32(get_current_settings(hdev));

        return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
                                 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
        struct mgmt_ev_advertising_added ev;

        ev.instance = instance;

        mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
                              u8 instance)
{
        struct mgmt_ev_advertising_removed ev;

        ev.instance = instance;

        mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                /* Needed for the AUTO_OFF case, where the device might
                 * not have "really" been powered off.
                 */
                list_del_init(&p->action);

                switch (p->auto_connect) {
                case HCI_AUTO_CONN_DIRECT:
                case HCI_AUTO_CONN_ALWAYS:
                        list_add(&p->action, &hdev->pend_le_conns);
                        break;
                case HCI_AUTO_CONN_REPORT:
                        list_add(&p->action, &hdev->pend_le_reports);
                        break;
                default:
                        break;
                }
        }
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
        __le32 ev = cpu_to_le32(get_current_settings(hdev));

        return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
                                  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
        struct mgmt_pending_cmd *cmd = data;
        struct mgmt_mode *cp;

        /* Make sure cmd still outstanding. */
        if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
                return;

        cp = cmd->param;

        bt_dev_dbg(hdev, "err %d", err);

        if (!err) {
                if (cp->val) {
                        hci_dev_lock(hdev);
                        restart_le_actions(hdev);
                        hci_update_passive_scan(hdev);
                        hci_dev_unlock(hdev);
                }

                send_settings_rsp(cmd->sk, cmd->opcode, hdev);

                /* Only call new_settings() for power on, as power off is
                 * deferred to the hdev->power_off work, which calls
                 * hci_dev_do_close.
                 */
                if (cp->val)
                        new_settings(hdev, cmd->sk);
        } else {
                mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
                                mgmt_status(err));
        }

        mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
        struct mgmt_pending_cmd *cmd = data;
        struct mgmt_mode *cp = cmd->param;

        BT_DBG("%s", hdev->name);

        return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
{
        struct mgmt_mode *cp = data;
        struct mgmt_pending_cmd *cmd;
        int err;

        bt_dev_dbg(hdev, "sock %p", sk);

        if (cp->val != 0x00 && cp->val != 0x01)
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
                                       MGMT_STATUS_INVALID_PARAMS);

        hci_dev_lock(hdev);

        if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
                                      MGMT_STATUS_BUSY);
                goto failed;
        }

        if (!!cp->val == hdev_is_powered(hdev)) {
                err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
                goto failed;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        /* Cancel potentially blocking sync operation before power off */
        if (cp->val == 0x00) {
                __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
                err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
                                         mgmt_set_powered_complete);
        } else {
                /* Use hci_cmd_sync_submit since hdev might not be running */
                err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
                                          mgmt_set_powered_complete);
        }

        if (err < 0)
                mgmt_pending_remove(cmd);

failed:
        hci_dev_unlock(hdev);
        return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
        return new_settings(hdev, NULL);
}

struct cmd_lookup {
        struct sock *sk;
        struct hci_dev *hdev;
        u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
        struct cmd_lookup *match = data;

        send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

        list_del(&cmd->list);

        if (match->sk == NULL) {
                match->sk = cmd->sk;
                sock_hold(match->sk);
        }

        mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
        u8 *status = data;

        mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
        mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
        if (cmd->cmd_complete) {
                u8 *status = data;

                cmd->cmd_complete(cmd, *status);
                mgmt_pending_remove(cmd);

                return;
        }

        cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
        return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
                                 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
        return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
                                 cmd->param, sizeof(struct mgmt_addr_info));
}

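/* Check whether BR/EDR (respectively LE) commands can be used right now:
 * NOT_SUPPORTED if the controller lacks the feature, REJECTED if it is
 * supported but currently disabled.
 */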
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
        if (!lmp_bredr_capable(hdev))
                return MGMT_STATUS_NOT_SUPPORTED;
        else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return MGMT_STATUS_REJECTED;
        else
                return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
        if (!lmp_le_capable(hdev))
                return MGMT_STATUS_NOT_SUPPORTED;
        else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return MGMT_STATUS_REJECTED;
        else
                return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
                                           int err)
{
        struct mgmt_pending_cmd *cmd = data;

        bt_dev_dbg(hdev, "err %d", err);

        /* Make sure cmd still outstanding. */
        if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
                return;

        hci_dev_lock(hdev);

        if (err) {
                u8 mgmt_err = mgmt_status(err);
                mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
            hdev->discov_timeout > 0) {
                int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
                queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
        }

        send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
        new_settings(hdev, cmd->sk);

done:
        mgmt_pending_remove(cmd);
        hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
        BT_DBG("%s", hdev->name);

        return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
                            u16 len)
{
        struct mgmt_cp_set_discoverable *cp = data;
        struct mgmt_pending_cmd *cmd;
        u16 timeout;
        int err;

        bt_dev_dbg(hdev, "sock %p", sk);

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
                                       MGMT_STATUS_REJECTED);

        if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
                                       MGMT_STATUS_INVALID_PARAMS);

        timeout = __le16_to_cpu(cp->timeout);

        /* Disabling discoverable requires that no timeout is set,
         * and enabling limited discoverable requires a timeout.
         */
        if ((cp->val == 0x00 && timeout > 0) ||
            (cp->val == 0x02 && timeout == 0))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
                                       MGMT_STATUS_INVALID_PARAMS);

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev) && timeout > 0) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
                                      MGMT_STATUS_NOT_POWERED);
                goto failed;
        }

        if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
            pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
                                      MGMT_STATUS_BUSY);
                goto failed;
1585         }
1586
1587         if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1588                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1589                                       MGMT_STATUS_REJECTED);
1590                 goto failed;
1591         }
1592
1593         if (hdev->advertising_paused) {
1594                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595                                       MGMT_STATUS_BUSY);
1596                 goto failed;
1597         }
1598
1599         if (!hdev_is_powered(hdev)) {
1600                 bool changed = false;
1601
1602                 /* Setting limited discoverable when powered off is not
1603                  * a valid operation since it requires a timeout, so there
1604                  * is no need to check HCI_LIMITED_DISCOVERABLE here.
1605                  */
1606                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1607                         hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1608                         changed = true;
1609                 }
1610
1611                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1612                 if (err < 0)
1613                         goto failed;
1614
1615                 if (changed)
1616                         err = new_settings(hdev, sk);
1617
1618                 goto failed;
1619         }
1620
1621         /* If the current mode is the same, just update the timeout with
1622          * the new value. When only the timeout changes, no HCI
1623          * transactions are needed.
1624          */
1625         if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1626             (cp->val == 0x02) == hci_dev_test_flag(hdev,
1627                                                    HCI_LIMITED_DISCOVERABLE)) {
1628                 cancel_delayed_work(&hdev->discov_off);
1629                 hdev->discov_timeout = timeout;
1630
1631                 if (cp->val && hdev->discov_timeout > 0) {
1632                         int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1633                         queue_delayed_work(hdev->req_workqueue,
1634                                            &hdev->discov_off, to);
1635                 }
1636
1637                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1638                 goto failed;
1639         }
1640
1641         cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1642         if (!cmd) {
1643                 err = -ENOMEM;
1644                 goto failed;
1645         }
1646
1647         /* Cancel any discoverable timeout that might still be active and
1648          * store the new timeout value. The timeout is re-armed in the
1649          * complete handler.
1650          */
1651         cancel_delayed_work(&hdev->discov_off);
1652         hdev->discov_timeout = timeout;
1653
1654         if (cp->val)
1655                 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1656         else
1657                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1658
1659         /* Limited discoverable mode */
1660         if (cp->val == 0x02)
1661                 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1662         else
1663                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1664
1665         err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1666                                  mgmt_set_discoverable_complete);
1667
1668         if (err < 0)
1669                 mgmt_pending_remove(cmd);
1670
1671 failed:
1672         hci_dev_unlock(hdev);
1673         return err;
1674 }
1675
1676 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1677                                           int err)
1678 {
1679         struct mgmt_pending_cmd *cmd = data;
1680
1681         bt_dev_dbg(hdev, "err %d", err);
1682
1683         /* Make sure cmd still outstanding. */
1684         if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1685                 return;
1686
1687         hci_dev_lock(hdev);
1688
1689         if (err) {
1690                 u8 mgmt_err = mgmt_status(err);
1691                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1692                 goto done;
1693         }
1694
1695         send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1696         new_settings(hdev, cmd->sk);
1697
1698 done:
1699         if (cmd)
1700                 mgmt_pending_remove(cmd);
1701
1702         hci_dev_unlock(hdev);
1703 }
1704
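/* Apply a connectable change purely at the settings level (used while the
 * controller is powered off): adjust the flags, answer the command and, if
 * the value actually changed, refresh scan state and emit New Settings.
 */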
1705 static int set_connectable_update_settings(struct hci_dev *hdev,
1706                                            struct sock *sk, u8 val)
1707 {
1708         bool changed = false;
1709         int err;
1710
1711         if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1712                 changed = true;
1713
1714         if (val) {
1715                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1716         } else {
1717                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1718                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1719         }
1720
1721         err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1722         if (err < 0)
1723                 return err;
1724
1725         if (changed) {
1726                 hci_update_scan(hdev);
1727                 hci_update_passive_scan(hdev);
1728                 return new_settings(hdev, sk);
1729         }
1730
1731         return 0;
1732 }
1733
1734 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1735 {
1736         BT_DBG("%s", hdev->name);
1737
1738         return hci_update_connectable_sync(hdev);
1739 }
1740
1741 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1742                            u16 len)
1743 {
1744         struct mgmt_mode *cp = data;
1745         struct mgmt_pending_cmd *cmd;
1746         int err;
1747
1748         bt_dev_dbg(hdev, "sock %p", sk);
1749
1750         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1751             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1752                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1753                                        MGMT_STATUS_REJECTED);
1754
1755         if (cp->val != 0x00 && cp->val != 0x01)
1756                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1757                                        MGMT_STATUS_INVALID_PARAMS);
1758
1759         hci_dev_lock(hdev);
1760
1761         if (!hdev_is_powered(hdev)) {
1762                 err = set_connectable_update_settings(hdev, sk, cp->val);
1763                 goto failed;
1764         }
1765
1766         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1767             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1768                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1769                                       MGMT_STATUS_BUSY);
1770                 goto failed;
1771         }
1772
1773         cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1774         if (!cmd) {
1775                 err = -ENOMEM;
1776                 goto failed;
1777         }
1778
1779         if (cp->val) {
1780                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1781         } else {
1782                 if (hdev->discov_timeout > 0)
1783                         cancel_delayed_work(&hdev->discov_off);
1784
1785                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1786                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1787                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1788         }
1789
1790         err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1791                                  mgmt_set_connectable_complete);
1792
1793         if (err < 0)
1794                 mgmt_pending_remove(cmd);
1795
1796 failed:
1797         hci_dev_unlock(hdev);
1798         return err;
1799 }
1800
1801 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1802                         u16 len)
1803 {
1804         struct mgmt_mode *cp = data;
1805         bool changed;
1806         int err;
1807
1808         bt_dev_dbg(hdev, "sock %p", sk);
1809
1810         if (cp->val != 0x00 && cp->val != 0x01)
1811                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1812                                        MGMT_STATUS_INVALID_PARAMS);
1813
1814         hci_dev_lock(hdev);
1815
1816         if (cp->val)
1817                 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1818         else
1819                 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1820
1821         err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1822         if (err < 0)
1823                 goto unlock;
1824
1825         if (changed) {
1826                 /* In limited privacy mode the change of bondable mode
1827                  * may affect the local advertising address.
1828                  */
1829                 hci_update_discoverable(hdev);
1830
1831                 err = new_settings(hdev, sk);
1832         }
1833
1834 unlock:
1835         hci_dev_unlock(hdev);
1836         return err;
1837 }
1838
1839 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1840                              u16 len)
1841 {
1842         struct mgmt_mode *cp = data;
1843         struct mgmt_pending_cmd *cmd;
1844         u8 val, status;
1845         int err;
1846
1847         bt_dev_dbg(hdev, "sock %p", sk);
1848
1849         status = mgmt_bredr_support(hdev);
1850         if (status)
1851                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1852                                        status);
1853
1854         if (cp->val != 0x00 && cp->val != 0x01)
1855                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1856                                        MGMT_STATUS_INVALID_PARAMS);
1857
1858         hci_dev_lock(hdev);
1859
1860         if (!hdev_is_powered(hdev)) {
1861                 bool changed = false;
1862
1863                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1864                         hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1865                         changed = true;
1866                 }
1867
1868                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1869                 if (err < 0)
1870                         goto failed;
1871
1872                 if (changed)
1873                         err = new_settings(hdev, sk);
1874
1875                 goto failed;
1876         }
1877
1878         if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1879                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1880                                       MGMT_STATUS_BUSY);
1881                 goto failed;
1882         }
1883
1884         val = !!cp->val;
1885
1886         if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1887                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1888                 goto failed;
1889         }
1890
1891         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1892         if (!cmd) {
1893                 err = -ENOMEM;
1894                 goto failed;
1895         }
1896
1897         err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1898         if (err < 0) {
1899                 mgmt_pending_remove(cmd);
1900                 goto failed;
1901         }
1902
1903 failed:
1904         hci_dev_unlock(hdev);
1905         return err;
1906 }
1907
1908 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1909 {
1910         struct cmd_lookup match = { NULL, hdev };
1911         struct mgmt_pending_cmd *cmd = data;
1912         struct mgmt_mode *cp = cmd->param;
1913         u8 enable = cp->val;
1914         bool changed;
1915
1916         /* Make sure cmd still outstanding. */
1917         if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1918                 return;
1919
1920         if (err) {
1921                 u8 mgmt_err = mgmt_status(err);
1922
1923                 if (enable && hci_dev_test_and_clear_flag(hdev,
1924                                                           HCI_SSP_ENABLED)) {
1925                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1926                         new_settings(hdev, NULL);
1927                 }
1928
1929                 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1930                                      &mgmt_err);
1931                 return;
1932         }
1933
1934         if (enable) {
1935                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1936         } else {
1937                 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1938
1939                 if (!changed)
1940                         changed = hci_dev_test_and_clear_flag(hdev,
1941                                                               HCI_HS_ENABLED);
1942                 else
1943                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1944         }
1945
1946         mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1947
1948         if (changed)
1949                 new_settings(hdev, match.sk);
1950
1951         if (match.sk)
1952                 sock_put(match.sk);
1953
1954         hci_update_eir_sync(hdev);
1955 }
1956
1957 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1958 {
1959         struct mgmt_pending_cmd *cmd = data;
1960         struct mgmt_mode *cp = cmd->param;
1961         bool changed = false;
1962         int err;
1963
1964         if (cp->val)
1965                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1966
1967         err = hci_write_ssp_mode_sync(hdev, cp->val);
1968
1969         if (!err && changed)
1970                 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1971
1972         return err;
1973 }
1974
1975 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1976 {
1977         struct mgmt_mode *cp = data;
1978         struct mgmt_pending_cmd *cmd;
1979         u8 status;
1980         int err;
1981
1982         bt_dev_dbg(hdev, "sock %p", sk);
1983
1984         status = mgmt_bredr_support(hdev);
1985         if (status)
1986                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1987
1988         if (!lmp_ssp_capable(hdev))
1989                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1990                                        MGMT_STATUS_NOT_SUPPORTED);
1991
1992         if (cp->val != 0x00 && cp->val != 0x01)
1993                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1994                                        MGMT_STATUS_INVALID_PARAMS);
1995
1996         hci_dev_lock(hdev);
1997
1998         if (!hdev_is_powered(hdev)) {
1999                 bool changed;
2000
2001                 if (cp->val) {
2002                         changed = !hci_dev_test_and_set_flag(hdev,
2003                                                              HCI_SSP_ENABLED);
2004                 } else {
2005                         changed = hci_dev_test_and_clear_flag(hdev,
2006                                                               HCI_SSP_ENABLED);
2007                         if (!changed)
2008                                 changed = hci_dev_test_and_clear_flag(hdev,
2009                                                                       HCI_HS_ENABLED);
2010                         else
2011                                 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2012                 }
2013
2014                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2015                 if (err < 0)
2016                         goto failed;
2017
2018                 if (changed)
2019                         err = new_settings(hdev, sk);
2020
2021                 goto failed;
2022         }
2023
2024         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2025                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2026                                       MGMT_STATUS_BUSY);
2027                 goto failed;
2028         }
2029
2030         if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2031                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2032                 goto failed;
2033         }
2034
2035         cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2036         if (!cmd)
2037                 err = -ENOMEM;
2038         else
2039                 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2040                                          set_ssp_complete);
2041
2042         if (err < 0) {
2043                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2044                                       MGMT_STATUS_FAILED);
2045
2046                 if (cmd)
2047                         mgmt_pending_remove(cmd);
2048         }
2049
2050 failed:
2051         hci_dev_unlock(hdev);
2052         return err;
2053 }
2054
2055 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2056 {
2057         struct mgmt_mode *cp = data;
2058         bool changed;
2059         u8 status;
2060         int err;
2061
2062         bt_dev_dbg(hdev, "sock %p", sk);
2063
2064         if (!IS_ENABLED(CONFIG_BT_HS))
2065                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2066                                        MGMT_STATUS_NOT_SUPPORTED);
2067
2068         status = mgmt_bredr_support(hdev);
2069         if (status)
2070                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2071
2072         if (!lmp_ssp_capable(hdev))
2073                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2074                                        MGMT_STATUS_NOT_SUPPORTED);
2075
2076         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2077                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2078                                        MGMT_STATUS_REJECTED);
2079
2080         if (cp->val != 0x00 && cp->val != 0x01)
2081                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2082                                        MGMT_STATUS_INVALID_PARAMS);
2083
2084         hci_dev_lock(hdev);
2085
2086         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2087                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2088                                       MGMT_STATUS_BUSY);
2089                 goto unlock;
2090         }
2091
2092         if (cp->val) {
2093                 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2094         } else {
2095                 if (hdev_is_powered(hdev)) {
2096                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2097                                               MGMT_STATUS_REJECTED);
2098                         goto unlock;
2099                 }
2100
2101                 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2102         }
2103
2104         err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2105         if (err < 0)
2106                 goto unlock;
2107
2108         if (changed)
2109                 err = new_settings(hdev, sk);
2110
2111 unlock:
2112         hci_dev_unlock(hdev);
2113         return err;
2114 }
2115
2116 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2117 {
2118         struct cmd_lookup match = { NULL, hdev };
2119         u8 status = mgmt_status(err);
2120
2121         bt_dev_dbg(hdev, "err %d", err);
2122
2123         if (status) {
2124                 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2125                                                         &status);
2126                 return;
2127         }
2128
2129         mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2130
2131         new_settings(hdev, match.sk);
2132
2133         if (match.sk)
2134                 sock_put(match.sk);
2135 }
2136
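/* cmd_sync handler for Set LE: when disabling, remove advertising instances
 * and stop advertising first; then write the LE host support setting and,
 * if LE ended up enabled, refresh advertising data, scan response data and
 * passive scanning.
 */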
2137 static int set_le_sync(struct hci_dev *hdev, void *data)
2138 {
2139         struct mgmt_pending_cmd *cmd = data;
2140         struct mgmt_mode *cp = cmd->param;
2141         u8 val = !!cp->val;
2142         int err;
2143
2144         if (!val) {
2145                 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2146
2147                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2148                         hci_disable_advertising_sync(hdev);
2149
2150                 if (ext_adv_capable(hdev))
2151                         hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2152         } else {
2153                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2154         }
2155
2156         err = hci_write_le_host_supported_sync(hdev, val, 0);
2157
2158         /* Make sure the controller has a good default for
2159          * advertising data. Restrict the update to when LE
2160          * has actually been enabled. During power on, the
2161          * update in powered_update_hci will take care of it.
2162          */
2163         if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2164                 if (ext_adv_capable(hdev)) {
2165                         int status;
2166
2167                         status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2168                         if (!status)
2169                                 hci_update_scan_rsp_data_sync(hdev, 0x00);
2170                 } else {
2171                         hci_update_adv_data_sync(hdev, 0x00);
2172                         hci_update_scan_rsp_data_sync(hdev, 0x00);
2173                 }
2174
2175                 hci_update_passive_scan(hdev);
2176         }
2177
2178         return err;
2179 }
2180
2181 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2182 {
2183         struct mgmt_pending_cmd *cmd = data;
2184         u8 status = mgmt_status(err);
2185         struct sock *sk = cmd->sk;
2186
2187         if (status) {
2188                 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2189                                      cmd_status_rsp, &status);
2190                 return;
2191         }
2192
2193         mgmt_pending_remove(cmd);
2194         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2195 }
2196
2197 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2198 {
2199         struct mgmt_pending_cmd *cmd = data;
2200         struct mgmt_cp_set_mesh *cp = cmd->param;
2201         size_t len = cmd->param_len;
2202
2203         memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2204
2205         if (cp->enable)
2206                 hci_dev_set_flag(hdev, HCI_MESH);
2207         else
2208                 hci_dev_clear_flag(hdev, HCI_MESH);
2209
2210         len -= sizeof(*cp);
2211
2212         /* If filters don't fit, forward all adv pkts */
2213         if (len <= sizeof(hdev->mesh_ad_types))
2214                 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2215
2216         hci_update_passive_scan_sync(hdev);
2217         return 0;
2218 }
2219
2220 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2221 {
2222         struct mgmt_cp_set_mesh *cp = data;
2223         struct mgmt_pending_cmd *cmd;
2224         int err = 0;
2225
2226         bt_dev_dbg(hdev, "sock %p", sk);
2227
2228         if (!lmp_le_capable(hdev) ||
2229             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2230                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2231                                        MGMT_STATUS_NOT_SUPPORTED);
2232
2233         if (cp->enable != 0x00 && cp->enable != 0x01)
2234                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2235                                        MGMT_STATUS_INVALID_PARAMS);
2236
2237         hci_dev_lock(hdev);
2238
2239         cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2240         if (!cmd)
2241                 err = -ENOMEM;
2242         else
2243                 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2244                                          set_mesh_complete);
2245
2246         if (err < 0) {
2247                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2248                                       MGMT_STATUS_FAILED);
2249
2250                 if (cmd)
2251                         mgmt_pending_remove(cmd);
2252         }
2253
2254         hci_dev_unlock(hdev);
2255         return err;
2256 }
2257
2258 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2259 {
2260         struct mgmt_mesh_tx *mesh_tx = data;
2261         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2262         unsigned long mesh_send_interval;
2263         u8 mgmt_err = mgmt_status(err);
2264
2265         /* Report any errors here, but don't report completion */
2266
2267         if (mgmt_err) {
2268                 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2269                 /* Send Complete Error Code for handle */
2270                 mesh_send_complete(hdev, mesh_tx, false);
2271                 return;
2272         }
2273
2274         mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2275         queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2276                            mesh_send_interval);
2277 }
2278
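/* cmd_sync handler for a queued mesh transmission: the payload is placed in
 * a short-lived advertising instance which is scheduled right away, unless
 * another instance is already being advertised, in which case it is picked
 * up from the instance queue later.
 */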
2279 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2280 {
2281         struct mgmt_mesh_tx *mesh_tx = data;
2282         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2283         struct adv_info *adv, *next_instance;
2284         u8 instance = hdev->le_num_of_adv_sets + 1;
2285         u16 timeout, duration;
2286         int err = 0;
2287
2288         if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2289                 return MGMT_STATUS_BUSY;
2290
2291         timeout = 1000;
2292         duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2293         adv = hci_add_adv_instance(hdev, instance, 0,
2294                                    send->adv_data_len, send->adv_data,
2295                                    0, NULL,
2296                                    timeout, duration,
2297                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
2298                                    hdev->le_adv_min_interval,
2299                                    hdev->le_adv_max_interval,
2300                                    mesh_tx->handle);
2301
2302         if (!IS_ERR(adv))
2303                 mesh_tx->instance = instance;
2304         else
2305                 err = PTR_ERR(adv);
2306
2307         if (hdev->cur_adv_instance == instance) {
2308                 /* If the currently advertised instance is being changed then
2309                  * cancel the current advertising and schedule the next
2310                  * instance. If there is only one instance then the overridden
2311                  * advertising data will be visible right away.
2312                  */
2313                 cancel_adv_timeout(hdev);
2314
2315                 next_instance = hci_get_next_instance(hdev, instance);
2316                 if (next_instance)
2317                         instance = next_instance->instance;
2318                 else
2319                         instance = 0;
2320         } else if (hdev->adv_instance_timeout) {
2321                 /* Immediately advertise the new instance if no other, or
2322                  * let it go naturally from queue if ADV is already happening
2323                  */
2324                 instance = 0;
2325         }
2326
2327         if (instance)
2328                 return hci_schedule_adv_instance_sync(hdev, instance, true);
2329
2330         return err;
2331 }
2332
2333 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2334 {
2335         struct mgmt_rp_mesh_read_features *rp = data;
2336
2337         if (rp->used_handles >= rp->max_handles)
2338                 return;
2339
2340         rp->handles[rp->used_handles++] = mesh_tx->handle;
2341 }
2342
2343 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2344                          void *data, u16 len)
2345 {
2346         struct mgmt_rp_mesh_read_features rp;
2347
2348         if (!lmp_le_capable(hdev) ||
2349             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2350                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2351                                        MGMT_STATUS_NOT_SUPPORTED);
2352
2353         memset(&rp, 0, sizeof(rp));
2354         rp.index = cpu_to_le16(hdev->id);
2355         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2356                 rp.max_handles = MESH_HANDLES_MAX;
2357
2358         hci_dev_lock(hdev);
2359
2360         if (rp.max_handles)
2361                 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2362
2363         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2364                           rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2365
2366         hci_dev_unlock(hdev);
2367         return 0;
2368 }
2369
2370 static int send_cancel(struct hci_dev *hdev, void *data)
2371 {
2372         struct mgmt_pending_cmd *cmd = data;
2373         struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2374         struct mgmt_mesh_tx *mesh_tx;
2375
2376         if (!cancel->handle) {
2377                 do {
2378                         mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2379
2380                         if (mesh_tx)
2381                                 mesh_send_complete(hdev, mesh_tx, false);
2382                 } while (mesh_tx);
2383         } else {
2384                 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2385
2386                 if (mesh_tx && mesh_tx->sk == cmd->sk)
2387                         mesh_send_complete(hdev, mesh_tx, false);
2388         }
2389
2390         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2391                           0, NULL, 0);
2392         mgmt_pending_free(cmd);
2393
2394         return 0;
2395 }
2396
2397 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2398                             void *data, u16 len)
2399 {
2400         struct mgmt_pending_cmd *cmd;
2401         int err;
2402
2403         if (!lmp_le_capable(hdev) ||
2404             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2405                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2406                                        MGMT_STATUS_NOT_SUPPORTED);
2407
2408         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2409                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2410                                        MGMT_STATUS_REJECTED);
2411
2412         hci_dev_lock(hdev);
2413         cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2414         if (!cmd)
2415                 err = -ENOMEM;
2416         else
2417                 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2418
2419         if (err < 0) {
2420                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2421                                       MGMT_STATUS_FAILED);
2422
2423                 if (cmd)
2424                         mgmt_pending_free(cmd);
2425         }
2426
2427         hci_dev_unlock(hdev);
2428         return err;
2429 }
2430
2431 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2432 {
2433         struct mgmt_mesh_tx *mesh_tx;
2434         struct mgmt_cp_mesh_send *send = data;
2435         struct mgmt_rp_mesh_read_features rp;
2436         bool sending;
2437         int err = 0;
2438
2439         if (!lmp_le_capable(hdev) ||
2440             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2441                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2442                                        MGMT_STATUS_NOT_SUPPORTED);
2443         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2444             len <= MGMT_MESH_SEND_SIZE ||
2445             len > (MGMT_MESH_SEND_SIZE + 31))
2446                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2447                                        MGMT_STATUS_REJECTED);
2448
2449         hci_dev_lock(hdev);
2450
2451         memset(&rp, 0, sizeof(rp));
2452         rp.max_handles = MESH_HANDLES_MAX;
2453
2454         mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2455
2456         if (rp.max_handles <= rp.used_handles) {
2457                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2458                                       MGMT_STATUS_BUSY);
2459                 goto done;
2460         }
2461
2462         sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2463         mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2464
2465         if (!mesh_tx)
2466                 err = -ENOMEM;
2467         else if (!sending)
2468                 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2469                                          mesh_send_start_complete);
2470
2471         if (err < 0) {
2472                 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2473                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2474                                       MGMT_STATUS_FAILED);
2475
2476                 if (mesh_tx) {
2477                         if (sending)
2478                                 mgmt_mesh_remove(mesh_tx);
2479                 }
2480         } else {
2481                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2482
2483                 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2484                                   &mesh_tx->handle, 1);
2485         }
2486
2487 done:
2488         hci_dev_unlock(hdev);
2489         return err;
2490 }
2491
2492 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2493 {
2494         struct mgmt_mode *cp = data;
2495         struct mgmt_pending_cmd *cmd;
2496         int err;
2497         u8 val, enabled;
2498
2499         bt_dev_dbg(hdev, "sock %p", sk);
2500
2501         if (!lmp_le_capable(hdev))
2502                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2503                                        MGMT_STATUS_NOT_SUPPORTED);
2504
2505         if (cp->val != 0x00 && cp->val != 0x01)
2506                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2507                                        MGMT_STATUS_INVALID_PARAMS);
2508
2509         /* Single-mode LE-only controllers, and dual-mode controllers
2510          * configured as LE-only devices, do not allow switching LE off.
2511          * Such controllers either have LE enabled explicitly or have had
2512          * BR/EDR switched off previously.
2513          *
2514          * When trying to enable an already enabled LE, gracefully send a
2515          * positive response. Trying to disable it, however, results in
2516          * rejection.
2517          */
2518         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2519                 if (cp->val == 0x01)
2520                         return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2521
2522                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2523                                        MGMT_STATUS_REJECTED);
2524         }
2525
2526         hci_dev_lock(hdev);
2527
2528         val = !!cp->val;
2529         enabled = lmp_host_le_capable(hdev);
2530
2531         if (!hdev_is_powered(hdev) || val == enabled) {
2532                 bool changed = false;
2533
2534                 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2535                         hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2536                         changed = true;
2537                 }
2538
2539                 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2540                         hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2541                         changed = true;
2542                 }
2543
2544                 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2545                 if (err < 0)
2546                         goto unlock;
2547
2548                 if (changed)
2549                         err = new_settings(hdev, sk);
2550
2551                 goto unlock;
2552         }
2553
2554         if (pending_find(MGMT_OP_SET_LE, hdev) ||
2555             pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2556                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2557                                       MGMT_STATUS_BUSY);
2558                 goto unlock;
2559         }
2560
2561         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2562         if (!cmd)
2563                 err = -ENOMEM;
2564         else
2565                 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2566                                          set_le_complete);
2567
2568         if (err < 0) {
2569                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2570                                       MGMT_STATUS_FAILED);
2571
2572                 if (cmd)
2573                         mgmt_pending_remove(cmd);
2574         }
2575
2576 unlock:
2577         hci_dev_unlock(hdev);
2578         return err;
2579 }
2580
2581 /* Helper to test for pending mgmt commands that can cause CoD or EIR HCI
2582  * commands. Only one such pending mgmt command is allowed at a time, since
2583  * otherwise it is impossible to track what the current values are or will
2584  * be, and thus whether a new HCI command needs to be sent and with what
2585  * value.
2586  */
2587 static bool pending_eir_or_class(struct hci_dev *hdev)
2588 {
2589         struct mgmt_pending_cmd *cmd;
2590
2591         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2592                 switch (cmd->opcode) {
2593                 case MGMT_OP_ADD_UUID:
2594                 case MGMT_OP_REMOVE_UUID:
2595                 case MGMT_OP_SET_DEV_CLASS:
2596                 case MGMT_OP_SET_POWERED:
2597                         return true;
2598                 }
2599         }
2600
2601         return false;
2602 }
2603
2604 static const u8 bluetooth_base_uuid[] = {
2605                         0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2606                         0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2607 };
2608
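/* Report the smallest representation of a UUID: 128 bits unless it is based
 * on the Bluetooth Base UUID, in which case 16 or 32 bits depending on the
 * embedded value.
 */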
2609 static u8 get_uuid_size(const u8 *uuid)
2610 {
2611         u32 val;
2612
2613         if (memcmp(uuid, bluetooth_base_uuid, 12))
2614                 return 128;
2615
2616         val = get_unaligned_le32(&uuid[12]);
2617         if (val > 0xffff)
2618                 return 32;
2619
2620         return 16;
2621 }
2622
2623 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2624 {
2625         struct mgmt_pending_cmd *cmd = data;
2626
2627         bt_dev_dbg(hdev, "err %d", err);
2628
2629         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2630                           mgmt_status(err), hdev->dev_class, 3);
2631
2632         mgmt_pending_free(cmd);
2633 }
2634
2635 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2636 {
2637         int err;
2638
2639         err = hci_update_class_sync(hdev);
2640         if (err)
2641                 return err;
2642
2643         return hci_update_eir_sync(hdev);
2644 }
2645
2646 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2647 {
2648         struct mgmt_cp_add_uuid *cp = data;
2649         struct mgmt_pending_cmd *cmd;
2650         struct bt_uuid *uuid;
2651         int err;
2652
2653         bt_dev_dbg(hdev, "sock %p", sk);
2654
2655         hci_dev_lock(hdev);
2656
2657         if (pending_eir_or_class(hdev)) {
2658                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2659                                       MGMT_STATUS_BUSY);
2660                 goto failed;
2661         }
2662
2663         uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2664         if (!uuid) {
2665                 err = -ENOMEM;
2666                 goto failed;
2667         }
2668
2669         memcpy(uuid->uuid, cp->uuid, 16);
2670         uuid->svc_hint = cp->svc_hint;
2671         uuid->size = get_uuid_size(cp->uuid);
2672
2673         list_add_tail(&uuid->list, &hdev->uuids);
2674
2675         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2676         if (!cmd) {
2677                 err = -ENOMEM;
2678                 goto failed;
2679         }
2680
2681         err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2682         if (err < 0) {
2683                 mgmt_pending_free(cmd);
2684                 goto failed;
2685         }
2686
2687 failed:
2688         hci_dev_unlock(hdev);
2689         return err;
2690 }
2691
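/* Arm the service cache timer (if not already armed) while powered, so that
 * the class/EIR update after clearing UUIDs is deferred to the delayed
 * service_cache work. Returns true only when the timer was newly started.
 */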
2692 static bool enable_service_cache(struct hci_dev *hdev)
2693 {
2694         if (!hdev_is_powered(hdev))
2695                 return false;
2696
2697         if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2698                 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2699                                    CACHE_TIMEOUT);
2700                 return true;
2701         }
2702
2703         return false;
2704 }
2705
2706 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2707 {
2708         int err;
2709
2710         err = hci_update_class_sync(hdev);
2711         if (err)
2712                 return err;
2713
2714         return hci_update_eir_sync(hdev);
2715 }
2716
2717 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2718                        u16 len)
2719 {
2720         struct mgmt_cp_remove_uuid *cp = data;
2721         struct mgmt_pending_cmd *cmd;
2722         struct bt_uuid *match, *tmp;
2723         static const u8 bt_uuid_any[] = {
2724                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2725         };
2726         int err, found;
2727
2728         bt_dev_dbg(hdev, "sock %p", sk);
2729
2730         hci_dev_lock(hdev);
2731
2732         if (pending_eir_or_class(hdev)) {
2733                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2734                                       MGMT_STATUS_BUSY);
2735                 goto unlock;
2736         }
2737
2738         if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2739                 hci_uuids_clear(hdev);
2740
2741                 if (enable_service_cache(hdev)) {
2742                         err = mgmt_cmd_complete(sk, hdev->id,
2743                                                 MGMT_OP_REMOVE_UUID,
2744                                                 0, hdev->dev_class, 3);
2745                         goto unlock;
2746                 }
2747
2748                 goto update_class;
2749         }
2750
2751         found = 0;
2752
2753         list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2754                 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2755                         continue;
2756
2757                 list_del(&match->list);
2758                 kfree(match);
2759                 found++;
2760         }
2761
2762         if (found == 0) {
2763                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2764                                       MGMT_STATUS_INVALID_PARAMS);
2765                 goto unlock;
2766         }
2767
2768 update_class:
2769         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2770         if (!cmd) {
2771                 err = -ENOMEM;
2772                 goto unlock;
2773         }
2774
2775         err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2776                                  mgmt_class_complete);
2777         if (err < 0)
2778                 mgmt_pending_free(cmd);
2779
2780 unlock:
2781         hci_dev_unlock(hdev);
2782         return err;
2783 }
2784
2785 static int set_class_sync(struct hci_dev *hdev, void *data)
2786 {
2787         int err = 0;
2788
2789         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2790                 cancel_delayed_work_sync(&hdev->service_cache);
2791                 err = hci_update_eir_sync(hdev);
2792         }
2793
2794         if (err)
2795                 return err;
2796
2797         return hci_update_class_sync(hdev);
2798 }
2799
2800 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2801                          u16 len)
2802 {
2803         struct mgmt_cp_set_dev_class *cp = data;
2804         struct mgmt_pending_cmd *cmd;
2805         int err;
2806
2807         bt_dev_dbg(hdev, "sock %p", sk);
2808
2809         if (!lmp_bredr_capable(hdev))
2810                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2811                                        MGMT_STATUS_NOT_SUPPORTED);
2812
2813         hci_dev_lock(hdev);
2814
2815         if (pending_eir_or_class(hdev)) {
2816                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2817                                       MGMT_STATUS_BUSY);
2818                 goto unlock;
2819         }
2820
2821         if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2822                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2823                                       MGMT_STATUS_INVALID_PARAMS);
2824                 goto unlock;
2825         }
2826
2827         hdev->major_class = cp->major;
2828         hdev->minor_class = cp->minor;
2829
2830         if (!hdev_is_powered(hdev)) {
2831                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2832                                         hdev->dev_class, 3);
2833                 goto unlock;
2834         }
2835
2836         cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2837         if (!cmd) {
2838                 err = -ENOMEM;
2839                 goto unlock;
2840         }
2841
2842         err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2843                                  mgmt_class_complete);
2844         if (err < 0)
2845                 mgmt_pending_free(cmd);
2846
2847 unlock:
2848         hci_dev_unlock(hdev);
2849         return err;
2850 }
2851
2852 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2853                           u16 len)
2854 {
2855         struct mgmt_cp_load_link_keys *cp = data;
2856         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2857                                    sizeof(struct mgmt_link_key_info));
2858         u16 key_count, expected_len;
2859         bool changed;
2860         int i;
2861
2862         bt_dev_dbg(hdev, "sock %p", sk);
2863
2864         if (!lmp_bredr_capable(hdev))
2865                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2866                                        MGMT_STATUS_NOT_SUPPORTED);
2867
2868         key_count = __le16_to_cpu(cp->key_count);
2869         if (key_count > max_key_count) {
2870                 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2871                            key_count);
2872                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2873                                        MGMT_STATUS_INVALID_PARAMS);
2874         }
2875
2876         expected_len = struct_size(cp, keys, key_count);
2877         if (expected_len != len) {
2878                 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2879                            expected_len, len);
2880                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2881                                        MGMT_STATUS_INVALID_PARAMS);
2882         }
2883
2884         if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2885                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2886                                        MGMT_STATUS_INVALID_PARAMS);
2887
2888         bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2889                    key_count);
2890
2891         for (i = 0; i < key_count; i++) {
2892                 struct mgmt_link_key_info *key = &cp->keys[i];
2893
2894                 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2895                         return mgmt_cmd_status(sk, hdev->id,
2896                                                MGMT_OP_LOAD_LINK_KEYS,
2897                                                MGMT_STATUS_INVALID_PARAMS);
2898         }
2899
2900         hci_dev_lock(hdev);
2901
2902         hci_link_keys_clear(hdev);
2903
2904         if (cp->debug_keys)
2905                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2906         else
2907                 changed = hci_dev_test_and_clear_flag(hdev,
2908                                                       HCI_KEEP_DEBUG_KEYS);
2909
2910         if (changed)
2911                 new_settings(hdev, NULL);
2912
2913         for (i = 0; i < key_count; i++) {
2914                 struct mgmt_link_key_info *key = &cp->keys[i];
2915
2916                 if (hci_is_blocked_key(hdev,
2917                                        HCI_BLOCKED_KEY_TYPE_LINKKEY,
2918                                        key->val)) {
2919                         bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2920                                     &key->addr.bdaddr);
2921                         continue;
2922                 }
2923
2924                 /* Always ignore debug keys and require a new pairing if
2925                  * the user wants to use them.
2926                  */
2927                 if (key->type == HCI_LK_DEBUG_COMBINATION)
2928                         continue;
2929
2930                 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2931                                  key->type, key->pin_len, NULL);
2932         }
2933
2934         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2935
2936         hci_dev_unlock(hdev);
2937
2938         return 0;
2939 }
2940
2941 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2942                            u8 addr_type, struct sock *skip_sk)
2943 {
2944         struct mgmt_ev_device_unpaired ev;
2945
2946         bacpy(&ev.addr.bdaddr, bdaddr);
2947         ev.addr.type = addr_type;
2948
2949         return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2950                           skip_sk);
2951 }
2952
2953 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2954 {
2955         struct mgmt_pending_cmd *cmd = data;
2956         struct mgmt_cp_unpair_device *cp = cmd->param;
2957
2958         if (!err)
2959                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2960
2961         cmd->cmd_complete(cmd, err);
2962         mgmt_pending_free(cmd);
2963 }
2964
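/* cmd_sync handler for Unpair Device: if the address still has a live
 * BR/EDR or LE connection, abort it with "remote user terminated";
 * otherwise there is nothing left to do here.
 */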
2965 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2966 {
2967         struct mgmt_pending_cmd *cmd = data;
2968         struct mgmt_cp_unpair_device *cp = cmd->param;
2969         struct hci_conn *conn;
2970
2971         if (cp->addr.type == BDADDR_BREDR)
2972                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2973                                                &cp->addr.bdaddr);
2974         else
2975                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2976                                                le_addr_type(cp->addr.type));
2977
2978         if (!conn)
2979                 return 0;
2980
2981         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2982 }
2983
2984 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2985                          u16 len)
2986 {
2987         struct mgmt_cp_unpair_device *cp = data;
2988         struct mgmt_rp_unpair_device rp;
2989         struct hci_conn_params *params;
2990         struct mgmt_pending_cmd *cmd;
2991         struct hci_conn *conn;
2992         u8 addr_type;
2993         int err;
2994
2995         memset(&rp, 0, sizeof(rp));
2996         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2997         rp.addr.type = cp->addr.type;
2998
2999         if (!bdaddr_type_is_valid(cp->addr.type))
3000                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3001                                          MGMT_STATUS_INVALID_PARAMS,
3002                                          &rp, sizeof(rp));
3003
3004         if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3005                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3006                                          MGMT_STATUS_INVALID_PARAMS,
3007                                          &rp, sizeof(rp));
3008
3009         hci_dev_lock(hdev);
3010
3011         if (!hdev_is_powered(hdev)) {
3012                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3013                                         MGMT_STATUS_NOT_POWERED, &rp,
3014                                         sizeof(rp));
3015                 goto unlock;
3016         }
3017
3018         if (cp->addr.type == BDADDR_BREDR) {
3019                 /* If disconnection is requested, look up the
3020                  * connection. If the remote device is connected, the
3021                  * connection will later be used to terminate the link.
3022                  *
3023                  * Leaving it set to NULL means the link will not be
3024                  * terminated.
3025                  */
3026                 if (cp->disconnect)
3027                         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3028                                                        &cp->addr.bdaddr);
3029                 else
3030                         conn = NULL;
3031
3032                 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3033                 if (err < 0) {
3034                         err = mgmt_cmd_complete(sk, hdev->id,
3035                                                 MGMT_OP_UNPAIR_DEVICE,
3036                                                 MGMT_STATUS_NOT_PAIRED, &rp,
3037                                                 sizeof(rp));
3038                         goto unlock;
3039                 }
3040
3041                 goto done;
3042         }
3043
3044         /* LE address type */
3045         addr_type = le_addr_type(cp->addr.type);
3046
3047         /* Abort any ongoing SMP pairing. Removes LTK and IRK if they exist. */
3048         err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3049         if (err < 0) {
3050                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3051                                         MGMT_STATUS_NOT_PAIRED, &rp,
3052                                         sizeof(rp));
3053                 goto unlock;
3054         }
3055
3056         conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3057         if (!conn) {
3058                 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3059                 goto done;
3060         }
3061
3062
3063         /* Defer clearing the connection parameters until the connection
3064          * closes, so they can be kept if a re-pairing happens.
3065          */
3066         set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3067
3068         /* Disable auto-connection parameters if present */
3069         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3070         if (params) {
3071                 if (params->explicit_connect)
3072                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3073                 else
3074                         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3075         }
3076
3077         /* If disconnection is not requested, then clear the connection
3078          * variable so that the link is not terminated.
3079          */
3080         if (!cp->disconnect)
3081                 conn = NULL;
3082
3083 done:
3084         /* If the connection variable is set, then termination of the
3085          * link is requested.
3086          */
3087         if (!conn) {
3088                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3089                                         &rp, sizeof(rp));
3090                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3091                 goto unlock;
3092         }
3093
3094         cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3095                                sizeof(*cp));
3096         if (!cmd) {
3097                 err = -ENOMEM;
3098                 goto unlock;
3099         }
3100
3101         cmd->cmd_complete = addr_cmd_complete;
3102
3103         err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3104                                  unpair_device_complete);
3105         if (err < 0)
3106                 mgmt_pending_free(cmd);
3107
3108 unlock:
3109         hci_dev_unlock(hdev);
3110         return err;
3111 }
3112
3113 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3114                       u16 len)
3115 {
3116         struct mgmt_cp_disconnect *cp = data;
3117         struct mgmt_rp_disconnect rp;
3118         struct mgmt_pending_cmd *cmd;
3119         struct hci_conn *conn;
3120         int err;
3121
3122         bt_dev_dbg(hdev, "sock %p", sk);
3123
3124         memset(&rp, 0, sizeof(rp));
3125         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3126         rp.addr.type = cp->addr.type;
3127
3128         if (!bdaddr_type_is_valid(cp->addr.type))
3129                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3130                                          MGMT_STATUS_INVALID_PARAMS,
3131                                          &rp, sizeof(rp));
3132
3133         hci_dev_lock(hdev);
3134
3135         if (!test_bit(HCI_UP, &hdev->flags)) {
3136                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3137                                         MGMT_STATUS_NOT_POWERED, &rp,
3138                                         sizeof(rp));
3139                 goto failed;
3140         }
3141
3142         if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3143                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3144                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3145                 goto failed;
3146         }
3147
3148         if (cp->addr.type == BDADDR_BREDR)
3149                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3150                                                &cp->addr.bdaddr);
3151         else
3152                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3153                                                le_addr_type(cp->addr.type));
3154
3155         if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3156                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3157                                         MGMT_STATUS_NOT_CONNECTED, &rp,
3158                                         sizeof(rp));
3159                 goto failed;
3160         }
3161
3162         cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3163         if (!cmd) {
3164                 err = -ENOMEM;
3165                 goto failed;
3166         }
3167
3168         cmd->cmd_complete = generic_cmd_complete;
3169
3170         err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3171         if (err < 0)
3172                 mgmt_pending_remove(cmd);
3173
3174 failed:
3175         hci_dev_unlock(hdev);
3176         return err;
3177 }
3178
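/* Map an HCI link type and address type pair to the corresponding
 * mgmt BDADDR_* address type used in management messages.
 */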
3179 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3180 {
3181         switch (link_type) {
3182         case LE_LINK:
3183                 switch (addr_type) {
3184                 case ADDR_LE_DEV_PUBLIC:
3185                         return BDADDR_LE_PUBLIC;
3186
3187                 default:
3188                         /* Fall back to the LE Random address type */
3189                         return BDADDR_LE_RANDOM;
3190                 }
3191
3192         default:
3193                 /* Fall back to the BR/EDR type */
3194                 return BDADDR_BREDR;
3195         }
3196 }
3197
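/* Handle the Get Connections command: count the connections that have
 * been reported as connected to the management interface, then fill the
 * response with one address entry per connection, skipping SCO/eSCO
 * links.
 */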
3198 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3199                            u16 data_len)
3200 {
3201         struct mgmt_rp_get_connections *rp;
3202         struct hci_conn *c;
3203         int err;
3204         u16 i;
3205
3206         bt_dev_dbg(hdev, "sock %p", sk);
3207
3208         hci_dev_lock(hdev);
3209
3210         if (!hdev_is_powered(hdev)) {
3211                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3212                                       MGMT_STATUS_NOT_POWERED);
3213                 goto unlock;
3214         }
3215
3216         i = 0;
3217         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3218                 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3219                         i++;
3220         }
3221
3222         rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3223         if (!rp) {
3224                 err = -ENOMEM;
3225                 goto unlock;
3226         }
3227
3228         i = 0;
3229         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3230                 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3231                         continue;
3232                 bacpy(&rp->addr[i].bdaddr, &c->dst);
3233                 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3234                 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3235                         continue;
3236                 i++;
3237         }
3238
3239         rp->conn_count = cpu_to_le16(i);
3240
3241         /* Recalculate length in case of filtered SCO connections, etc. */
3242         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3243                                 struct_size(rp, addr, i));
3244
3245         kfree(rp);
3246
3247 unlock:
3248         hci_dev_unlock(hdev);
3249         return err;
3250 }
3251
3252 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3253                                    struct mgmt_cp_pin_code_neg_reply *cp)
3254 {
3255         struct mgmt_pending_cmd *cmd;
3256         int err;
3257
3258         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3259                                sizeof(*cp));
3260         if (!cmd)
3261                 return -ENOMEM;
3262
3263         cmd->cmd_complete = addr_cmd_complete;
3264
3265         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3266                            sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3267         if (err < 0)
3268                 mgmt_pending_remove(cmd);
3269
3270         return err;
3271 }
3272
3273 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3274                           u16 len)
3275 {
3276         struct hci_conn *conn;
3277         struct mgmt_cp_pin_code_reply *cp = data;
3278         struct hci_cp_pin_code_reply reply;
3279         struct mgmt_pending_cmd *cmd;
3280         int err;
3281
3282         bt_dev_dbg(hdev, "sock %p", sk);
3283
3284         hci_dev_lock(hdev);
3285
3286         if (!hdev_is_powered(hdev)) {
3287                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3288                                       MGMT_STATUS_NOT_POWERED);
3289                 goto failed;
3290         }
3291
3292         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3293         if (!conn) {
3294                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3295                                       MGMT_STATUS_NOT_CONNECTED);
3296                 goto failed;
3297         }
3298
3299         if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3300                 struct mgmt_cp_pin_code_neg_reply ncp;
3301
3302                 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3303
3304                 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3305
3306                 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3307                 if (err >= 0)
3308                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3309                                               MGMT_STATUS_INVALID_PARAMS);
3310
3311                 goto failed;
3312         }
3313
3314         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3315         if (!cmd) {
3316                 err = -ENOMEM;
3317                 goto failed;
3318         }
3319
3320         cmd->cmd_complete = addr_cmd_complete;
3321
3322         bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3323         reply.pin_len = cp->pin_len;
3324         memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3325
3326         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3327         if (err < 0)
3328                 mgmt_pending_remove(cmd);
3329
3330 failed:
3331         hci_dev_unlock(hdev);
3332         return err;
3333 }
3334
3335 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3336                              u16 len)
3337 {
3338         struct mgmt_cp_set_io_capability *cp = data;
3339
3340         bt_dev_dbg(hdev, "sock %p", sk);
3341
3342         if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3343                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3344                                        MGMT_STATUS_INVALID_PARAMS);
3345
3346         hci_dev_lock(hdev);
3347
3348         hdev->io_capability = cp->io_capability;
3349
3350         bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3351
3352         hci_dev_unlock(hdev);
3353
3354         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3355                                  NULL, 0);
3356 }
3357
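/* Find the pending Pair Device command, if any, whose user_data points
 * at the given connection.
 */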
3358 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3359 {
3360         struct hci_dev *hdev = conn->hdev;
3361         struct mgmt_pending_cmd *cmd;
3362
3363         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3364                 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3365                         continue;
3366
3367                 if (cmd->user_data != conn)
3368                         continue;
3369
3370                 return cmd;
3371         }
3372
3373         return NULL;
3374 }
3375
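/* Complete a pending Pair Device command: send the response with the
 * given status, detach the pairing callbacks from the connection and
 * drop the references held while pairing was in progress.
 */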
3376 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3377 {
3378         struct mgmt_rp_pair_device rp;
3379         struct hci_conn *conn = cmd->user_data;
3380         int err;
3381
3382         bacpy(&rp.addr.bdaddr, &conn->dst);
3383         rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3384
3385         err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3386                                 status, &rp, sizeof(rp));
3387
3388         /* So we don't get further callbacks for this connection */
3389         conn->connect_cfm_cb = NULL;
3390         conn->security_cfm_cb = NULL;
3391         conn->disconn_cfm_cb = NULL;
3392
3393         hci_conn_drop(conn);
3394
3395         /* The device is paired, so there is no need to remove
3396          * its connection parameters anymore.
3397          */
3398         clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3399
3400         hci_conn_put(conn);
3401
3402         return err;
3403 }
3404
3405 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3406 {
3407         u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3408         struct mgmt_pending_cmd *cmd;
3409
3410         cmd = find_pairing(conn);
3411         if (cmd) {
3412                 cmd->cmd_complete(cmd, status);
3413                 mgmt_pending_remove(cmd);
3414         }
3415 }
3416
3417 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3418 {
3419         struct mgmt_pending_cmd *cmd;
3420
3421         BT_DBG("status %u", status);
3422
3423         cmd = find_pairing(conn);
3424         if (!cmd) {
3425                 BT_DBG("Unable to find a pending command");
3426                 return;
3427         }
3428
3429         cmd->cmd_complete(cmd, mgmt_status(status));
3430         mgmt_pending_remove(cmd);
3431 }
3432
3433 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3434 {
3435         struct mgmt_pending_cmd *cmd;
3436
3437         BT_DBG("status %u", status);
3438
3439         if (!status)
3440                 return;
3441
3442         cmd = find_pairing(conn);
3443         if (!cmd) {
3444                 BT_DBG("Unable to find a pending command");
3445                 return;
3446         }
3447
3448         cmd->cmd_complete(cmd, mgmt_status(status));
3449         mgmt_pending_remove(cmd);
3450 }
3451
3452 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3453                        u16 len)
3454 {
3455         struct mgmt_cp_pair_device *cp = data;
3456         struct mgmt_rp_pair_device rp;
3457         struct mgmt_pending_cmd *cmd;
3458         u8 sec_level, auth_type;
3459         struct hci_conn *conn;
3460         int err;
3461
3462         bt_dev_dbg(hdev, "sock %p", sk);
3463
3464         memset(&rp, 0, sizeof(rp));
3465         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3466         rp.addr.type = cp->addr.type;
3467
3468         if (!bdaddr_type_is_valid(cp->addr.type))
3469                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3470                                          MGMT_STATUS_INVALID_PARAMS,
3471                                          &rp, sizeof(rp));
3472
3473         if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3474                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3475                                          MGMT_STATUS_INVALID_PARAMS,
3476                                          &rp, sizeof(rp));
3477
3478         hci_dev_lock(hdev);
3479
3480         if (!hdev_is_powered(hdev)) {
3481                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3482                                         MGMT_STATUS_NOT_POWERED, &rp,
3483                                         sizeof(rp));
3484                 goto unlock;
3485         }
3486
3487         if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3488                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3489                                         MGMT_STATUS_ALREADY_PAIRED, &rp,
3490                                         sizeof(rp));
3491                 goto unlock;
3492         }
3493
3494         sec_level = BT_SECURITY_MEDIUM;
3495         auth_type = HCI_AT_DEDICATED_BONDING;
3496
3497         if (cp->addr.type == BDADDR_BREDR) {
3498                 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3499                                        auth_type, CONN_REASON_PAIR_DEVICE);
3500         } else {
3501                 u8 addr_type = le_addr_type(cp->addr.type);
3502                 struct hci_conn_params *p;
3503
3504                 /* When pairing a new device, the host is expected to
3505                  * remember it for future connections. Adding the connection
3506                  * parameter information ahead of time allows tracking
3507                  * of the peripheral's preferred values and will speed up
3508                  * any further connection establishment.
3509                  *
3510                  * If connection parameters already exist, they will be
3511                  * kept and hci_conn_params_add() does nothing.
3512                  */
3513                 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3514
3515                 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3516                         p->auto_connect = HCI_AUTO_CONN_DISABLED;
3517
3518                 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3519                                            sec_level, HCI_LE_CONN_TIMEOUT,
3520                                            CONN_REASON_PAIR_DEVICE);
3521         }
3522
3523         if (IS_ERR(conn)) {
3524                 int status;
3525
3526                 if (PTR_ERR(conn) == -EBUSY)
3527                         status = MGMT_STATUS_BUSY;
3528                 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3529                         status = MGMT_STATUS_NOT_SUPPORTED;
3530                 else if (PTR_ERR(conn) == -ECONNREFUSED)
3531                         status = MGMT_STATUS_REJECTED;
3532                 else
3533                         status = MGMT_STATUS_CONNECT_FAILED;
3534
3535                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3536                                         status, &rp, sizeof(rp));
3537                 goto unlock;
3538         }
3539
3540         if (conn->connect_cfm_cb) {
3541                 hci_conn_drop(conn);
3542                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3543                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3544                 goto unlock;
3545         }
3546
3547         cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3548         if (!cmd) {
3549                 err = -ENOMEM;
3550                 hci_conn_drop(conn);
3551                 goto unlock;
3552         }
3553
3554         cmd->cmd_complete = pairing_complete;
3555
3556         /* For LE, just connecting isn't proof that the pairing finished */
3557         if (cp->addr.type == BDADDR_BREDR) {
3558                 conn->connect_cfm_cb = pairing_complete_cb;
3559                 conn->security_cfm_cb = pairing_complete_cb;
3560                 conn->disconn_cfm_cb = pairing_complete_cb;
3561         } else {
3562                 conn->connect_cfm_cb = le_pairing_complete_cb;
3563                 conn->security_cfm_cb = le_pairing_complete_cb;
3564                 conn->disconn_cfm_cb = le_pairing_complete_cb;
3565         }
3566
3567         conn->io_capability = cp->io_cap;
3568         cmd->user_data = hci_conn_get(conn);
3569
3570         if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3571             hci_conn_security(conn, sec_level, auth_type, true)) {
3572                 cmd->cmd_complete(cmd, 0);
3573                 mgmt_pending_remove(cmd);
3574         }
3575
3576         err = 0;
3577
3578 unlock:
3579         hci_dev_unlock(hdev);
3580         return err;
3581 }
3582
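/* hci_sync callback used by Cancel Pair Device: the connection handle is
 * passed through the data pointer via ERR_PTR(), so recover it, look up
 * the connection and abort it if it still exists.
 */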
3583 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3584 {
3585         struct hci_conn *conn;
3586         u16 handle = PTR_ERR(data);
3587
3588         conn = hci_conn_hash_lookup_handle(hdev, handle);
3589         if (!conn)
3590                 return 0;
3591
3592         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
3593 }
3594
3595 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3596                               u16 len)
3597 {
3598         struct mgmt_addr_info *addr = data;
3599         struct mgmt_pending_cmd *cmd;
3600         struct hci_conn *conn;
3601         int err;
3602
3603         bt_dev_dbg(hdev, "sock %p", sk);
3604
3605         hci_dev_lock(hdev);
3606
3607         if (!hdev_is_powered(hdev)) {
3608                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3609                                       MGMT_STATUS_NOT_POWERED);
3610                 goto unlock;
3611         }
3612
3613         cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3614         if (!cmd) {
3615                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3616                                       MGMT_STATUS_INVALID_PARAMS);
3617                 goto unlock;
3618         }
3619
3620         conn = cmd->user_data;
3621
3622         if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3623                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3624                                       MGMT_STATUS_INVALID_PARAMS);
3625                 goto unlock;
3626         }
3627
3628         cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3629         mgmt_pending_remove(cmd);
3630
3631         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3632                                 addr, sizeof(*addr));
3633
3634         /* Since the user doesn't want to proceed with the connection,
3635          * abort any ongoing pairing and then terminate the link if it was
3636          * created because of the pair device action.
3637          */
3638         if (addr->type == BDADDR_BREDR)
3639                 hci_remove_link_key(hdev, &addr->bdaddr);
3640         else
3641                 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3642                                               le_addr_type(addr->type));
3643
3644         if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3645                 hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3646                                    NULL);
3647
3648 unlock:
3649         hci_dev_unlock(hdev);
3650         return err;
3651 }
3652
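/* Common helper for the user confirmation, passkey and PIN code replies:
 * LE responses are handed to SMP directly, while BR/EDR responses are
 * forwarded to the controller as the given HCI command.
 */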
3653 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3654                              struct mgmt_addr_info *addr, u16 mgmt_op,
3655                              u16 hci_op, __le32 passkey)
3656 {
3657         struct mgmt_pending_cmd *cmd;
3658         struct hci_conn *conn;
3659         int err;
3660
3661         hci_dev_lock(hdev);
3662
3663         if (!hdev_is_powered(hdev)) {
3664                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3665                                         MGMT_STATUS_NOT_POWERED, addr,
3666                                         sizeof(*addr));
3667                 goto done;
3668         }
3669
3670         if (addr->type == BDADDR_BREDR)
3671                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3672         else
3673                 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3674                                                le_addr_type(addr->type));
3675
3676         if (!conn) {
3677                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3678                                         MGMT_STATUS_NOT_CONNECTED, addr,
3679                                         sizeof(*addr));
3680                 goto done;
3681         }
3682
3683         if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3684                 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3685                 if (!err)
3686                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3687                                                 MGMT_STATUS_SUCCESS, addr,
3688                                                 sizeof(*addr));
3689                 else
3690                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3691                                                 MGMT_STATUS_FAILED, addr,
3692                                                 sizeof(*addr));
3693
3694                 goto done;
3695         }
3696
3697         cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3698         if (!cmd) {
3699                 err = -ENOMEM;
3700                 goto done;
3701         }
3702
3703         cmd->cmd_complete = addr_cmd_complete;
3704
3705         /* Continue with pairing via HCI */
3706         if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3707                 struct hci_cp_user_passkey_reply cp;
3708
3709                 bacpy(&cp.bdaddr, &addr->bdaddr);
3710                 cp.passkey = passkey;
3711                 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3712         } else
3713                 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3714                                    &addr->bdaddr);
3715
3716         if (err < 0)
3717                 mgmt_pending_remove(cmd);
3718
3719 done:
3720         hci_dev_unlock(hdev);
3721         return err;
3722 }
3723
3724 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3725                               void *data, u16 len)
3726 {
3727         struct mgmt_cp_pin_code_neg_reply *cp = data;
3728
3729         bt_dev_dbg(hdev, "sock %p", sk);
3730
3731         return user_pairing_resp(sk, hdev, &cp->addr,
3732                                 MGMT_OP_PIN_CODE_NEG_REPLY,
3733                                 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3734 }
3735
3736 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3737                               u16 len)
3738 {
3739         struct mgmt_cp_user_confirm_reply *cp = data;
3740
3741         bt_dev_dbg(hdev, "sock %p", sk);
3742
3743         if (len != sizeof(*cp))
3744                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3745                                        MGMT_STATUS_INVALID_PARAMS);
3746
3747         return user_pairing_resp(sk, hdev, &cp->addr,
3748                                  MGMT_OP_USER_CONFIRM_REPLY,
3749                                  HCI_OP_USER_CONFIRM_REPLY, 0);
3750 }
3751
3752 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3753                                   void *data, u16 len)
3754 {
3755         struct mgmt_cp_user_confirm_neg_reply *cp = data;
3756
3757         bt_dev_dbg(hdev, "sock %p", sk);
3758
3759         return user_pairing_resp(sk, hdev, &cp->addr,
3760                                  MGMT_OP_USER_CONFIRM_NEG_REPLY,
3761                                  HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3762 }
3763
3764 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3765                               u16 len)
3766 {
3767         struct mgmt_cp_user_passkey_reply *cp = data;
3768
3769         bt_dev_dbg(hdev, "sock %p", sk);
3770
3771         return user_pairing_resp(sk, hdev, &cp->addr,
3772                                  MGMT_OP_USER_PASSKEY_REPLY,
3773                                  HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3774 }
3775
3776 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3777                                   void *data, u16 len)
3778 {
3779         struct mgmt_cp_user_passkey_neg_reply *cp = data;
3780
3781         bt_dev_dbg(hdev, "sock %p", sk);
3782
3783         return user_pairing_resp(sk, hdev, &cp->addr,
3784                                  MGMT_OP_USER_PASSKEY_NEG_REPLY,
3785                                  HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3786 }
3787
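/* If the current advertising instance uses any of the given flags (for
 * example the local name or appearance), expire it and schedule the next
 * instance so that updated data gets advertised.
 */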
3788 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3789 {
3790         struct adv_info *adv_instance;
3791
3792         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3793         if (!adv_instance)
3794                 return 0;
3795
3796         /* Stop if the current instance doesn't need to be changed */
3797         if (!(adv_instance->flags & flags))
3798                 return 0;
3799
3800         cancel_adv_timeout(hdev);
3801
3802         adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3803         if (!adv_instance)
3804                 return 0;
3805
3806         hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3807
3808         return 0;
3809 }
3810
3811 static int name_changed_sync(struct hci_dev *hdev, void *data)
3812 {
3813         return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3814 }
3815
3816 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3817 {
3818         struct mgmt_pending_cmd *cmd = data;
3819         struct mgmt_cp_set_local_name *cp = cmd->param;
3820         u8 status = mgmt_status(err);
3821
3822         bt_dev_dbg(hdev, "err %d", err);
3823
3824         if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3825                 return;
3826
3827         if (status) {
3828                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3829                                 status);
3830         } else {
3831                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3832                                   cp, sizeof(*cp));
3833
3834                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3835                         hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3836         }
3837
3838         mgmt_pending_remove(cmd);
3839 }
3840
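/* hci_sync callback for Set Local Name: update the controller name and
 * EIR data on BR/EDR capable controllers, and refresh the scan response
 * data when LE advertising is enabled.
 */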
3841 static int set_name_sync(struct hci_dev *hdev, void *data)
3842 {
3843         if (lmp_bredr_capable(hdev)) {
3844                 hci_update_name_sync(hdev);
3845                 hci_update_eir_sync(hdev);
3846         }
3847
3848         /* The name is stored in the scan response data, so there is
3849          * no need to update the advertising data here.
3850          */
3851         if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3852                 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3853
3854         return 0;
3855 }
3856
3857 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3858                           u16 len)
3859 {
3860         struct mgmt_cp_set_local_name *cp = data;
3861         struct mgmt_pending_cmd *cmd;
3862         int err;
3863
3864         bt_dev_dbg(hdev, "sock %p", sk);
3865
3866         hci_dev_lock(hdev);
3867
3868         /* If the old values are the same as the new ones, just return a
3869          * direct command complete event.
3870          */
3871         if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3872             !memcmp(hdev->short_name, cp->short_name,
3873                     sizeof(hdev->short_name))) {
3874                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3875                                         data, len);
3876                 goto failed;
3877         }
3878
3879         memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3880
3881         if (!hdev_is_powered(hdev)) {
3882                 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3883
3884                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3885                                         data, len);
3886                 if (err < 0)
3887                         goto failed;
3888
3889                 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3890                                          len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3891                 ext_info_changed(hdev, sk);
3892
3893                 goto failed;
3894         }
3895
3896         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3897         if (!cmd)
3898                 err = -ENOMEM;
3899         else
3900                 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3901                                          set_name_complete);
3902
3903         if (err < 0) {
3904                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3905                                       MGMT_STATUS_FAILED);
3906
3907                 if (cmd)
3908                         mgmt_pending_remove(cmd);
3909
3910                 goto failed;
3911         }
3912
3913         memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3914
3915 failed:
3916         hci_dev_unlock(hdev);
3917         return err;
3918 }
3919
3920 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3921 {
3922         return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3923 }
3924
3925 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3926                           u16 len)
3927 {
3928         struct mgmt_cp_set_appearance *cp = data;
3929         u16 appearance;
3930         int err;
3931
3932         bt_dev_dbg(hdev, "sock %p", sk);
3933
3934         if (!lmp_le_capable(hdev))
3935                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3936                                        MGMT_STATUS_NOT_SUPPORTED);
3937
3938         appearance = le16_to_cpu(cp->appearance);
3939
3940         hci_dev_lock(hdev);
3941
3942         if (hdev->appearance != appearance) {
3943                 hdev->appearance = appearance;
3944
3945                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3946                         hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3947                                            NULL);
3948
3949                 ext_info_changed(hdev, sk);
3950         }
3951
3952         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3953                                 0);
3954
3955         hci_dev_unlock(hdev);
3956
3957         return err;
3958 }
3959
3960 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3961                                  void *data, u16 len)
3962 {
3963         struct mgmt_rp_get_phy_configuration rp;
3964
3965         bt_dev_dbg(hdev, "sock %p", sk);
3966
3967         hci_dev_lock(hdev);
3968
3969         memset(&rp, 0, sizeof(rp));
3970
3971         rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3972         rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3973         rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3974
3975         hci_dev_unlock(hdev);
3976
3977         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3978                                  &rp, sizeof(rp));
3979 }
3980
3981 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3982 {
3983         struct mgmt_ev_phy_configuration_changed ev;
3984
3985         memset(&ev, 0, sizeof(ev));
3986
3987         ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3988
3989         return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3990                           sizeof(ev), skip);
3991 }
3992
3993 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3994 {
3995         struct mgmt_pending_cmd *cmd = data;
3996         struct sk_buff *skb = cmd->skb;
3997         u8 status = mgmt_status(err);
3998
3999         if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4000                 return;
4001
4002         if (!status) {
4003                 if (!skb)
4004                         status = MGMT_STATUS_FAILED;
4005                 else if (IS_ERR(skb))
4006                         status = mgmt_status(PTR_ERR(skb));
4007                 else
4008                         status = mgmt_status(skb->data[0]);
4009         }
4010
4011         bt_dev_dbg(hdev, "status %d", status);
4012
4013         if (status) {
4014                 mgmt_cmd_status(cmd->sk, hdev->id,
4015                                 MGMT_OP_SET_PHY_CONFIGURATION, status);
4016         } else {
4017                 mgmt_cmd_complete(cmd->sk, hdev->id,
4018                                   MGMT_OP_SET_PHY_CONFIGURATION, 0,
4019                                   NULL, 0);
4020
4021                 mgmt_phy_configuration_changed(hdev, cmd->sk);
4022         }
4023
4024         if (skb && !IS_ERR(skb))
4025                 kfree_skb(skb);
4026
4027         mgmt_pending_remove(cmd);
4028 }
4029
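/* Translate the selected MGMT_PHY_LE_* bits into an LE Set Default PHY
 * command and send it synchronously; the reply is stored in cmd->skb for
 * set_default_phy_complete() to inspect.
 */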
4030 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4031 {
4032         struct mgmt_pending_cmd *cmd = data;
4033         struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4034         struct hci_cp_le_set_default_phy cp_phy;
4035         u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4036
4037         memset(&cp_phy, 0, sizeof(cp_phy));
4038
4039         if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4040                 cp_phy.all_phys |= 0x01;
4041
4042         if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4043                 cp_phy.all_phys |= 0x02;
4044
4045         if (selected_phys & MGMT_PHY_LE_1M_TX)
4046                 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4047
4048         if (selected_phys & MGMT_PHY_LE_2M_TX)
4049                 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4050
4051         if (selected_phys & MGMT_PHY_LE_CODED_TX)
4052                 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4053
4054         if (selected_phys & MGMT_PHY_LE_1M_RX)
4055                 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4056
4057         if (selected_phys & MGMT_PHY_LE_2M_RX)
4058                 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4059
4060         if (selected_phys & MGMT_PHY_LE_CODED_RX)
4061                 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4062
4063         cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4064                                    sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4065
4066         return 0;
4067 }
4068
4069 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4070                                  void *data, u16 len)
4071 {
4072         struct mgmt_cp_set_phy_configuration *cp = data;
4073         struct mgmt_pending_cmd *cmd;
4074         u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4075         u16 pkt_type = (HCI_DH1 | HCI_DM1);
4076         bool changed = false;
4077         int err;
4078
4079         bt_dev_dbg(hdev, "sock %p", sk);
4080
4081         configurable_phys = get_configurable_phys(hdev);
4082         supported_phys = get_supported_phys(hdev);
4083         selected_phys = __le32_to_cpu(cp->selected_phys);
4084
4085         if (selected_phys & ~supported_phys)
4086                 return mgmt_cmd_status(sk, hdev->id,
4087                                        MGMT_OP_SET_PHY_CONFIGURATION,
4088                                        MGMT_STATUS_INVALID_PARAMS);
4089
4090         unconfigure_phys = supported_phys & ~configurable_phys;
4091
4092         if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4093                 return mgmt_cmd_status(sk, hdev->id,
4094                                        MGMT_OP_SET_PHY_CONFIGURATION,
4095                                        MGMT_STATUS_INVALID_PARAMS);
4096
4097         if (selected_phys == get_selected_phys(hdev))
4098                 return mgmt_cmd_complete(sk, hdev->id,
4099                                          MGMT_OP_SET_PHY_CONFIGURATION,
4100                                          0, NULL, 0);
4101
4102         hci_dev_lock(hdev);
4103
4104         if (!hdev_is_powered(hdev)) {
4105                 err = mgmt_cmd_status(sk, hdev->id,
4106                                       MGMT_OP_SET_PHY_CONFIGURATION,
4107                                       MGMT_STATUS_REJECTED);
4108                 goto unlock;
4109         }
4110
4111         if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4112                 err = mgmt_cmd_status(sk, hdev->id,
4113                                       MGMT_OP_SET_PHY_CONFIGURATION,
4114                                       MGMT_STATUS_BUSY);
4115                 goto unlock;
4116         }
4117
4118         if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4119                 pkt_type |= (HCI_DH3 | HCI_DM3);
4120         else
4121                 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4122
4123         if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4124                 pkt_type |= (HCI_DH5 | HCI_DM5);
4125         else
4126                 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4127
4128         if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4129                 pkt_type &= ~HCI_2DH1;
4130         else
4131                 pkt_type |= HCI_2DH1;
4132
4133         if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4134                 pkt_type &= ~HCI_2DH3;
4135         else
4136                 pkt_type |= HCI_2DH3;
4137
4138         if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4139                 pkt_type &= ~HCI_2DH5;
4140         else
4141                 pkt_type |= HCI_2DH5;
4142
4143         if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4144                 pkt_type &= ~HCI_3DH1;
4145         else
4146                 pkt_type |= HCI_3DH1;
4147
4148         if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4149                 pkt_type &= ~HCI_3DH3;
4150         else
4151                 pkt_type |= HCI_3DH3;
4152
4153         if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4154                 pkt_type &= ~HCI_3DH5;
4155         else
4156                 pkt_type |= HCI_3DH5;
4157
4158         if (pkt_type != hdev->pkt_type) {
4159                 hdev->pkt_type = pkt_type;
4160                 changed = true;
4161         }
4162
4163         if ((selected_phys & MGMT_PHY_LE_MASK) ==
4164             (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4165                 if (changed)
4166                         mgmt_phy_configuration_changed(hdev, sk);
4167
4168                 err = mgmt_cmd_complete(sk, hdev->id,
4169                                         MGMT_OP_SET_PHY_CONFIGURATION,
4170                                         0, NULL, 0);
4171
4172                 goto unlock;
4173         }
4174
4175         cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4176                                len);
4177         if (!cmd)
4178                 err = -ENOMEM;
4179         else
4180                 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4181                                          set_default_phy_complete);
4182
4183         if (err < 0) {
4184                 err = mgmt_cmd_status(sk, hdev->id,
4185                                       MGMT_OP_SET_PHY_CONFIGURATION,
4186                                       MGMT_STATUS_FAILED);
4187
4188                 if (cmd)
4189                         mgmt_pending_remove(cmd);
4190         }
4191
4192 unlock:
4193         hci_dev_unlock(hdev);
4194
4195         return err;
4196 }
4197
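/* Replace the list of blocked (known bad) keys with the list supplied by
 * userspace, after validating that the payload length matches the
 * declared key count.
 */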
4198 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4199                             u16 len)
4200 {
4201         int err = MGMT_STATUS_SUCCESS;
4202         struct mgmt_cp_set_blocked_keys *keys = data;
4203         const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4204                                    sizeof(struct mgmt_blocked_key_info));
4205         u16 key_count, expected_len;
4206         int i;
4207
4208         bt_dev_dbg(hdev, "sock %p", sk);
4209
4210         key_count = __le16_to_cpu(keys->key_count);
4211         if (key_count > max_key_count) {
4212                 bt_dev_err(hdev, "too big key_count value %u", key_count);
4213                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4214                                        MGMT_STATUS_INVALID_PARAMS);
4215         }
4216
4217         expected_len = struct_size(keys, keys, key_count);
4218         if (expected_len != len) {
4219                 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4220                            expected_len, len);
4221                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4222                                        MGMT_STATUS_INVALID_PARAMS);
4223         }
4224
4225         hci_dev_lock(hdev);
4226
4227         hci_blocked_keys_clear(hdev);
4228
4229         for (i = 0; i < key_count; ++i) {
4230                 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4231
4232                 if (!b) {
4233                         err = MGMT_STATUS_NO_RESOURCES;
4234                         break;
4235                 }
4236
4237                 b->type = keys->keys[i].type;
4238                 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4239                 list_add_rcu(&b->list, &hdev->blocked_keys);
4240         }
4241         hci_dev_unlock(hdev);
4242
4243         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4244                                 err, NULL, 0);
4245 }
4246
4247 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4248                                void *data, u16 len)
4249 {
4250         struct mgmt_mode *cp = data;
4251         int err;
4252         bool changed = false;
4253
4254         bt_dev_dbg(hdev, "sock %p", sk);
4255
4256         if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4257                 return mgmt_cmd_status(sk, hdev->id,
4258                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4259                                        MGMT_STATUS_NOT_SUPPORTED);
4260
4261         if (cp->val != 0x00 && cp->val != 0x01)
4262                 return mgmt_cmd_status(sk, hdev->id,
4263                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4264                                        MGMT_STATUS_INVALID_PARAMS);
4265
4266         hci_dev_lock(hdev);
4267
4268         if (hdev_is_powered(hdev) &&
4269             !!cp->val != hci_dev_test_flag(hdev,
4270                                            HCI_WIDEBAND_SPEECH_ENABLED)) {
4271                 err = mgmt_cmd_status(sk, hdev->id,
4272                                       MGMT_OP_SET_WIDEBAND_SPEECH,
4273                                       MGMT_STATUS_REJECTED);
4274                 goto unlock;
4275         }
4276
4277         if (cp->val)
4278                 changed = !hci_dev_test_and_set_flag(hdev,
4279                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4280         else
4281                 changed = hci_dev_test_and_clear_flag(hdev,
4282                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4283
4284         err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4285         if (err < 0)
4286                 goto unlock;
4287
4288         if (changed)
4289                 err = new_settings(hdev, sk);
4290
4291 unlock:
4292         hci_dev_unlock(hdev);
4293         return err;
4294 }
4295
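/* Build the Read Controller Capabilities response as a series of
 * EIR-encoded fields: security flags, maximum encryption key sizes and,
 * when the controller can report it, the LE TX power range.
 */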
4296 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4297                                void *data, u16 data_len)
4298 {
4299         char buf[20];
4300         struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4301         u16 cap_len = 0;
4302         u8 flags = 0;
4303         u8 tx_power_range[2];
4304
4305         bt_dev_dbg(hdev, "sock %p", sk);
4306
4307         memset(&buf, 0, sizeof(buf));
4308
4309         hci_dev_lock(hdev);
4310
4311         /* When the Read Simple Pairing Options command is supported, then
4312          * remote public key validation is supported.
4313          *
4314          * Alternatively, when Microsoft extensions are available, they can
4315          * indicate support for public key validation as well.
4316          */
4317         if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4318                 flags |= 0x01;  /* Remote public key validation (BR/EDR) */
4319
4320         flags |= 0x02;          /* Remote public key validation (LE) */
4321
4322         /* When the Read Encryption Key Size command is supported, then the
4323          * encryption key size is enforced.
4324          */
4325         if (hdev->commands[20] & 0x10)
4326                 flags |= 0x04;  /* Encryption key size enforcement (BR/EDR) */
4327
4328         flags |= 0x08;          /* Encryption key size enforcement (LE) */
4329
4330         cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4331                                   &flags, 1);
4332
4333         /* When the Read Simple Pairing Options command is supported, the
4334          * maximum encryption key size information is also provided.
4335          */
4336         if (hdev->commands[41] & 0x08)
4337                 cap_len = eir_append_le16(rp->cap, cap_len,
4338                                           MGMT_CAP_MAX_ENC_KEY_SIZE,
4339                                           hdev->max_enc_key_size);
4340
4341         cap_len = eir_append_le16(rp->cap, cap_len,
4342                                   MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4343                                   SMP_MAX_ENC_KEY_SIZE);
4344
4345         /* Append the min/max LE tx power parameters if we were able to
4346          * fetch them from the controller.
4347          */
4348         if (hdev->commands[38] & 0x80) {
4349                 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4350                 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4351                 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4352                                           tx_power_range, 2);
4353         }
4354
4355         rp->cap_len = cpu_to_le16(cap_len);
4356
4357         hci_dev_unlock(hdev);
4358
4359         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4360                                  rp, sizeof(*rp) + cap_len);
4361 }
4362
4363 #ifdef CONFIG_BT_FEATURE_DEBUG
4364 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4365 static const u8 debug_uuid[16] = {
4366         0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4367         0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4368 };
4369 #endif
4370
4371 /* 330859bc-7506-492d-9370-9a6f0614037f */
4372 static const u8 quality_report_uuid[16] = {
4373         0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4374         0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4375 };
4376
4377 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4378 static const u8 offload_codecs_uuid[16] = {
4379         0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4380         0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4381 };
4382
4383 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4384 static const u8 le_simultaneous_roles_uuid[16] = {
4385         0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4386         0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4387 };
4388
4389 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4390 static const u8 rpa_resolution_uuid[16] = {
4391         0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4392         0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4393 };
4394
4395 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4396 static const u8 iso_socket_uuid[16] = {
4397         0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4398         0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4399 };
4400
4401 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4402 static const u8 mgmt_mesh_uuid[16] = {
4403         0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4404         0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4405 };
4406
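/* Report the experimental features available for this controller (or
 * globally when hdev is NULL); bit 0 of each feature's flags indicates
 * whether the feature is currently enabled.
 */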
4407 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4408                                   void *data, u16 data_len)
4409 {
4410         struct mgmt_rp_read_exp_features_info *rp;
4411         size_t len;
4412         u16 idx = 0;
4413         u32 flags;
4414         int status;
4415
4416         bt_dev_dbg(hdev, "sock %p", sk);
4417
4418         /* Enough space for 7 features */
4419         len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4420         rp = kzalloc(len, GFP_KERNEL);
4421         if (!rp)
4422                 return -ENOMEM;
4423
4424 #ifdef CONFIG_BT_FEATURE_DEBUG
4425         if (!hdev) {
4426                 flags = bt_dbg_get() ? BIT(0) : 0;
4427
4428                 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4429                 rp->features[idx].flags = cpu_to_le32(flags);
4430                 idx++;
4431         }
4432 #endif
4433
4434         if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4435                 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4436                         flags = BIT(0);
4437                 else
4438                         flags = 0;
4439
4440                 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4441                 rp->features[idx].flags = cpu_to_le32(flags);
4442                 idx++;
4443         }
4444
4445         if (hdev && ll_privacy_capable(hdev)) {
4446                 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4447                         flags = BIT(0) | BIT(1);
4448                 else
4449                         flags = BIT(1);
4450
4451                 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4452                 rp->features[idx].flags = cpu_to_le32(flags);
4453                 idx++;
4454         }
4455
4456         if (hdev && (aosp_has_quality_report(hdev) ||
4457                      hdev->set_quality_report)) {
4458                 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4459                         flags = BIT(0);
4460                 else
4461                         flags = 0;
4462
4463                 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4464                 rp->features[idx].flags = cpu_to_le32(flags);
4465                 idx++;
4466         }
4467
4468         if (hdev && hdev->get_data_path_id) {
4469                 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4470                         flags = BIT(0);
4471                 else
4472                         flags = 0;
4473
4474                 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4475                 rp->features[idx].flags = cpu_to_le32(flags);
4476                 idx++;
4477         }
4478
4479         if (IS_ENABLED(CONFIG_BT_LE)) {
4480                 flags = iso_enabled() ? BIT(0) : 0;
4481                 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4482                 rp->features[idx].flags = cpu_to_le32(flags);
4483                 idx++;
4484         }
4485
4486         if (hdev && lmp_le_capable(hdev)) {
4487                 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4488                         flags = BIT(0);
4489                 else
4490                         flags = 0;
4491
4492                 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4493                 rp->features[idx].flags = cpu_to_le32(flags);
4494                 idx++;
4495         }
4496
4497         rp->feature_count = cpu_to_le16(idx);
4498
4499         /* After reading the experimental features information, enable
4500          * the events to update the client on any future change.
4501          */
4502         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4503
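              /* Each feature entry in the reply is 20 bytes: a 16-byte UUID
               * followed by a 32-bit flags field, hence the 20 * idx below.
               */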
4504         status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4505                                    MGMT_OP_READ_EXP_FEATURES_INFO,
4506                                    0, rp, sizeof(*rp) + (20 * idx));
4507
4508         kfree(rp);
4509         return status;
4510 }
4511
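     /* Notify MGMT sockets that the LL privacy (RPA resolution) experimental
      * feature changed and keep hdev->conn_flags in sync: the device privacy
      * connection flag is only advertised while the feature is enabled on a
      * privacy-mode-capable controller. BIT(1) in the event flags signals
      * that the supported settings changed as well.
      */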
4512 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4513                                           struct sock *skip)
4514 {
4515         struct mgmt_ev_exp_feature_changed ev;
4516
4517         memset(&ev, 0, sizeof(ev));
4518         memcpy(ev.uuid, rpa_resolution_uuid, 16);
4519         ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4520
4521         /* Do we need to be atomic with the conn_flags? */
4522         if (enabled && privacy_mode_capable(hdev))
4523                 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4524         else
4525                 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4526
4527         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4528                                   &ev, sizeof(ev),
4529                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4530
4531 }
4532
4533 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4534                                bool enabled, struct sock *skip)
4535 {
4536         struct mgmt_ev_exp_feature_changed ev;
4537
4538         memset(&ev, 0, sizeof(ev));
4539         memcpy(ev.uuid, uuid, 16);
4540         ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4541
4542         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4543                                   &ev, sizeof(ev),
4544                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4545 }
4546
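     /* Helper for declaring entries of the exp_features table below. */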
4547 #define EXP_FEAT(_uuid, _set_func)      \
4548 {                                       \
4549         .uuid = _uuid,                  \
4550         .set_func = _set_func,          \
4551 }
4552
4553 /* The zero key uuid is special. Multiple exp features are set through it. */
4554 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4555                              struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4556 {
4557         struct mgmt_rp_set_exp_feature rp;
4558
4559         memset(rp.uuid, 0, 16);
4560         rp.flags = cpu_to_le32(0);
4561
4562 #ifdef CONFIG_BT_FEATURE_DEBUG
4563         if (!hdev) {
4564                 bool changed = bt_dbg_get();
4565
4566                 bt_dbg_set(false);
4567
4568                 if (changed)
4569                         exp_feature_changed(NULL, ZERO_KEY, false, sk);
4570         }
4571 #endif
4572
4573         if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4574                 bool changed;
4575
4576                 changed = hci_dev_test_and_clear_flag(hdev,
4577                                                       HCI_ENABLE_LL_PRIVACY);
4578                 if (changed)
4579                         exp_feature_changed(hdev, rpa_resolution_uuid, false,
4580                                             sk);
4581         }
4582
4583         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4584
4585         return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4586                                  MGMT_OP_SET_EXP_FEATURE, 0,
4587                                  &rp, sizeof(rp));
4588 }
4589
4590 #ifdef CONFIG_BT_FEATURE_DEBUG
4591 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4592                           struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4593 {
4594         struct mgmt_rp_set_exp_feature rp;
4595
4596         bool val, changed;
4597         int err;
4598
4599         /* Command requires the non-controller index */
4600         if (hdev)
4601                 return mgmt_cmd_status(sk, hdev->id,
4602                                        MGMT_OP_SET_EXP_FEATURE,
4603                                        MGMT_STATUS_INVALID_INDEX);
4604
4605         /* Parameters are limited to a single octet */
4606         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4607                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4608                                        MGMT_OP_SET_EXP_FEATURE,
4609                                        MGMT_STATUS_INVALID_PARAMS);
4610
4611         /* Only boolean on/off is supported */
4612         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4613                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4614                                        MGMT_OP_SET_EXP_FEATURE,
4615                                        MGMT_STATUS_INVALID_PARAMS);
4616
4617         val = !!cp->param[0];
4618         changed = val ? !bt_dbg_get() : bt_dbg_get();
4619         bt_dbg_set(val);
4620
4621         memcpy(rp.uuid, debug_uuid, 16);
4622         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4623
4624         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4625
4626         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4627                                 MGMT_OP_SET_EXP_FEATURE, 0,
4628                                 &rp, sizeof(rp));
4629
4630         if (changed)
4631                 exp_feature_changed(hdev, debug_uuid, val, sk);
4632
4633         return err;
4634 }
4635 #endif
4636
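     /* Toggle the experimental mesh feature. Enabling sets
      * HCI_MESH_EXPERIMENTAL; disabling also clears HCI_MESH so that mesh
      * support is switched off completely.
      */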
4637 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4638                               struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4639 {
4640         struct mgmt_rp_set_exp_feature rp;
4641         bool val, changed;
4642         int err;
4643
4644         /* Command requires the controller index */
4645         if (!hdev)
4646                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4647                                        MGMT_OP_SET_EXP_FEATURE,
4648                                        MGMT_STATUS_INVALID_INDEX);
4649
4650         /* Parameters are limited to a single octet */
4651         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4652                 return mgmt_cmd_status(sk, hdev->id,
4653                                        MGMT_OP_SET_EXP_FEATURE,
4654                                        MGMT_STATUS_INVALID_PARAMS);
4655
4656         /* Only boolean on/off is supported */
4657         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4658                 return mgmt_cmd_status(sk, hdev->id,
4659                                        MGMT_OP_SET_EXP_FEATURE,
4660                                        MGMT_STATUS_INVALID_PARAMS);
4661
4662         val = !!cp->param[0];
4663
4664         if (val) {
4665                 changed = !hci_dev_test_and_set_flag(hdev,
4666                                                      HCI_MESH_EXPERIMENTAL);
4667         } else {
4668                 hci_dev_clear_flag(hdev, HCI_MESH);
4669                 changed = hci_dev_test_and_clear_flag(hdev,
4670                                                       HCI_MESH_EXPERIMENTAL);
4671         }
4672
4673         memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4674         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4675
4676         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4677
4678         err = mgmt_cmd_complete(sk, hdev->id,
4679                                 MGMT_OP_SET_EXP_FEATURE, 0,
4680                                 &rp, sizeof(rp));
4681
4682         if (changed)
4683                 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4684
4685         return err;
4686 }
4687
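     /* Toggle the LL privacy (RPA resolution) experimental feature. Changes
      * are only accepted while the controller is powered off, and enabling
      * the feature additionally clears the HCI_ADVERTISING flag.
      */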
4688 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4689                                    struct mgmt_cp_set_exp_feature *cp,
4690                                    u16 data_len)
4691 {
4692         struct mgmt_rp_set_exp_feature rp;
4693         bool val, changed;
4694         int err;
4695         u32 flags;
4696
4697         /* Command requires the controller index */
4698         if (!hdev)
4699                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4700                                        MGMT_OP_SET_EXP_FEATURE,
4701                                        MGMT_STATUS_INVALID_INDEX);
4702
4703         /* Changes can only be made when controller is powered down */
4704         if (hdev_is_powered(hdev))
4705                 return mgmt_cmd_status(sk, hdev->id,
4706                                        MGMT_OP_SET_EXP_FEATURE,
4707                                        MGMT_STATUS_REJECTED);
4708
4709         /* Parameters are limited to a single octet */
4710         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4711                 return mgmt_cmd_status(sk, hdev->id,
4712                                        MGMT_OP_SET_EXP_FEATURE,
4713                                        MGMT_STATUS_INVALID_PARAMS);
4714
4715         /* Only boolean on/off is supported */
4716         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4717                 return mgmt_cmd_status(sk, hdev->id,
4718                                        MGMT_OP_SET_EXP_FEATURE,
4719                                        MGMT_STATUS_INVALID_PARAMS);
4720
4721         val = !!cp->param[0];
4722
4723         if (val) {
4724                 changed = !hci_dev_test_and_set_flag(hdev,
4725                                                      HCI_ENABLE_LL_PRIVACY);
4726                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4727
4728                 /* Enable LL privacy + supported settings changed */
4729                 flags = BIT(0) | BIT(1);
4730         } else {
4731                 changed = hci_dev_test_and_clear_flag(hdev,
4732                                                       HCI_ENABLE_LL_PRIVACY);
4733
4734                 /* Disable LL privacy + supported settings changed */
4735                 flags = BIT(1);
4736         }
4737
4738         memcpy(rp.uuid, rpa_resolution_uuid, 16);
4739         rp.flags = cpu_to_le32(flags);
4740
4741         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4742
4743         err = mgmt_cmd_complete(sk, hdev->id,
4744                                 MGMT_OP_SET_EXP_FEATURE, 0,
4745                                 &rp, sizeof(rp));
4746
4747         if (changed)
4748                 exp_ll_privacy_feature_changed(val, hdev, sk);
4749
4750         return err;
4751 }
4752
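     /* Toggle the quality report experimental feature, preferring the
      * driver's set_quality_report callback and falling back to the AOSP
      * vendor implementation otherwise.
      */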
4753 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4754                                    struct mgmt_cp_set_exp_feature *cp,
4755                                    u16 data_len)
4756 {
4757         struct mgmt_rp_set_exp_feature rp;
4758         bool val, changed;
4759         int err;
4760
4761         /* Command requires a valid controller index */
4762         if (!hdev)
4763                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4764                                        MGMT_OP_SET_EXP_FEATURE,
4765                                        MGMT_STATUS_INVALID_INDEX);
4766
4767         /* Parameters are limited to a single octet */
4768         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4769                 return mgmt_cmd_status(sk, hdev->id,
4770                                        MGMT_OP_SET_EXP_FEATURE,
4771                                        MGMT_STATUS_INVALID_PARAMS);
4772
4773         /* Only boolean on/off is supported */
4774         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4775                 return mgmt_cmd_status(sk, hdev->id,
4776                                        MGMT_OP_SET_EXP_FEATURE,
4777                                        MGMT_STATUS_INVALID_PARAMS);
4778
4779         hci_req_sync_lock(hdev);
4780
4781         val = !!cp->param[0];
4782         changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4783
4784         if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4785                 err = mgmt_cmd_status(sk, hdev->id,
4786                                       MGMT_OP_SET_EXP_FEATURE,
4787                                       MGMT_STATUS_NOT_SUPPORTED);
4788                 goto unlock_quality_report;
4789         }
4790
4791         if (changed) {
4792                 if (hdev->set_quality_report)
4793                         err = hdev->set_quality_report(hdev, val);
4794                 else
4795                         err = aosp_set_quality_report(hdev, val);
4796
4797                 if (err) {
4798                         err = mgmt_cmd_status(sk, hdev->id,
4799                                               MGMT_OP_SET_EXP_FEATURE,
4800                                               MGMT_STATUS_FAILED);
4801                         goto unlock_quality_report;
4802                 }
4803
4804                 if (val)
4805                         hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4806                 else
4807                         hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4808         }
4809
4810         bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4811
4812         memcpy(rp.uuid, quality_report_uuid, 16);
4813         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4814         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4815
4816         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4817                                 &rp, sizeof(rp));
4818
4819         if (changed)
4820                 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4821
4822 unlock_quality_report:
4823         hci_req_sync_unlock(hdev);
4824         return err;
4825 }
4826
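     /* Toggle use of offloaded codecs. This only flips
      * HCI_OFFLOAD_CODECS_ENABLED and requires the driver to provide the
      * get_data_path_id callback.
      */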
4827 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4828                                   struct mgmt_cp_set_exp_feature *cp,
4829                                   u16 data_len)
4830 {
4831         bool val, changed;
4832         int err;
4833         struct mgmt_rp_set_exp_feature rp;
4834
4835         /* Command requires a valid controller index */
4836         if (!hdev)
4837                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4838                                        MGMT_OP_SET_EXP_FEATURE,
4839                                        MGMT_STATUS_INVALID_INDEX);
4840
4841         /* Parameters are limited to a single octet */
4842         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4843                 return mgmt_cmd_status(sk, hdev->id,
4844                                        MGMT_OP_SET_EXP_FEATURE,
4845                                        MGMT_STATUS_INVALID_PARAMS);
4846
4847         /* Only boolean on/off is supported */
4848         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4849                 return mgmt_cmd_status(sk, hdev->id,
4850                                        MGMT_OP_SET_EXP_FEATURE,
4851                                        MGMT_STATUS_INVALID_PARAMS);
4852
4853         val = !!cp->param[0];
4854         changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4855
4856         if (!hdev->get_data_path_id) {
4857                 return mgmt_cmd_status(sk, hdev->id,
4858                                        MGMT_OP_SET_EXP_FEATURE,
4859                                        MGMT_STATUS_NOT_SUPPORTED);
4860         }
4861
4862         if (changed) {
4863                 if (val)
4864                         hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4865                 else
4866                         hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4867         }
4868
4869         bt_dev_info(hdev, "offload codecs enable %d changed %d",
4870                     val, changed);
4871
4872         memcpy(rp.uuid, offload_codecs_uuid, 16);
4873         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4874         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4875         err = mgmt_cmd_complete(sk, hdev->id,
4876                                 MGMT_OP_SET_EXP_FEATURE, 0,
4877                                 &rp, sizeof(rp));
4878
4879         if (changed)
4880                 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4881
4882         return err;
4883 }
4884
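     /* Toggle HCI_LE_SIMULTANEOUS_ROLES, which allows using the LE central
      * and peripheral roles at the same time on controllers whose LE states
      * support it.
      */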
4885 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4886                                           struct mgmt_cp_set_exp_feature *cp,
4887                                           u16 data_len)
4888 {
4889         bool val, changed;
4890         int err;
4891         struct mgmt_rp_set_exp_feature rp;
4892
4893         /* Command requires a valid controller index */
4894         if (!hdev)
4895                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4896                                        MGMT_OP_SET_EXP_FEATURE,
4897                                        MGMT_STATUS_INVALID_INDEX);
4898
4899         /* Parameters are limited to a single octet */
4900         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4901                 return mgmt_cmd_status(sk, hdev->id,
4902                                        MGMT_OP_SET_EXP_FEATURE,
4903                                        MGMT_STATUS_INVALID_PARAMS);
4904
4905         /* Only boolean on/off is supported */
4906         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4907                 return mgmt_cmd_status(sk, hdev->id,
4908                                        MGMT_OP_SET_EXP_FEATURE,
4909                                        MGMT_STATUS_INVALID_PARAMS);
4910
4911         val = !!cp->param[0];
4912         changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4913
4914         if (!hci_dev_le_state_simultaneous(hdev)) {
4915                 return mgmt_cmd_status(sk, hdev->id,
4916                                        MGMT_OP_SET_EXP_FEATURE,
4917                                        MGMT_STATUS_NOT_SUPPORTED);
4918         }
4919
4920         if (changed) {
4921                 if (val)
4922                         hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4923                 else
4924                         hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4925         }
4926
4927         bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4928                     val, changed);
4929
4930         memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4931         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4932         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4933         err = mgmt_cmd_complete(sk, hdev->id,
4934                                 MGMT_OP_SET_EXP_FEATURE, 0,
4935                                 &rp, sizeof(rp));
4936
4937         if (changed)
4938                 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4939
4940         return err;
4941 }
4942
4943 #ifdef CONFIG_BT_LE
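     /* Toggle the experimental ISO socket support by registering or
      * unregistering the ISO socket protocol via iso_init()/iso_exit().
      */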
4944 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4945                                struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4946 {
4947         struct mgmt_rp_set_exp_feature rp;
4948         bool val, changed = false;
4949         int err;
4950
4951         /* Command requires the non-controller index */
4952         if (hdev)
4953                 return mgmt_cmd_status(sk, hdev->id,
4954                                        MGMT_OP_SET_EXP_FEATURE,
4955                                        MGMT_STATUS_INVALID_INDEX);
4956
4957         /* Parameters are limited to a single octet */
4958         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4959                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4960                                        MGMT_OP_SET_EXP_FEATURE,
4961                                        MGMT_STATUS_INVALID_PARAMS);
4962
4963         /* Only boolean on/off is supported */
4964         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4965                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4966                                        MGMT_OP_SET_EXP_FEATURE,
4967                                        MGMT_STATUS_INVALID_PARAMS);
4968
4969         val = !!cp->param[0];
4970         if (val)
4971                 err = iso_init();
4972         else
4973                 err = iso_exit();
4974
4975         if (!err)
4976                 changed = true;
4977
4978         memcpy(rp.uuid, iso_socket_uuid, 16);
4979         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4980
4981         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4982
4983         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4984                                 MGMT_OP_SET_EXP_FEATURE, 0,
4985                                 &rp, sizeof(rp));
4986
4987         if (changed)
4988                 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4989
4990         return err;
4991 }
4992 #endif
4993
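     /* Table mapping experimental feature UUIDs to their set handlers,
      * terminated by a NULL entry.
      */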
4994 static const struct mgmt_exp_feature {
4995         const u8 *uuid;
4996         int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4997                         struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4998 } exp_features[] = {
4999         EXP_FEAT(ZERO_KEY, set_zero_key_func),
5000 #ifdef CONFIG_BT_FEATURE_DEBUG
5001         EXP_FEAT(debug_uuid, set_debug_func),
5002 #endif
5003         EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5004         EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5005         EXP_FEAT(quality_report_uuid, set_quality_report_func),
5006         EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5007         EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5008 #ifdef CONFIG_BT_LE
5009         EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5010 #endif
5011
5012         /* end with a null feature */
5013         EXP_FEAT(NULL, NULL)
5014 };
5015
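     /* Dispatch MGMT_OP_SET_EXP_FEATURE to the handler matching the 16-byte
      * feature UUID at the start of the parameters. All current handlers
      * expect a single additional on/off octet.
      */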
5016 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5017                            void *data, u16 data_len)
5018 {
5019         struct mgmt_cp_set_exp_feature *cp = data;
5020         size_t i = 0;
5021
5022         bt_dev_dbg(hdev, "sock %p", sk);
5023
5024         for (i = 0; exp_features[i].uuid; i++) {
5025                 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5026                         return exp_features[i].set_func(sk, hdev, cp, data_len);
5027         }
5028
5029         return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5030                                MGMT_OP_SET_EXP_FEATURE,
5031                                MGMT_STATUS_NOT_SUPPORTED);
5032 }
5033
5034 static u32 get_params_flags(struct hci_dev *hdev,
5035                             struct hci_conn_params *params)
5036 {
5037         u32 flags = hdev->conn_flags;
5038
5039         /* Devices using RPAs can only be programmed in the acceptlist if
5040          * LL Privacy has been enabled, otherwise they cannot mark
5041          * HCI_CONN_FLAG_REMOTE_WAKEUP.
5042          */
5043         if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5044             hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5045                 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5046
5047         return flags;
5048 }
5049
5050 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5051                             u16 data_len)
5052 {
5053         struct mgmt_cp_get_device_flags *cp = data;
5054         struct mgmt_rp_get_device_flags rp;
5055         struct bdaddr_list_with_flags *br_params;
5056         struct hci_conn_params *params;
5057         u32 supported_flags;
5058         u32 current_flags = 0;
5059         u8 status = MGMT_STATUS_INVALID_PARAMS;
5060
5061         bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5062                    &cp->addr.bdaddr, cp->addr.type);
5063
5064         hci_dev_lock(hdev);
5065
5066         supported_flags = hdev->conn_flags;
5067
5068         memset(&rp, 0, sizeof(rp));
5069
5070         if (cp->addr.type == BDADDR_BREDR) {
5071                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5072                                                               &cp->addr.bdaddr,
5073                                                               cp->addr.type);
5074                 if (!br_params)
5075                         goto done;
5076
5077                 current_flags = br_params->flags;
5078         } else {
5079                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5080                                                 le_addr_type(cp->addr.type));
5081                 if (!params)
5082                         goto done;
5083
5084                 supported_flags = get_params_flags(hdev, params);
5085                 current_flags = params->flags;
5086         }
5087
5088         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5089         rp.addr.type = cp->addr.type;
5090         rp.supported_flags = cpu_to_le32(supported_flags);
5091         rp.current_flags = cpu_to_le32(current_flags);
5092
5093         status = MGMT_STATUS_SUCCESS;
5094
5095 done:
5096         hci_dev_unlock(hdev);
5097
5098         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5099                                 &rp, sizeof(rp));
5100 }
5101
5102 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5103                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5104                                  u32 supported_flags, u32 current_flags)
5105 {
5106         struct mgmt_ev_device_flags_changed ev;
5107
5108         bacpy(&ev.addr.bdaddr, bdaddr);
5109         ev.addr.type = bdaddr_type;
5110         ev.supported_flags = cpu_to_le32(supported_flags);
5111         ev.current_flags = cpu_to_le32(current_flags);
5112
5113         mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5114 }
5115
5116 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5117                             u16 len)
5118 {
5119         struct mgmt_cp_set_device_flags *cp = data;
5120         struct bdaddr_list_with_flags *br_params;
5121         struct hci_conn_params *params;
5122         u8 status = MGMT_STATUS_INVALID_PARAMS;
5123         u32 supported_flags;
5124         u32 current_flags = __le32_to_cpu(cp->current_flags);
5125
5126         bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5127                    &cp->addr.bdaddr, cp->addr.type, current_flags);
5128
5129         /* We should take hci_dev_lock() earlier; conn_flags can change. */
5130         supported_flags = hdev->conn_flags;
5131
5132         if ((supported_flags | current_flags) != supported_flags) {
5133                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5134                             current_flags, supported_flags);
5135                 goto done;
5136         }
5137
5138         hci_dev_lock(hdev);
5139
5140         if (cp->addr.type == BDADDR_BREDR) {
5141                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5142                                                               &cp->addr.bdaddr,
5143                                                               cp->addr.type);
5144
5145                 if (br_params) {
5146                         br_params->flags = current_flags;
5147                         status = MGMT_STATUS_SUCCESS;
5148                 } else {
5149                         bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5150                                     &cp->addr.bdaddr, cp->addr.type);
5151                 }
5152
5153                 goto unlock;
5154         }
5155
5156         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5157                                         le_addr_type(cp->addr.type));
5158         if (!params) {
5159                 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5160                             &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5161                 goto unlock;
5162         }
5163
5164         supported_flags = get_params_flags(hdev, params);
5165
5166         if ((supported_flags | current_flags) != supported_flags) {
5167                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5168                             current_flags, supported_flags);
5169                 goto unlock;
5170         }
5171
5172         params->flags = current_flags;
5173         status = MGMT_STATUS_SUCCESS;
5174
5175         /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5176          * has been set.
5177          */
5178         if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5179                 hci_update_passive_scan(hdev);
5180
5181 unlock:
5182         hci_dev_unlock(hdev);
5183
5184 done:
5185         if (status == MGMT_STATUS_SUCCESS)
5186                 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5187                                      supported_flags, current_flags);
5188
5189         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5190                                  &cp->addr, sizeof(cp->addr));
5191 }
5192
5193 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5194                                    u16 handle)
5195 {
5196         struct mgmt_ev_adv_monitor_added ev;
5197
5198         ev.monitor_handle = cpu_to_le16(handle);
5199
5200         mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5201 }
5202
5203 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5204 {
5205         struct mgmt_ev_adv_monitor_removed ev;
5206         struct mgmt_pending_cmd *cmd;
5207         struct sock *sk_skip = NULL;
5208         struct mgmt_cp_remove_adv_monitor *cp;
5209
5210         cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5211         if (cmd) {
5212                 cp = cmd->param;
5213
5214                 if (cp->monitor_handle)
5215                         sk_skip = cmd->sk;
5216         }
5217
5218         ev.monitor_handle = cpu_to_le16(handle);
5219
5220         mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5221 }
5222
5223 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5224                                  void *data, u16 len)
5225 {
5226         struct adv_monitor *monitor = NULL;
5227         struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5228         int handle, err;
5229         size_t rp_size = 0;
5230         __u32 supported = 0;
5231         __u32 enabled = 0;
5232         __u16 num_handles = 0;
5233         __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5234
5235         BT_DBG("request for %s", hdev->name);
5236
5237         hci_dev_lock(hdev);
5238
5239         if (msft_monitor_supported(hdev))
5240                 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5241
5242         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5243                 handles[num_handles++] = monitor->handle;
5244
5245         hci_dev_unlock(hdev);
5246
5247         rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5248         rp = kmalloc(rp_size, GFP_KERNEL);
5249         if (!rp)
5250                 return -ENOMEM;
5251
5252         /* All supported features are currently enabled */
5253         enabled = supported;
5254
5255         rp->supported_features = cpu_to_le32(supported);
5256         rp->enabled_features = cpu_to_le32(enabled);
5257         rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5258         rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5259         rp->num_handles = cpu_to_le16(num_handles);
5260         if (num_handles)
5261                 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5262
5263         err = mgmt_cmd_complete(sk, hdev->id,
5264                                 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5265                                 MGMT_STATUS_SUCCESS, rp, rp_size);
5266
5267         kfree(rp);
5268
5269         return err;
5270 }
5271
5272 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5273                                                    void *data, int status)
5274 {
5275         struct mgmt_rp_add_adv_patterns_monitor rp;
5276         struct mgmt_pending_cmd *cmd = data;
5277         struct adv_monitor *monitor = cmd->user_data;
5278
5279         hci_dev_lock(hdev);
5280
5281         rp.monitor_handle = cpu_to_le16(monitor->handle);
5282
5283         if (!status) {
5284                 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5285                 hdev->adv_monitors_cnt++;
5286                 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5287                         monitor->state = ADV_MONITOR_STATE_REGISTERED;
5288                 hci_update_passive_scan(hdev);
5289         }
5290
5291         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5292                           mgmt_status(status), &rp, sizeof(rp));
5293         mgmt_pending_remove(cmd);
5294
5295         hci_dev_unlock(hdev);
5296         bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5297                    rp.monitor_handle, status);
5298 }
5299
5300 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5301 {
5302         struct mgmt_pending_cmd *cmd = data;
5303         struct adv_monitor *monitor = cmd->user_data;
5304
5305         return hci_add_adv_monitor(hdev, monitor);
5306 }
5307
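     /* Common tail of both Add Advertisement Patterns Monitor variants.
      * Takes ownership of the monitor: it is freed on every error path,
      * otherwise registration is queued and completed asynchronously.
      */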
5308 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5309                                       struct adv_monitor *m, u8 status,
5310                                       void *data, u16 len, u16 op)
5311 {
5312         struct mgmt_pending_cmd *cmd;
5313         int err;
5314
5315         hci_dev_lock(hdev);
5316
5317         if (status)
5318                 goto unlock;
5319
5320         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5321             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5322             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5323             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5324                 status = MGMT_STATUS_BUSY;
5325                 goto unlock;
5326         }
5327
5328         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5329         if (!cmd) {
5330                 status = MGMT_STATUS_NO_RESOURCES;
5331                 goto unlock;
5332         }
5333
5334         cmd->user_data = m;
5335         err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5336                                  mgmt_add_adv_patterns_monitor_complete);
5337         if (err) {
5338                 if (err == -ENOMEM)
5339                         status = MGMT_STATUS_NO_RESOURCES;
5340                 else
5341                         status = MGMT_STATUS_FAILED;
5342
5343                 goto unlock;
5344         }
5345
5346         hci_dev_unlock(hdev);
5347
5348         return 0;
5349
5350 unlock:
5351         hci_free_adv_monitor(hdev, m);
5352         hci_dev_unlock(hdev);
5353         return mgmt_cmd_status(sk, hdev->id, op, status);
5354 }
5355
5356 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5357                                    struct mgmt_adv_rssi_thresholds *rssi)
5358 {
5359         if (rssi) {
5360                 m->rssi.low_threshold = rssi->low_threshold;
5361                 m->rssi.low_threshold_timeout =
5362                     __le16_to_cpu(rssi->low_threshold_timeout);
5363                 m->rssi.high_threshold = rssi->high_threshold;
5364                 m->rssi.high_threshold_timeout =
5365                     __le16_to_cpu(rssi->high_threshold_timeout);
5366                 m->rssi.sampling_period = rssi->sampling_period;
5367         } else {
5368                 /* Default values. These numbers are the least restrictive
5369                  * parameters for the MSFT API to work, so it behaves as if
5370                  * there are no RSSI parameters to consider. May need to be
5371                  * changed if other APIs are to be supported.
5372                  */
5373                 m->rssi.low_threshold = -127;
5374                 m->rssi.low_threshold_timeout = 60;
5375                 m->rssi.high_threshold = -127;
5376                 m->rssi.high_threshold_timeout = 0;
5377                 m->rssi.sampling_period = 0;
5378         }
5379 }
5380
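     /* Copy the advertisement patterns into the monitor, checking that each
      * pattern fits within HCI_MAX_AD_LENGTH. Returns an MGMT status code.
      */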
5381 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5382                                     struct mgmt_adv_pattern *patterns)
5383 {
5384         u8 offset = 0, length = 0;
5385         struct adv_pattern *p = NULL;
5386         int i;
5387
5388         for (i = 0; i < pattern_count; i++) {
5389                 offset = patterns[i].offset;
5390                 length = patterns[i].length;
5391                 if (offset >= HCI_MAX_AD_LENGTH ||
5392                     length > HCI_MAX_AD_LENGTH ||
5393                     (offset + length) > HCI_MAX_AD_LENGTH)
5394                         return MGMT_STATUS_INVALID_PARAMS;
5395
5396                 p = kmalloc(sizeof(*p), GFP_KERNEL);
5397                 if (!p)
5398                         return MGMT_STATUS_NO_RESOURCES;
5399
5400                 p->ad_type = patterns[i].ad_type;
5401                 p->offset = patterns[i].offset;
5402                 p->length = patterns[i].length;
5403                 memcpy(p->value, patterns[i].value, p->length);
5404
5405                 INIT_LIST_HEAD(&p->list);
5406                 list_add(&p->list, &m->patterns);
5407         }
5408
5409         return MGMT_STATUS_SUCCESS;
5410 }
5411
5412 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5413                                     void *data, u16 len)
5414 {
5415         struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5416         struct adv_monitor *m = NULL;
5417         u8 status = MGMT_STATUS_SUCCESS;
5418         size_t expected_size = sizeof(*cp);
5419
5420         BT_DBG("request for %s", hdev->name);
5421
5422         if (len <= sizeof(*cp)) {
5423                 status = MGMT_STATUS_INVALID_PARAMS;
5424                 goto done;
5425         }
5426
5427         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5428         if (len != expected_size) {
5429                 status = MGMT_STATUS_INVALID_PARAMS;
5430                 goto done;
5431         }
5432
5433         m = kzalloc(sizeof(*m), GFP_KERNEL);
5434         if (!m) {
5435                 status = MGMT_STATUS_NO_RESOURCES;
5436                 goto done;
5437         }
5438
5439         INIT_LIST_HEAD(&m->patterns);
5440
5441         parse_adv_monitor_rssi(m, NULL);
5442         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5443
5444 done:
5445         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5446                                           MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5447 }
5448
5449 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5450                                          void *data, u16 len)
5451 {
5452         struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5453         struct adv_monitor *m = NULL;
5454         u8 status = MGMT_STATUS_SUCCESS;
5455         size_t expected_size = sizeof(*cp);
5456
5457         BT_DBG("request for %s", hdev->name);
5458
5459         if (len <= sizeof(*cp)) {
5460                 status = MGMT_STATUS_INVALID_PARAMS;
5461                 goto done;
5462         }
5463
5464         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5465         if (len != expected_size) {
5466                 status = MGMT_STATUS_INVALID_PARAMS;
5467                 goto done;
5468         }
5469
5470         m = kzalloc(sizeof(*m), GFP_KERNEL);
5471         if (!m) {
5472                 status = MGMT_STATUS_NO_RESOURCES;
5473                 goto done;
5474         }
5475
5476         INIT_LIST_HEAD(&m->patterns);
5477
5478         parse_adv_monitor_rssi(m, &cp->rssi);
5479         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5480
5481 done:
5482         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5483                                          MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5484 }
5485
5486 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5487                                              void *data, int status)
5488 {
5489         struct mgmt_rp_remove_adv_monitor rp;
5490         struct mgmt_pending_cmd *cmd = data;
5491         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5492
5493         hci_dev_lock(hdev);
5494
5495         rp.monitor_handle = cp->monitor_handle;
5496
5497         if (!status)
5498                 hci_update_passive_scan(hdev);
5499
5500         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5501                           mgmt_status(status), &rp, sizeof(rp));
5502         mgmt_pending_remove(cmd);
5503
5504         hci_dev_unlock(hdev);
5505         bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5506                    rp.monitor_handle, status);
5507 }
5508
5509 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5510 {
5511         struct mgmt_pending_cmd *cmd = data;
5512         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5513         u16 handle = __le16_to_cpu(cp->monitor_handle);
5514
5515         if (!handle)
5516                 return hci_remove_all_adv_monitor(hdev);
5517
5518         return hci_remove_single_adv_monitor(hdev, handle);
5519 }
5520
5521 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5522                               void *data, u16 len)
5523 {
5524         struct mgmt_pending_cmd *cmd;
5525         int err, status;
5526
5527         hci_dev_lock(hdev);
5528
5529         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5530             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5531             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5532             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5533                 status = MGMT_STATUS_BUSY;
5534                 goto unlock;
5535         }
5536
5537         cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5538         if (!cmd) {
5539                 status = MGMT_STATUS_NO_RESOURCES;
5540                 goto unlock;
5541         }
5542
5543         err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5544                                  mgmt_remove_adv_monitor_complete);
5545
5546         if (err) {
5547                 mgmt_pending_remove(cmd);
5548
5549                 if (err == -ENOMEM)
5550                         status = MGMT_STATUS_NO_RESOURCES;
5551                 else
5552                         status = MGMT_STATUS_FAILED;
5553
5554                 goto unlock;
5555         }
5556
5557         hci_dev_unlock(hdev);
5558
5559         return 0;
5560
5561 unlock:
5562         hci_dev_unlock(hdev);
5563         return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5564                                status);
5565 }
5566
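     /* Completion of Read Local OOB Data. Without BR/EDR Secure Connections
      * only the P-192 hash and randomizer are returned and the reply is
      * trimmed accordingly; otherwise both P-192 and P-256 values are
      * included.
      */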
5567 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5568 {
5569         struct mgmt_rp_read_local_oob_data mgmt_rp;
5570         size_t rp_size = sizeof(mgmt_rp);
5571         struct mgmt_pending_cmd *cmd = data;
5572         struct sk_buff *skb = cmd->skb;
5573         u8 status = mgmt_status(err);
5574
5575         if (!status) {
5576                 if (!skb)
5577                         status = MGMT_STATUS_FAILED;
5578                 else if (IS_ERR(skb))
5579                         status = mgmt_status(PTR_ERR(skb));
5580                 else
5581                         status = mgmt_status(skb->data[0]);
5582         }
5583
5584         bt_dev_dbg(hdev, "status %d", status);
5585
5586         if (status) {
5587                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5588                 goto remove;
5589         }
5590
5591         memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5592
5593         if (!bredr_sc_enabled(hdev)) {
5594                 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5595
5596                 if (skb->len < sizeof(*rp)) {
5597                         mgmt_cmd_status(cmd->sk, hdev->id,
5598                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5599                                         MGMT_STATUS_FAILED);
5600                         goto remove;
5601                 }
5602
5603                 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5604                 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5605
5606                 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5607         } else {
5608                 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5609
5610                 if (skb->len < sizeof(*rp)) {
5611                         mgmt_cmd_status(cmd->sk, hdev->id,
5612                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5613                                         MGMT_STATUS_FAILED);
5614                         goto remove;
5615                 }
5616
5617                 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5618                 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5619
5620                 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5621                 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5622         }
5623
5624         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5625                           MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5626
5627 remove:
5628         if (skb && !IS_ERR(skb))
5629                 kfree_skb(skb);
5630
5631         mgmt_pending_free(cmd);
5632 }
5633
5634 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5635 {
5636         struct mgmt_pending_cmd *cmd = data;
5637
5638         if (bredr_sc_enabled(hdev))
5639                 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5640         else
5641                 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5642
5643         if (IS_ERR(cmd->skb))
5644                 return PTR_ERR(cmd->skb);
5645         else
5646                 return 0;
5647 }
5648
5649 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5650                                void *data, u16 data_len)
5651 {
5652         struct mgmt_pending_cmd *cmd;
5653         int err;
5654
5655         bt_dev_dbg(hdev, "sock %p", sk);
5656
5657         hci_dev_lock(hdev);
5658
5659         if (!hdev_is_powered(hdev)) {
5660                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5661                                       MGMT_STATUS_NOT_POWERED);
5662                 goto unlock;
5663         }
5664
5665         if (!lmp_ssp_capable(hdev)) {
5666                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5667                                       MGMT_STATUS_NOT_SUPPORTED);
5668                 goto unlock;
5669         }
5670
5671         cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5672         if (!cmd)
5673                 err = -ENOMEM;
5674         else
5675                 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5676                                          read_local_oob_data_complete);
5677
5678         if (err < 0) {
5679                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5680                                       MGMT_STATUS_FAILED);
5681
5682                 if (cmd)
5683                         mgmt_pending_free(cmd);
5684         }
5685
5686 unlock:
5687         hci_dev_unlock(hdev);
5688         return err;
5689 }
5690
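     /* Add Remote OOB Data accepts either the legacy payload (P-192 hash and
      * randomizer only) or the extended payload carrying both P-192 and
      * P-256 values. Zeroed values disable OOB data for the corresponding
      * part.
      */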
5691 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5692                                void *data, u16 len)
5693 {
5694         struct mgmt_addr_info *addr = data;
5695         int err;
5696
5697         bt_dev_dbg(hdev, "sock %p", sk);
5698
5699         if (!bdaddr_type_is_valid(addr->type))
5700                 return mgmt_cmd_complete(sk, hdev->id,
5701                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
5702                                          MGMT_STATUS_INVALID_PARAMS,
5703                                          addr, sizeof(*addr));
5704
5705         hci_dev_lock(hdev);
5706
5707         if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5708                 struct mgmt_cp_add_remote_oob_data *cp = data;
5709                 u8 status;
5710
5711                 if (cp->addr.type != BDADDR_BREDR) {
5712                         err = mgmt_cmd_complete(sk, hdev->id,
5713                                                 MGMT_OP_ADD_REMOTE_OOB_DATA,
5714                                                 MGMT_STATUS_INVALID_PARAMS,
5715                                                 &cp->addr, sizeof(cp->addr));
5716                         goto unlock;
5717                 }
5718
5719                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5720                                               cp->addr.type, cp->hash,
5721                                               cp->rand, NULL, NULL);
5722                 if (err < 0)
5723                         status = MGMT_STATUS_FAILED;
5724                 else
5725                         status = MGMT_STATUS_SUCCESS;
5726
5727                 err = mgmt_cmd_complete(sk, hdev->id,
5728                                         MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5729                                         &cp->addr, sizeof(cp->addr));
5730         } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5731                 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5732                 u8 *rand192, *hash192, *rand256, *hash256;
5733                 u8 status;
5734
5735                 if (bdaddr_type_is_le(cp->addr.type)) {
5736                         /* Enforce zero-valued 192-bit parameters as
5737                          * long as legacy SMP OOB isn't implemented.
5738                          */
5739                         if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5740                             memcmp(cp->hash192, ZERO_KEY, 16)) {
5741                                 err = mgmt_cmd_complete(sk, hdev->id,
5742                                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5743                                                         MGMT_STATUS_INVALID_PARAMS,
5744                                                         addr, sizeof(*addr));
5745                                 goto unlock;
5746                         }
5747
5748                         rand192 = NULL;
5749                         hash192 = NULL;
5750                 } else {
5751                         /* If one of the P-192 values is set to zero,
5752                          * just disable OOB data for P-192.
5753                          */
5754                         if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5755                             !memcmp(cp->hash192, ZERO_KEY, 16)) {
5756                                 rand192 = NULL;
5757                                 hash192 = NULL;
5758                         } else {
5759                                 rand192 = cp->rand192;
5760                                 hash192 = cp->hash192;
5761                         }
5762                 }
5763
5764                 /* If one of the P-256 values is set to zero, just
5765                  * disable OOB data for P-256.
5766                  */
5767                 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5768                     !memcmp(cp->hash256, ZERO_KEY, 16)) {
5769                         rand256 = NULL;
5770                         hash256 = NULL;
5771                 } else {
5772                         rand256 = cp->rand256;
5773                         hash256 = cp->hash256;
5774                 }
5775
5776                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5777                                               cp->addr.type, hash192, rand192,
5778                                               hash256, rand256);
5779                 if (err < 0)
5780                         status = MGMT_STATUS_FAILED;
5781                 else
5782                         status = MGMT_STATUS_SUCCESS;
5783
5784                 err = mgmt_cmd_complete(sk, hdev->id,
5785                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5786                                         status, &cp->addr, sizeof(cp->addr));
5787         } else {
5788                 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5789                            len);
5790                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5791                                       MGMT_STATUS_INVALID_PARAMS);
5792         }
5793
5794 unlock:
5795         hci_dev_unlock(hdev);
5796         return err;
5797 }
5798
5799 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5800                                   void *data, u16 len)
5801 {
5802         struct mgmt_cp_remove_remote_oob_data *cp = data;
5803         u8 status;
5804         int err;
5805
5806         bt_dev_dbg(hdev, "sock %p", sk);
5807
5808         if (cp->addr.type != BDADDR_BREDR)
5809                 return mgmt_cmd_complete(sk, hdev->id,
5810                                          MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5811                                          MGMT_STATUS_INVALID_PARAMS,
5812                                          &cp->addr, sizeof(cp->addr));
5813
5814         hci_dev_lock(hdev);
5815
5816         if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5817                 hci_remote_oob_data_clear(hdev);
5818                 status = MGMT_STATUS_SUCCESS;
5819                 goto done;
5820         }
5821
5822         err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5823         if (err < 0)
5824                 status = MGMT_STATUS_INVALID_PARAMS;
5825         else
5826                 status = MGMT_STATUS_SUCCESS;
5827
5828 done:
5829         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5830                                 status, &cp->addr, sizeof(cp->addr));
5831
5832         hci_dev_unlock(hdev);
5833         return err;
5834 }
5835
5836 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5837 {
5838         struct mgmt_pending_cmd *cmd;
5839
5840         bt_dev_dbg(hdev, "status %u", status);
5841
5842         hci_dev_lock(hdev);
5843
5844         cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5845         if (!cmd)
5846                 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5847
5848         if (!cmd)
5849                 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5850
5851         if (cmd) {
5852                 cmd->cmd_complete(cmd, mgmt_status(status));
5853                 mgmt_pending_remove(cmd);
5854         }
5855
5856         hci_dev_unlock(hdev);
5857 }
5858
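     /* Check whether the requested discovery type is supported by the
      * controller; if not, *mgmt_status is set to the status code to report.
      */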
5859 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5860                                     uint8_t *mgmt_status)
5861 {
5862         switch (type) {
5863         case DISCOV_TYPE_LE:
5864                 *mgmt_status = mgmt_le_support(hdev);
5865                 if (*mgmt_status)
5866                         return false;
5867                 break;
5868         case DISCOV_TYPE_INTERLEAVED:
5869                 *mgmt_status = mgmt_le_support(hdev);
5870                 if (*mgmt_status)
5871                         return false;
5872                 fallthrough;
5873         case DISCOV_TYPE_BREDR:
5874                 *mgmt_status = mgmt_bredr_support(hdev);
5875                 if (*mgmt_status)
5876                         return false;
5877                 break;
5878         default:
5879                 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5880                 return false;
5881         }
5882
5883         return true;
5884 }
5885
5886 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5887 {
5888         struct mgmt_pending_cmd *cmd = data;
5889
5890         if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5891             cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5892             cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5893                 return;
5894
5895         bt_dev_dbg(hdev, "err %d", err);
5896
5897         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5898                           cmd->param, 1);
5899         mgmt_pending_remove(cmd);
5900
5901         hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5902                                 DISCOVERY_FINDING);
5903 }
5904
5905 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5906 {
5907         return hci_start_discovery_sync(hdev);
5908 }
5909
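/* Common implementation behind MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY. The command is rejected when the adapter
 * is not powered, a discovery is already running (or periodic inquiry is
 * active), the requested type is not supported, or discovery is currently
 * paused. Otherwise a pending command is added and the actual scanning is
 * started asynchronously via hci_cmd_sync_queue(), with
 * start_discovery_complete() reporting the result back to user space.
 */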
5910 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5911                                     u16 op, void *data, u16 len)
5912 {
5913         struct mgmt_cp_start_discovery *cp = data;
5914         struct mgmt_pending_cmd *cmd;
5915         u8 status;
5916         int err;
5917
5918         bt_dev_dbg(hdev, "sock %p", sk);
5919
5920         hci_dev_lock(hdev);
5921
5922         if (!hdev_is_powered(hdev)) {
5923                 err = mgmt_cmd_complete(sk, hdev->id, op,
5924                                         MGMT_STATUS_NOT_POWERED,
5925                                         &cp->type, sizeof(cp->type));
5926                 goto failed;
5927         }
5928
5929         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5930             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5931                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5932                                         &cp->type, sizeof(cp->type));
5933                 goto failed;
5934         }
5935
5936         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5937                 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5938                                         &cp->type, sizeof(cp->type));
5939                 goto failed;
5940         }
5941
5942         /* Can't start discovery when it is paused */
5943         if (hdev->discovery_paused) {
5944                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5945                                         &cp->type, sizeof(cp->type));
5946                 goto failed;
5947         }
5948
5949         /* Clear the discovery filter first to free any previously
5950          * allocated memory for the UUID list.
5951          */
5952         hci_discovery_filter_clear(hdev);
5953
5954         hdev->discovery.type = cp->type;
5955         hdev->discovery.report_invalid_rssi = false;
5956         if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5957                 hdev->discovery.limited = true;
5958         else
5959                 hdev->discovery.limited = false;
5960
5961         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5962         if (!cmd) {
5963                 err = -ENOMEM;
5964                 goto failed;
5965         }
5966
5967         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5968                                  start_discovery_complete);
5969         if (err < 0) {
5970                 mgmt_pending_remove(cmd);
5971                 goto failed;
5972         }
5973
5974         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5975
5976 failed:
5977         hci_dev_unlock(hdev);
5978         return err;
5979 }
5980
5981 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5982                            void *data, u16 len)
5983 {
5984         return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5985                                         data, len);
5986 }
5987
5988 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5989                                    void *data, u16 len)
5990 {
5991         return start_discovery_internal(sk, hdev,
5992                                         MGMT_OP_START_LIMITED_DISCOVERY,
5993                                         data, len);
5994 }
5995
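/* MGMT_OP_START_SERVICE_DISCOVERY works like Start Discovery but additionally
 * carries an RSSI threshold and a list of 128-bit service UUIDs (16 bytes
 * each) used to filter reported results. The variable-length UUID list is
 * what the uuid_count/expected_len checks below validate before the filter
 * is copied into hdev->discovery.
 */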
5996 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5997                                    void *data, u16 len)
5998 {
5999         struct mgmt_cp_start_service_discovery *cp = data;
6000         struct mgmt_pending_cmd *cmd;
6001         const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6002         u16 uuid_count, expected_len;
6003         u8 status;
6004         int err;
6005
6006         bt_dev_dbg(hdev, "sock %p", sk);
6007
6008         hci_dev_lock(hdev);
6009
6010         if (!hdev_is_powered(hdev)) {
6011                 err = mgmt_cmd_complete(sk, hdev->id,
6012                                         MGMT_OP_START_SERVICE_DISCOVERY,
6013                                         MGMT_STATUS_NOT_POWERED,
6014                                         &cp->type, sizeof(cp->type));
6015                 goto failed;
6016         }
6017
6018         if (hdev->discovery.state != DISCOVERY_STOPPED ||
6019             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6020                 err = mgmt_cmd_complete(sk, hdev->id,
6021                                         MGMT_OP_START_SERVICE_DISCOVERY,
6022                                         MGMT_STATUS_BUSY, &cp->type,
6023                                         sizeof(cp->type));
6024                 goto failed;
6025         }
6026
6027         if (hdev->discovery_paused) {
6028                 err = mgmt_cmd_complete(sk, hdev->id,
6029                                         MGMT_OP_START_SERVICE_DISCOVERY,
6030                                         MGMT_STATUS_BUSY, &cp->type,
6031                                         sizeof(cp->type));
6032                 goto failed;
6033         }
6034
6035         uuid_count = __le16_to_cpu(cp->uuid_count);
6036         if (uuid_count > max_uuid_count) {
6037                 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6038                            uuid_count);
6039                 err = mgmt_cmd_complete(sk, hdev->id,
6040                                         MGMT_OP_START_SERVICE_DISCOVERY,
6041                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6042                                         sizeof(cp->type));
6043                 goto failed;
6044         }
6045
6046         expected_len = sizeof(*cp) + uuid_count * 16;
6047         if (expected_len != len) {
6048                 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6049                            expected_len, len);
6050                 err = mgmt_cmd_complete(sk, hdev->id,
6051                                         MGMT_OP_START_SERVICE_DISCOVERY,
6052                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6053                                         sizeof(cp->type));
6054                 goto failed;
6055         }
6056
6057         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6058                 err = mgmt_cmd_complete(sk, hdev->id,
6059                                         MGMT_OP_START_SERVICE_DISCOVERY,
6060                                         status, &cp->type, sizeof(cp->type));
6061                 goto failed;
6062         }
6063
6064         cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6065                                hdev, data, len);
6066         if (!cmd) {
6067                 err = -ENOMEM;
6068                 goto failed;
6069         }
6070
6071         /* Clear the discovery filter first to free any previously
6072          * allocated memory for the UUID list.
6073          */
6074         hci_discovery_filter_clear(hdev);
6075
6076         hdev->discovery.result_filtering = true;
6077         hdev->discovery.type = cp->type;
6078         hdev->discovery.rssi = cp->rssi;
6079         hdev->discovery.uuid_count = uuid_count;
6080
6081         if (uuid_count > 0) {
6082                 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6083                                                 GFP_KERNEL);
6084                 if (!hdev->discovery.uuids) {
6085                         err = mgmt_cmd_complete(sk, hdev->id,
6086                                                 MGMT_OP_START_SERVICE_DISCOVERY,
6087                                                 MGMT_STATUS_FAILED,
6088                                                 &cp->type, sizeof(cp->type));
6089                         mgmt_pending_remove(cmd);
6090                         goto failed;
6091                 }
6092         }
6093
6094         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6095                                  start_discovery_complete);
6096         if (err < 0) {
6097                 mgmt_pending_remove(cmd);
6098                 goto failed;
6099         }
6100
6101         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6102
6103 failed:
6104         hci_dev_unlock(hdev);
6105         return err;
6106 }
6107
6108 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6109 {
6110         struct mgmt_pending_cmd *cmd;
6111
6112         bt_dev_dbg(hdev, "status %u", status);
6113
6114         hci_dev_lock(hdev);
6115
6116         cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6117         if (cmd) {
6118                 cmd->cmd_complete(cmd, mgmt_status(status));
6119                 mgmt_pending_remove(cmd);
6120         }
6121
6122         hci_dev_unlock(hdev);
6123 }
6124
6125 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6126 {
6127         struct mgmt_pending_cmd *cmd = data;
6128
6129         if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6130                 return;
6131
6132         bt_dev_dbg(hdev, "err %d", err);
6133
6134         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6135                           cmd->param, 1);
6136         mgmt_pending_remove(cmd);
6137
6138         if (!err)
6139                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6140 }
6141
6142 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6143 {
6144         return hci_stop_discovery_sync(hdev);
6145 }
6146
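/* MGMT_OP_STOP_DISCOVERY is rejected when no discovery is active and requires
 * the type parameter to match the type used when discovery was started;
 * stopping is then performed asynchronously via hci_stop_discovery_sync().
 */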
6147 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6148                           u16 len)
6149 {
6150         struct mgmt_cp_stop_discovery *mgmt_cp = data;
6151         struct mgmt_pending_cmd *cmd;
6152         int err;
6153
6154         bt_dev_dbg(hdev, "sock %p", sk);
6155
6156         hci_dev_lock(hdev);
6157
6158         if (!hci_discovery_active(hdev)) {
6159                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6160                                         MGMT_STATUS_REJECTED, &mgmt_cp->type,
6161                                         sizeof(mgmt_cp->type));
6162                 goto unlock;
6163         }
6164
6165         if (hdev->discovery.type != mgmt_cp->type) {
6166                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6167                                         MGMT_STATUS_INVALID_PARAMS,
6168                                         &mgmt_cp->type, sizeof(mgmt_cp->type));
6169                 goto unlock;
6170         }
6171
6172         cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6173         if (!cmd) {
6174                 err = -ENOMEM;
6175                 goto unlock;
6176         }
6177
6178         err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6179                                  stop_discovery_complete);
6180         if (err < 0) {
6181                 mgmt_pending_remove(cmd);
6182                 goto unlock;
6183         }
6184
6185         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6186
6187 unlock:
6188         hci_dev_unlock(hdev);
6189         return err;
6190 }
6191
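/* MGMT_OP_CONFIRM_NAME is used while discovery is active to tell the kernel
 * whether the name of a found device is already known. If it is not, the
 * inquiry cache entry is marked NAME_NEEDED so that remote name resolution
 * gets scheduled for it.
 */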
6192 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6193                         u16 len)
6194 {
6195         struct mgmt_cp_confirm_name *cp = data;
6196         struct inquiry_entry *e;
6197         int err;
6198
6199         bt_dev_dbg(hdev, "sock %p", sk);
6200
6201         hci_dev_lock(hdev);
6202
6203         if (!hci_discovery_active(hdev)) {
6204                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6205                                         MGMT_STATUS_FAILED, &cp->addr,
6206                                         sizeof(cp->addr));
6207                 goto failed;
6208         }
6209
6210         e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6211         if (!e) {
6212                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6213                                         MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6214                                         sizeof(cp->addr));
6215                 goto failed;
6216         }
6217
6218         if (cp->name_known) {
6219                 e->name_state = NAME_KNOWN;
6220                 list_del(&e->list);
6221         } else {
6222                 e->name_state = NAME_NEEDED;
6223                 hci_inquiry_cache_update_resolve(hdev, e);
6224         }
6225
6226         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6227                                 &cp->addr, sizeof(cp->addr));
6228
6229 failed:
6230         hci_dev_unlock(hdev);
6231         return err;
6232 }
6233
6234 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6235                         u16 len)
6236 {
6237         struct mgmt_cp_block_device *cp = data;
6238         u8 status;
6239         int err;
6240
6241         bt_dev_dbg(hdev, "sock %p", sk);
6242
6243         if (!bdaddr_type_is_valid(cp->addr.type))
6244                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6245                                          MGMT_STATUS_INVALID_PARAMS,
6246                                          &cp->addr, sizeof(cp->addr));
6247
6248         hci_dev_lock(hdev);
6249
6250         err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6251                                   cp->addr.type);
6252         if (err < 0) {
6253                 status = MGMT_STATUS_FAILED;
6254                 goto done;
6255         }
6256
6257         mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6258                    sk);
6259         status = MGMT_STATUS_SUCCESS;
6260
6261 done:
6262         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6263                                 &cp->addr, sizeof(cp->addr));
6264
6265         hci_dev_unlock(hdev);
6266
6267         return err;
6268 }
6269
6270 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6271                           u16 len)
6272 {
6273         struct mgmt_cp_unblock_device *cp = data;
6274         u8 status;
6275         int err;
6276
6277         bt_dev_dbg(hdev, "sock %p", sk);
6278
6279         if (!bdaddr_type_is_valid(cp->addr.type))
6280                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6281                                          MGMT_STATUS_INVALID_PARAMS,
6282                                          &cp->addr, sizeof(cp->addr));
6283
6284         hci_dev_lock(hdev);
6285
6286         err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6287                                   cp->addr.type);
6288         if (err < 0) {
6289                 status = MGMT_STATUS_INVALID_PARAMS;
6290                 goto done;
6291         }
6292
6293         mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6294                    sk);
6295         status = MGMT_STATUS_SUCCESS;
6296
6297 done:
6298         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6299                                 &cp->addr, sizeof(cp->addr));
6300
6301         hci_dev_unlock(hdev);
6302
6303         return err;
6304 }
6305
6306 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6307 {
6308         return hci_update_eir_sync(hdev);
6309 }
6310
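/* MGMT_OP_SET_DEVICE_ID stores the Device ID (DI profile) record values.
 * Only source values up to 0x0002 are accepted (per the mgmt API these
 * roughly correspond to disabled, Bluetooth SIG assigned and USB
 * Implementer's Forum assigned vendor IDs); the EIR data is then regenerated
 * asynchronously.
 */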
6311 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6312                          u16 len)
6313 {
6314         struct mgmt_cp_set_device_id *cp = data;
6315         int err;
6316         __u16 source;
6317
6318         bt_dev_dbg(hdev, "sock %p", sk);
6319
6320         source = __le16_to_cpu(cp->source);
6321
6322         if (source > 0x0002)
6323                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6324                                        MGMT_STATUS_INVALID_PARAMS);
6325
6326         hci_dev_lock(hdev);
6327
6328         hdev->devid_source = source;
6329         hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6330         hdev->devid_product = __le16_to_cpu(cp->product);
6331         hdev->devid_version = __le16_to_cpu(cp->version);
6332
6333         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6334                                 NULL, 0);
6335
6336         hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6337
6338         hci_dev_unlock(hdev);
6339
6340         return err;
6341 }
6342
6343 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6344 {
6345         if (err)
6346                 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6347         else
6348                 bt_dev_dbg(hdev, "status %d", err);
6349 }
6350
6351 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6352 {
6353         struct cmd_lookup match = { NULL, hdev };
6354         u8 instance;
6355         struct adv_info *adv_instance;
6356         u8 status = mgmt_status(err);
6357
6358         if (status) {
6359                 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6360                                      cmd_status_rsp, &status);
6361                 return;
6362         }
6363
6364         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6365                 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6366         else
6367                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6368
6369         mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6370                              &match);
6371
6372         new_settings(hdev, match.sk);
6373
6374         if (match.sk)
6375                 sock_put(match.sk);
6376
6377         /* If "Set Advertising" was just disabled and instance advertising was
6378          * set up earlier, then re-enable multi-instance advertising.
6379          */
6380         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6381             list_empty(&hdev->adv_instances))
6382                 return;
6383
6384         instance = hdev->cur_adv_instance;
6385         if (!instance) {
6386                 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6387                                                         struct adv_info, list);
6388                 if (!adv_instance)
6389                         return;
6390
6391                 instance = adv_instance->instance;
6392         }
6393
6394         err = hci_schedule_adv_instance_sync(hdev, instance, true);
6395
6396         enable_advertising_instance(hdev, err);
6397 }
6398
6399 static int set_adv_sync(struct hci_dev *hdev, void *data)
6400 {
6401         struct mgmt_pending_cmd *cmd = data;
6402         struct mgmt_mode *cp = cmd->param;
6403         u8 val = !!cp->val;
6404
6405         if (cp->val == 0x02)
6406                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6407         else
6408                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6409
6410         cancel_adv_timeout(hdev);
6411
6412         if (val) {
6413                 /* Switch to instance "0" for the Set Advertising setting.
6414                  * We cannot use update_[adv|scan_rsp]_data() here as the
6415                  * HCI_ADVERTISING flag is not yet set.
6416                  */
6417                 hdev->cur_adv_instance = 0x00;
6418
6419                 if (ext_adv_capable(hdev)) {
6420                         hci_start_ext_adv_sync(hdev, 0x00);
6421                 } else {
6422                         hci_update_adv_data_sync(hdev, 0x00);
6423                         hci_update_scan_rsp_data_sync(hdev, 0x00);
6424                         hci_enable_advertising_sync(hdev);
6425                 }
6426         } else {
6427                 hci_disable_advertising_sync(hdev);
6428         }
6429
6430         return 0;
6431 }
6432
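/* MGMT_OP_SET_ADVERTISING accepts 0x00 (off), 0x01 (on) and 0x02 (on and
 * connectable, tracked via HCI_ADVERTISING_CONNECTABLE). When no HCI traffic
 * is needed (powered off, no state change, mesh enabled, existing LE
 * connections or an active LE scan) only the flags are toggled and a settings
 * response is sent; otherwise the change is queued through set_adv_sync().
 */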
6433 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6434                            u16 len)
6435 {
6436         struct mgmt_mode *cp = data;
6437         struct mgmt_pending_cmd *cmd;
6438         u8 val, status;
6439         int err;
6440
6441         bt_dev_dbg(hdev, "sock %p", sk);
6442
6443         status = mgmt_le_support(hdev);
6444         if (status)
6445                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446                                        status);
6447
6448         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6449                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6450                                        MGMT_STATUS_INVALID_PARAMS);
6451
6452         if (hdev->advertising_paused)
6453                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6454                                        MGMT_STATUS_BUSY);
6455
6456         hci_dev_lock(hdev);
6457
6458         val = !!cp->val;
6459
6460         /* The following conditions mean that we should not do any HCI
6461          * communication but should instead directly send a mgmt
6462          * response to user space (after toggling the flag if
6463          * necessary).
6464          */
6465         if (!hdev_is_powered(hdev) ||
6466             (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6467              (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6468             hci_dev_test_flag(hdev, HCI_MESH) ||
6469             hci_conn_num(hdev, LE_LINK) > 0 ||
6470             (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6471              hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6472                 bool changed;
6473
6474                 if (cp->val) {
6475                         hdev->cur_adv_instance = 0x00;
6476                         changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6477                         if (cp->val == 0x02)
6478                                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6479                         else
6480                                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6481                 } else {
6482                         changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6483                         hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6484                 }
6485
6486                 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6487                 if (err < 0)
6488                         goto unlock;
6489
6490                 if (changed)
6491                         err = new_settings(hdev, sk);
6492
6493                 goto unlock;
6494         }
6495
6496         if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6497             pending_find(MGMT_OP_SET_LE, hdev)) {
6498                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6499                                       MGMT_STATUS_BUSY);
6500                 goto unlock;
6501         }
6502
6503         cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6504         if (!cmd)
6505                 err = -ENOMEM;
6506         else
6507                 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6508                                          set_advertising_complete);
6509
6510         if (err < 0 && cmd)
6511                 mgmt_pending_remove(cmd);
6512
6513 unlock:
6514         hci_dev_unlock(hdev);
6515         return err;
6516 }
6517
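/* MGMT_OP_SET_STATIC_ADDRESS may only be used while the adapter is powered
 * off. A non-zero address must be a valid static random address, i.e. not
 * BDADDR_NONE and with the two most significant bits set (0xc0 in b[5]).
 */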
6518 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6519                               void *data, u16 len)
6520 {
6521         struct mgmt_cp_set_static_address *cp = data;
6522         int err;
6523
6524         bt_dev_dbg(hdev, "sock %p", sk);
6525
6526         if (!lmp_le_capable(hdev))
6527                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6528                                        MGMT_STATUS_NOT_SUPPORTED);
6529
6530         if (hdev_is_powered(hdev))
6531                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6532                                        MGMT_STATUS_REJECTED);
6533
6534         if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6535                 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6536                         return mgmt_cmd_status(sk, hdev->id,
6537                                                MGMT_OP_SET_STATIC_ADDRESS,
6538                                                MGMT_STATUS_INVALID_PARAMS);
6539
6540                 /* Two most significant bits shall be set */
6541                 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6542                         return mgmt_cmd_status(sk, hdev->id,
6543                                                MGMT_OP_SET_STATIC_ADDRESS,
6544                                                MGMT_STATUS_INVALID_PARAMS);
6545         }
6546
6547         hci_dev_lock(hdev);
6548
6549         bacpy(&hdev->static_addr, &cp->bdaddr);
6550
6551         err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6552         if (err < 0)
6553                 goto unlock;
6554
6555         err = new_settings(hdev, sk);
6556
6557 unlock:
6558         hci_dev_unlock(hdev);
6559         return err;
6560 }
6561
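/* MGMT_OP_SET_SCAN_PARAMS updates the default LE scan interval and window.
 * Both values use the controller's standard 0.625 ms units and must lie
 * within 0x0004-0x4000 with window <= interval (the checks below); if passive
 * background scanning is running it is restarted so the new values take
 * effect.
 */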
6562 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6563                            void *data, u16 len)
6564 {
6565         struct mgmt_cp_set_scan_params *cp = data;
6566         __u16 interval, window;
6567         int err;
6568
6569         bt_dev_dbg(hdev, "sock %p", sk);
6570
6571         if (!lmp_le_capable(hdev))
6572                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6573                                        MGMT_STATUS_NOT_SUPPORTED);
6574
6575         interval = __le16_to_cpu(cp->interval);
6576
6577         if (interval < 0x0004 || interval > 0x4000)
6578                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6579                                        MGMT_STATUS_INVALID_PARAMS);
6580
6581         window = __le16_to_cpu(cp->window);
6582
6583         if (window < 0x0004 || window > 0x4000)
6584                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6585                                        MGMT_STATUS_INVALID_PARAMS);
6586
6587         if (window > interval)
6588                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6589                                        MGMT_STATUS_INVALID_PARAMS);
6590
6591         hci_dev_lock(hdev);
6592
6593         hdev->le_scan_interval = interval;
6594         hdev->le_scan_window = window;
6595
6596         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6597                                 NULL, 0);
6598
6599         /* If background scanning is running, restart it so that the new
6600          * parameters are loaded.
6601          */
6602         if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6603             hdev->discovery.state == DISCOVERY_STOPPED)
6604                 hci_update_passive_scan(hdev);
6605
6606         hci_dev_unlock(hdev);
6607
6608         return err;
6609 }
6610
6611 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6612 {
6613         struct mgmt_pending_cmd *cmd = data;
6614
6615         bt_dev_dbg(hdev, "err %d", err);
6616
6617         if (err) {
6618                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6619                                 mgmt_status(err));
6620         } else {
6621                 struct mgmt_mode *cp = cmd->param;
6622
6623                 if (cp->val)
6624                         hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6625                 else
6626                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6627
6628                 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6629                 new_settings(hdev, cmd->sk);
6630         }
6631
6632         mgmt_pending_free(cmd);
6633 }
6634
6635 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6636 {
6637         struct mgmt_pending_cmd *cmd = data;
6638         struct mgmt_mode *cp = cmd->param;
6639
6640         return hci_write_fast_connectable_sync(hdev, cp->val);
6641 }
6642
6643 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6644                                 void *data, u16 len)
6645 {
6646         struct mgmt_mode *cp = data;
6647         struct mgmt_pending_cmd *cmd;
6648         int err;
6649
6650         bt_dev_dbg(hdev, "sock %p", sk);
6651
6652         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6653             hdev->hci_ver < BLUETOOTH_VER_1_2)
6654                 return mgmt_cmd_status(sk, hdev->id,
6655                                        MGMT_OP_SET_FAST_CONNECTABLE,
6656                                        MGMT_STATUS_NOT_SUPPORTED);
6657
6658         if (cp->val != 0x00 && cp->val != 0x01)
6659                 return mgmt_cmd_status(sk, hdev->id,
6660                                        MGMT_OP_SET_FAST_CONNECTABLE,
6661                                        MGMT_STATUS_INVALID_PARAMS);
6662
6663         hci_dev_lock(hdev);
6664
6665         if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6666                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6667                 goto unlock;
6668         }
6669
6670         if (!hdev_is_powered(hdev)) {
6671                 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6672                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6673                 new_settings(hdev, sk);
6674                 goto unlock;
6675         }
6676
6677         cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6678                                len);
6679         if (!cmd)
6680                 err = -ENOMEM;
6681         else
6682                 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6683                                          fast_connectable_complete);
6684
6685         if (err < 0) {
6686                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6687                                 MGMT_STATUS_FAILED);
6688
6689                 if (cmd)
6690                         mgmt_pending_free(cmd);
6691         }
6692
6693 unlock:
6694         hci_dev_unlock(hdev);
6695
6696         return err;
6697 }
6698
6699 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6700 {
6701         struct mgmt_pending_cmd *cmd = data;
6702
6703         bt_dev_dbg(hdev, "err %d", err);
6704
6705         if (err) {
6706                 u8 mgmt_err = mgmt_status(err);
6707
6708                 /* We need to restore the flag if related HCI commands
6709                  * failed.
6710                  */
6711                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6712
6713                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6714         } else {
6715                 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6716                 new_settings(hdev, cmd->sk);
6717         }
6718
6719         mgmt_pending_free(cmd);
6720 }
6721
6722 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6723 {
6724         int status;
6725
6726         status = hci_write_fast_connectable_sync(hdev, false);
6727
6728         if (!status)
6729                 status = hci_update_scan_sync(hdev);
6730
6731         /* Since only the advertising data flags will change, there
6732          * is no need to update the scan response data.
6733          */
6734         if (!status)
6735                 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6736
6737         return status;
6738 }
6739
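/* MGMT_OP_SET_BREDR toggles BR/EDR support on a dual-mode (BR/EDR + LE)
 * controller. Disabling is only allowed while powered off, and re-enabling
 * is rejected for LE-only configurations that use a static address or have
 * Secure Connections enabled, as explained in the comment further down.
 */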
6740 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6741 {
6742         struct mgmt_mode *cp = data;
6743         struct mgmt_pending_cmd *cmd;
6744         int err;
6745
6746         bt_dev_dbg(hdev, "sock %p", sk);
6747
6748         if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6749                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6750                                        MGMT_STATUS_NOT_SUPPORTED);
6751
6752         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6753                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6754                                        MGMT_STATUS_REJECTED);
6755
6756         if (cp->val != 0x00 && cp->val != 0x01)
6757                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6758                                        MGMT_STATUS_INVALID_PARAMS);
6759
6760         hci_dev_lock(hdev);
6761
6762         if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6763                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6764                 goto unlock;
6765         }
6766
6767         if (!hdev_is_powered(hdev)) {
6768                 if (!cp->val) {
6769                         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6770                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6771                         hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6772                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6773                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6774                 }
6775
6776                 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6777
6778                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6779                 if (err < 0)
6780                         goto unlock;
6781
6782                 err = new_settings(hdev, sk);
6783                 goto unlock;
6784         }
6785
6786         /* Reject disabling when powered on */
6787         if (!cp->val) {
6788                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6789                                       MGMT_STATUS_REJECTED);
6790                 goto unlock;
6791         } else {
6792                 /* When a dual-mode controller has been configured to
6793                  * operate with LE only and is using a static address,
6794                  * switching BR/EDR back on is not allowed.
6795                  *
6796                  * Dual-mode controllers shall operate with the public
6797                  * address as their identity address for BR/EDR and LE. So
6798                  * reject the attempt to create an invalid configuration.
6799                  *
6800                  * The same restriction applies when Secure Connections
6801                  * has been enabled. For BR/EDR this is a controller
6802                  * feature while for LE it is a host stack feature. This
6803                  * means that switching BR/EDR back on when Secure
6804                  * Connections has been enabled is not a supported transaction.
6805                  */
6806                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6807                     (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6808                      hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6809                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6810                                               MGMT_STATUS_REJECTED);
6811                         goto unlock;
6812                 }
6813         }
6814
6815         cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6816         if (!cmd)
6817                 err = -ENOMEM;
6818         else
6819                 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6820                                          set_bredr_complete);
6821
6822         if (err < 0) {
6823                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6824                                 MGMT_STATUS_FAILED);
6825                 if (cmd)
6826                         mgmt_pending_free(cmd);
6827
6828                 goto unlock;
6829         }
6830
6831         /* We need to flip the bit already here so that
6832          * hci_req_update_adv_data generates the correct flags.
6833          */
6834         hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6835
6836 unlock:
6837         hci_dev_unlock(hdev);
6838         return err;
6839 }
6840
6841 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6842 {
6843         struct mgmt_pending_cmd *cmd = data;
6844         struct mgmt_mode *cp;
6845
6846         bt_dev_dbg(hdev, "err %d", err);
6847
6848         if (err) {
6849                 u8 mgmt_err = mgmt_status(err);
6850
6851                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6852                 goto done;
6853         }
6854
6855         cp = cmd->param;
6856
6857         switch (cp->val) {
6858         case 0x00:
6859                 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6860                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6861                 break;
6862         case 0x01:
6863                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6864                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6865                 break;
6866         case 0x02:
6867                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6868                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6869                 break;
6870         }
6871
6872         send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6873         new_settings(hdev, cmd->sk);
6874
6875 done:
6876         mgmt_pending_free(cmd);
6877 }
6878
6879 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6880 {
6881         struct mgmt_pending_cmd *cmd = data;
6882         struct mgmt_mode *cp = cmd->param;
6883         u8 val = !!cp->val;
6884
6885         /* Force write of val */
6886         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6887
6888         return hci_write_sc_support_sync(hdev, val);
6889 }
6890
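/* MGMT_OP_SET_SECURE_CONN accepts 0x00 (off), 0x01 (on) and 0x02 (Secure
 * Connections Only mode, tracked via HCI_SC_ONLY). When the adapter is
 * powered, SC capable and has BR/EDR enabled, the new value is written to the
 * controller asynchronously; otherwise only the flags are updated.
 */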
6891 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6892                            void *data, u16 len)
6893 {
6894         struct mgmt_mode *cp = data;
6895         struct mgmt_pending_cmd *cmd;
6896         u8 val;
6897         int err;
6898
6899         bt_dev_dbg(hdev, "sock %p", sk);
6900
6901         if (!lmp_sc_capable(hdev) &&
6902             !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6903                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6904                                        MGMT_STATUS_NOT_SUPPORTED);
6905
6906         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6907             lmp_sc_capable(hdev) &&
6908             !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6909                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6910                                        MGMT_STATUS_REJECTED);
6911
6912         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6913                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6914                                        MGMT_STATUS_INVALID_PARAMS);
6915
6916         hci_dev_lock(hdev);
6917
6918         if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6919             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6920                 bool changed;
6921
6922                 if (cp->val) {
6923                         changed = !hci_dev_test_and_set_flag(hdev,
6924                                                              HCI_SC_ENABLED);
6925                         if (cp->val == 0x02)
6926                                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6927                         else
6928                                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6929                 } else {
6930                         changed = hci_dev_test_and_clear_flag(hdev,
6931                                                               HCI_SC_ENABLED);
6932                         hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6933                 }
6934
6935                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6936                 if (err < 0)
6937                         goto failed;
6938
6939                 if (changed)
6940                         err = new_settings(hdev, sk);
6941
6942                 goto failed;
6943         }
6944
6945         val = !!cp->val;
6946
6947         if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6948             (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6949                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6950                 goto failed;
6951         }
6952
6953         cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6954         if (!cmd)
6955                 err = -ENOMEM;
6956         else
6957                 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6958                                          set_secure_conn_complete);
6959
6960         if (err < 0) {
6961                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6962                                 MGMT_STATUS_FAILED);
6963                 if (cmd)
6964                         mgmt_pending_free(cmd);
6965         }
6966
6967 failed:
6968         hci_dev_unlock(hdev);
6969         return err;
6970 }
6971
6972 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6973                           void *data, u16 len)
6974 {
6975         struct mgmt_mode *cp = data;
6976         bool changed, use_changed;
6977         int err;
6978
6979         bt_dev_dbg(hdev, "sock %p", sk);
6980
6981         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6982                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6983                                        MGMT_STATUS_INVALID_PARAMS);
6984
6985         hci_dev_lock(hdev);
6986
6987         if (cp->val)
6988                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6989         else
6990                 changed = hci_dev_test_and_clear_flag(hdev,
6991                                                       HCI_KEEP_DEBUG_KEYS);
6992
6993         if (cp->val == 0x02)
6994                 use_changed = !hci_dev_test_and_set_flag(hdev,
6995                                                          HCI_USE_DEBUG_KEYS);
6996         else
6997                 use_changed = hci_dev_test_and_clear_flag(hdev,
6998                                                           HCI_USE_DEBUG_KEYS);
6999
7000         if (hdev_is_powered(hdev) && use_changed &&
7001             hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7002                 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7003                 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7004                              sizeof(mode), &mode);
7005         }
7006
7007         err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7008         if (err < 0)
7009                 goto unlock;
7010
7011         if (changed)
7012                 err = new_settings(hdev, sk);
7013
7014 unlock:
7015         hci_dev_unlock(hdev);
7016         return err;
7017 }
7018
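/* MGMT_OP_SET_PRIVACY can only be used while powered off. Value 0x01 enables
 * LE privacy using the supplied local IRK and 0x02 additionally selects the
 * limited privacy mode (HCI_LIMITED_PRIVACY). Accepting the command implies
 * that user space handles IRKs, so HCI_RPA_RESOLVING is always set.
 */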
7019 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7020                        u16 len)
7021 {
7022         struct mgmt_cp_set_privacy *cp = cp_data;
7023         bool changed;
7024         int err;
7025
7026         bt_dev_dbg(hdev, "sock %p", sk);
7027
7028         if (!lmp_le_capable(hdev))
7029                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7030                                        MGMT_STATUS_NOT_SUPPORTED);
7031
7032         if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7033                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7034                                        MGMT_STATUS_INVALID_PARAMS);
7035
7036         if (hdev_is_powered(hdev))
7037                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7038                                        MGMT_STATUS_REJECTED);
7039
7040         hci_dev_lock(hdev);
7041
7042         /* If user space supports this command it is also expected to
7043          * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7044          */
7045         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7046
7047         if (cp->privacy) {
7048                 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7049                 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7050                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7051                 hci_adv_instances_set_rpa_expired(hdev, true);
7052                 if (cp->privacy == 0x02)
7053                         hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7054                 else
7055                         hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7056         } else {
7057                 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7058                 memset(hdev->irk, 0, sizeof(hdev->irk));
7059                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7060                 hci_adv_instances_set_rpa_expired(hdev, false);
7061                 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7062         }
7063
7064         err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7065         if (err < 0)
7066                 goto unlock;
7067
7068         if (changed)
7069                 err = new_settings(hdev, sk);
7070
7071 unlock:
7072         hci_dev_unlock(hdev);
7073         return err;
7074 }
7075
7076 static bool irk_is_valid(struct mgmt_irk_info *irk)
7077 {
7078         switch (irk->addr.type) {
7079         case BDADDR_LE_PUBLIC:
7080                 return true;
7081
7082         case BDADDR_LE_RANDOM:
7083                 /* Two most significant bits shall be set */
7084                 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7085                         return false;
7086                 return true;
7087         }
7088
7089         return false;
7090 }
7091
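/* MGMT_OP_LOAD_IRKS replaces the complete list of stored Identity Resolving
 * Keys. Every entry is validated first (random addresses must be static,
 * i.e. have the two most significant bits set), then the existing SMP IRK
 * store is cleared and repopulated, skipping any keys on the blocked key
 * list.
 */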
7092 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7093                      u16 len)
7094 {
7095         struct mgmt_cp_load_irks *cp = cp_data;
7096         const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7097                                    sizeof(struct mgmt_irk_info));
7098         u16 irk_count, expected_len;
7099         int i, err;
7100
7101         bt_dev_dbg(hdev, "sock %p", sk);
7102
7103         if (!lmp_le_capable(hdev))
7104                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7105                                        MGMT_STATUS_NOT_SUPPORTED);
7106
7107         irk_count = __le16_to_cpu(cp->irk_count);
7108         if (irk_count > max_irk_count) {
7109                 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7110                            irk_count);
7111                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7112                                        MGMT_STATUS_INVALID_PARAMS);
7113         }
7114
7115         expected_len = struct_size(cp, irks, irk_count);
7116         if (expected_len != len) {
7117                 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7118                            expected_len, len);
7119                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7120                                        MGMT_STATUS_INVALID_PARAMS);
7121         }
7122
7123         bt_dev_dbg(hdev, "irk_count %u", irk_count);
7124
7125         for (i = 0; i < irk_count; i++) {
7126                 struct mgmt_irk_info *key = &cp->irks[i];
7127
7128                 if (!irk_is_valid(key))
7129                         return mgmt_cmd_status(sk, hdev->id,
7130                                                MGMT_OP_LOAD_IRKS,
7131                                                MGMT_STATUS_INVALID_PARAMS);
7132         }
7133
7134         hci_dev_lock(hdev);
7135
7136         hci_smp_irks_clear(hdev);
7137
7138         for (i = 0; i < irk_count; i++) {
7139                 struct mgmt_irk_info *irk = &cp->irks[i];
7140
7141                 if (hci_is_blocked_key(hdev,
7142                                        HCI_BLOCKED_KEY_TYPE_IRK,
7143                                        irk->val)) {
7144                         bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7145                                     &irk->addr.bdaddr);
7146                         continue;
7147                 }
7148
7149                 hci_add_irk(hdev, &irk->addr.bdaddr,
7150                             le_addr_type(irk->addr.type), irk->val,
7151                             BDADDR_ANY);
7152         }
7153
7154         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7155
7156         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7157
7158         hci_dev_unlock(hdev);
7159
7160         return err;
7161 }
7162
7163 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7164 {
7165         if (key->initiator != 0x00 && key->initiator != 0x01)
7166                 return false;
7167
7168         switch (key->addr.type) {
7169         case BDADDR_LE_PUBLIC:
7170                 return true;
7171
7172         case BDADDR_LE_RANDOM:
7173                 /* Two most significant bits shall be set */
7174                 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7175                         return false;
7176                 return true;
7177         }
7178
7179         return false;
7180 }
7181
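/* MGMT_OP_LOAD_LONG_TERM_KEYS replaces the complete list of stored LTKs. The
 * mgmt key type selects between legacy pairing keys (initiator vs. responder
 * role) and LE Secure Connections P-256 keys; P-256 debug keys and unknown
 * types are silently skipped.
 */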
7182 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7183                                void *cp_data, u16 len)
7184 {
7185         struct mgmt_cp_load_long_term_keys *cp = cp_data;
7186         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7187                                    sizeof(struct mgmt_ltk_info));
7188         u16 key_count, expected_len;
7189         int i, err;
7190
7191         bt_dev_dbg(hdev, "sock %p", sk);
7192
7193         if (!lmp_le_capable(hdev))
7194                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7195                                        MGMT_STATUS_NOT_SUPPORTED);
7196
7197         key_count = __le16_to_cpu(cp->key_count);
7198         if (key_count > max_key_count) {
7199                 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7200                            key_count);
7201                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7202                                        MGMT_STATUS_INVALID_PARAMS);
7203         }
7204
7205         expected_len = struct_size(cp, keys, key_count);
7206         if (expected_len != len) {
7207                 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7208                            expected_len, len);
7209                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7210                                        MGMT_STATUS_INVALID_PARAMS);
7211         }
7212
7213         bt_dev_dbg(hdev, "key_count %u", key_count);
7214
7215         for (i = 0; i < key_count; i++) {
7216                 struct mgmt_ltk_info *key = &cp->keys[i];
7217
7218                 if (!ltk_is_valid(key))
7219                         return mgmt_cmd_status(sk, hdev->id,
7220                                                MGMT_OP_LOAD_LONG_TERM_KEYS,
7221                                                MGMT_STATUS_INVALID_PARAMS);
7222         }
7223
7224         hci_dev_lock(hdev);
7225
7226         hci_smp_ltks_clear(hdev);
7227
7228         for (i = 0; i < key_count; i++) {
7229                 struct mgmt_ltk_info *key = &cp->keys[i];
7230                 u8 type, authenticated;
7231
7232                 if (hci_is_blocked_key(hdev,
7233                                        HCI_BLOCKED_KEY_TYPE_LTK,
7234                                        key->val)) {
7235                         bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7236                                     &key->addr.bdaddr);
7237                         continue;
7238                 }
7239
7240                 switch (key->type) {
7241                 case MGMT_LTK_UNAUTHENTICATED:
7242                         authenticated = 0x00;
7243                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7244                         break;
7245                 case MGMT_LTK_AUTHENTICATED:
7246                         authenticated = 0x01;
7247                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7248                         break;
7249                 case MGMT_LTK_P256_UNAUTH:
7250                         authenticated = 0x00;
7251                         type = SMP_LTK_P256;
7252                         break;
7253                 case MGMT_LTK_P256_AUTH:
7254                         authenticated = 0x01;
7255                         type = SMP_LTK_P256;
7256                         break;
7257                 case MGMT_LTK_P256_DEBUG:
7258                         authenticated = 0x00;
7259                         type = SMP_LTK_P256_DEBUG;
7260                         fallthrough;
7261                 default:
7262                         continue;
7263                 }
7264
7265                 hci_add_ltk(hdev, &key->addr.bdaddr,
7266                             le_addr_type(key->addr.type), type, authenticated,
7267                             key->val, key->enc_size, key->ediv, key->rand);
7268         }
7269
7270         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7271                            NULL, 0);
7272
7273         hci_dev_unlock(hdev);
7274
7275         return err;
7276 }
7277
7278 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7279 {
7280         struct mgmt_pending_cmd *cmd = data;
7281         struct hci_conn *conn = cmd->user_data;
7282         struct mgmt_cp_get_conn_info *cp = cmd->param;
7283         struct mgmt_rp_get_conn_info rp;
7284         u8 status;
7285
7286         bt_dev_dbg(hdev, "err %d", err);
7287
7288         memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
7289
7290         status = mgmt_status(err);
7291         if (status == MGMT_STATUS_SUCCESS) {
7292                 rp.rssi = conn->rssi;
7293                 rp.tx_power = conn->tx_power;
7294                 rp.max_tx_power = conn->max_tx_power;
7295         } else {
7296                 rp.rssi = HCI_RSSI_INVALID;
7297                 rp.tx_power = HCI_TX_POWER_INVALID;
7298                 rp.max_tx_power = HCI_TX_POWER_INVALID;
7299         }
7300
7301         mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7302                           &rp, sizeof(rp));
7303
7304         mgmt_pending_free(cmd);
7305 }
7306
7307 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7308 {
7309         struct mgmt_pending_cmd *cmd = data;
7310         struct mgmt_cp_get_conn_info *cp = cmd->param;
7311         struct hci_conn *conn;
7312         int err;
7313         __le16   handle;
7314
7315         /* Make sure we are still connected */
7316         if (cp->addr.type == BDADDR_BREDR)
7317                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7318                                                &cp->addr.bdaddr);
7319         else
7320                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7321
7322         if (!conn || conn->state != BT_CONNECTED)
7323                 return MGMT_STATUS_NOT_CONNECTED;
7324
7325         cmd->user_data = conn;
7326         handle = cpu_to_le16(conn->handle);
7327
7328         /* Refresh RSSI each time */
7329         err = hci_read_rssi_sync(hdev, handle);
7330
7331         /* For LE links the TX power does not change, so we don't need to
7332          * query for it again once the value is known.
7333          */
7334         if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7335                      conn->tx_power == HCI_TX_POWER_INVALID))
7336                 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7337
7338         /* Max TX power needs to be read only once per connection */
7339         if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7340                 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7341
7342         return err;
7343 }
7344
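/* MGMT_OP_GET_CONN_INFO returns RSSI, TX power and maximum TX power for an
 * existing connection. The values are cached in the hci_conn and only
 * refreshed from the controller when the cache is older than a randomized
 * threshold (see get_conn_info_sync() above for the actual HCI reads).
 */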
7345 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7346                          u16 len)
7347 {
7348         struct mgmt_cp_get_conn_info *cp = data;
7349         struct mgmt_rp_get_conn_info rp;
7350         struct hci_conn *conn;
7351         unsigned long conn_info_age;
7352         int err = 0;
7353
7354         bt_dev_dbg(hdev, "sock %p", sk);
7355
7356         memset(&rp, 0, sizeof(rp));
7357         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7358         rp.addr.type = cp->addr.type;
7359
7360         if (!bdaddr_type_is_valid(cp->addr.type))
7361                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7362                                          MGMT_STATUS_INVALID_PARAMS,
7363                                          &rp, sizeof(rp));
7364
7365         hci_dev_lock(hdev);
7366
7367         if (!hdev_is_powered(hdev)) {
7368                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7369                                         MGMT_STATUS_NOT_POWERED, &rp,
7370                                         sizeof(rp));
7371                 goto unlock;
7372         }
7373
7374         if (cp->addr.type == BDADDR_BREDR)
7375                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7376                                                &cp->addr.bdaddr);
7377         else
7378                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7379
7380         if (!conn || conn->state != BT_CONNECTED) {
7381                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7382                                         MGMT_STATUS_NOT_CONNECTED, &rp,
7383                                         sizeof(rp));
7384                 goto unlock;
7385         }
7386
7387         /* To prevent the client from guessing when to poll again, calculate the
7388          * conn info age as a random value between the min/max set in hdev.
7389          */
7390         conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7391                                                  hdev->conn_info_max_age - 1);
7392
7393         /* Query controller to refresh cached values if they are too old or were
7394          * never read.
7395          */
7396         if (time_after(jiffies, conn->conn_info_timestamp +
7397                        msecs_to_jiffies(conn_info_age)) ||
7398             !conn->conn_info_timestamp) {
7399                 struct mgmt_pending_cmd *cmd;
7400
7401                 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7402                                        len);
7403                 if (!cmd) {
7404                         err = -ENOMEM;
7405                 } else {
7406                         err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7407                                                  cmd, get_conn_info_complete);
7408                 }
7409
7410                 if (err < 0) {
7411                         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7412                                           MGMT_STATUS_FAILED, &rp, sizeof(rp));
7413
7414                         if (cmd)
7415                                 mgmt_pending_free(cmd);
7416
7417                         goto unlock;
7418                 }
7419
7420                 conn->conn_info_timestamp = jiffies;
7421         } else {
7422                 /* Cache is valid, just reply with values cached in hci_conn */
7423                 rp.rssi = conn->rssi;
7424                 rp.tx_power = conn->tx_power;
7425                 rp.max_tx_power = conn->max_tx_power;
7426
7427                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7428                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7429         }
7430
7431 unlock:
7432         hci_dev_unlock(hdev);
7433         return err;
7434 }
7435
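/* Completion callback for Get Clock Info: report the local clock and, when a
 * connection was specified, the piconet clock and its accuracy.
 */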
7436 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7437 {
7438         struct mgmt_pending_cmd *cmd = data;
7439         struct mgmt_cp_get_clock_info *cp = cmd->param;
7440         struct mgmt_rp_get_clock_info rp;
7441         struct hci_conn *conn = cmd->user_data;
7442         u8 status = mgmt_status(err);
7443
7444         bt_dev_dbg(hdev, "err %d", err);
7445
7446         memset(&rp, 0, sizeof(rp));
7447         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7448         rp.addr.type = cp->addr.type;
7449
7450         if (err)
7451                 goto complete;
7452
7453         rp.local_clock = cpu_to_le32(hdev->clock);
7454
7455         if (conn) {
7456                 rp.piconet_clock = cpu_to_le32(conn->clock);
7457                 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7458         }
7459
7460 complete:
7461         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7462                           sizeof(rp));
7463
7464         mgmt_pending_free(cmd);
7465 }
7466
7467 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7468 {
7469         struct mgmt_pending_cmd *cmd = data;
7470         struct mgmt_cp_get_clock_info *cp = cmd->param;
7471         struct hci_cp_read_clock hci_cp;
7472         struct hci_conn *conn;
7473
7474         memset(&hci_cp, 0, sizeof(hci_cp));
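        /* hci_cp is zeroed, so 'which' is 0x00 and this first read returns
         * the local clock.
         */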
7475         hci_read_clock_sync(hdev, &hci_cp);
7476
7477         /* Make sure connection still exists */
7478         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7479         if (!conn || conn->state != BT_CONNECTED)
7480                 return MGMT_STATUS_NOT_CONNECTED;
7481
7482         cmd->user_data = conn;
7483         hci_cp.handle = cpu_to_le16(conn->handle);
7484         hci_cp.which = 0x01; /* Piconet clock */
7485
7486         return hci_read_clock_sync(hdev, &hci_cp);
7487 }
7488
7489 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7490                                                                 u16 len)
7491 {
7492         struct mgmt_cp_get_clock_info *cp = data;
7493         struct mgmt_rp_get_clock_info rp;
7494         struct mgmt_pending_cmd *cmd;
7495         struct hci_conn *conn;
7496         int err;
7497
7498         bt_dev_dbg(hdev, "sock %p", sk);
7499
7500         memset(&rp, 0, sizeof(rp));
7501         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7502         rp.addr.type = cp->addr.type;
7503
7504         if (cp->addr.type != BDADDR_BREDR)
7505                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7506                                          MGMT_STATUS_INVALID_PARAMS,
7507                                          &rp, sizeof(rp));
7508
7509         hci_dev_lock(hdev);
7510
7511         if (!hdev_is_powered(hdev)) {
7512                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7513                                         MGMT_STATUS_NOT_POWERED, &rp,
7514                                         sizeof(rp));
7515                 goto unlock;
7516         }
7517
7518         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7519                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7520                                                &cp->addr.bdaddr);
7521                 if (!conn || conn->state != BT_CONNECTED) {
7522                         err = mgmt_cmd_complete(sk, hdev->id,
7523                                                 MGMT_OP_GET_CLOCK_INFO,
7524                                                 MGMT_STATUS_NOT_CONNECTED,
7525                                                 &rp, sizeof(rp));
7526                         goto unlock;
7527                 }
7528         } else {
7529                 conn = NULL;
7530         }
7531
7532         cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7533         if (!cmd)
7534                 err = -ENOMEM;
7535         else
7536                 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7537                                          get_clock_info_complete);
7538
7539         if (err < 0) {
7540                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7541                                         MGMT_STATUS_FAILED, &rp, sizeof(rp));
7542
7543                 if (cmd)
7544                         mgmt_pending_free(cmd);
7545         }
7546
7547
7548 unlock:
7549         hci_dev_unlock(hdev);
7550         return err;
7551 }
7552
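/* Return true if there is an established LE connection to the given address
 * and address type.
 */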
7553 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7554 {
7555         struct hci_conn *conn;
7556
7557         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7558         if (!conn)
7559                 return false;
7560
7561         if (conn->dst_type != type)
7562                 return false;
7563
7564         if (conn->state != BT_CONNECTED)
7565                 return false;
7566
7567         return true;
7568 }
7569
7570 /* This function requires the caller holds hdev->lock */
7571 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7572                                u8 addr_type, u8 auto_connect)
7573 {
7574         struct hci_conn_params *params;
7575
7576         params = hci_conn_params_add(hdev, addr, addr_type);
7577         if (!params)
7578                 return -EIO;
7579
7580         if (params->auto_connect == auto_connect)
7581                 return 0;
7582
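        /* Drop the params from any pending action list before re-adding them
         * below according to the new auto_connect policy.
         */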
7583         list_del_init(&params->action);
7584
7585         switch (auto_connect) {
7586         case HCI_AUTO_CONN_DISABLED:
7587         case HCI_AUTO_CONN_LINK_LOSS:
7588                 /* If auto connect is being disabled while we're trying to
7589                  * connect to the device, keep connecting.
7590                  */
7591                 if (params->explicit_connect)
7592                         list_add(&params->action, &hdev->pend_le_conns);
7593                 break;
7594         case HCI_AUTO_CONN_REPORT:
7595                 if (params->explicit_connect)
7596                         list_add(&params->action, &hdev->pend_le_conns);
7597                 else
7598                         list_add(&params->action, &hdev->pend_le_reports);
7599                 break;
7600         case HCI_AUTO_CONN_DIRECT:
7601         case HCI_AUTO_CONN_ALWAYS:
7602                 if (!is_connected(hdev, addr, addr_type))
7603                         list_add(&params->action, &hdev->pend_le_conns);
7604                 break;
7605         }
7606
7607         params->auto_connect = auto_connect;
7608
7609         bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7610                    addr, addr_type, auto_connect);
7611
7612         return 0;
7613 }
7614
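/* Send the Device Added mgmt event for the given address and action. */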
7615 static void device_added(struct sock *sk, struct hci_dev *hdev,
7616                          bdaddr_t *bdaddr, u8 type, u8 action)
7617 {
7618         struct mgmt_ev_device_added ev;
7619
7620         bacpy(&ev.addr.bdaddr, bdaddr);
7621         ev.addr.type = type;
7622         ev.action = action;
7623
7624         mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7625 }
7626
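/* hci_sync callback for Add Device: re-sync the passive scan so that the new
 * entry takes effect.
 */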
7627 static int add_device_sync(struct hci_dev *hdev, void *data)
7628 {
7629         return hci_update_passive_scan_sync(hdev);
7630 }
7631
7632 static int add_device(struct sock *sk, struct hci_dev *hdev,
7633                       void *data, u16 len)
7634 {
7635         struct mgmt_cp_add_device *cp = data;
7636         u8 auto_conn, addr_type;
7637         struct hci_conn_params *params;
7638         int err;
7639         u32 current_flags = 0;
7640         u32 supported_flags;
7641
7642         bt_dev_dbg(hdev, "sock %p", sk);
7643
7644         if (!bdaddr_type_is_valid(cp->addr.type) ||
7645             !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7646                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7647                                          MGMT_STATUS_INVALID_PARAMS,
7648                                          &cp->addr, sizeof(cp->addr));
7649
7650         if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7651                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7652                                          MGMT_STATUS_INVALID_PARAMS,
7653                                          &cp->addr, sizeof(cp->addr));
7654
7655         hci_dev_lock(hdev);
7656
7657         if (cp->addr.type == BDADDR_BREDR) {
7658                 /* Only the incoming connections action is supported for now */
7659                 if (cp->action != 0x01) {
7660                         err = mgmt_cmd_complete(sk, hdev->id,
7661                                                 MGMT_OP_ADD_DEVICE,
7662                                                 MGMT_STATUS_INVALID_PARAMS,
7663                                                 &cp->addr, sizeof(cp->addr));
7664                         goto unlock;
7665                 }
7666
7667                 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7668                                                      &cp->addr.bdaddr,
7669                                                      cp->addr.type, 0);
7670                 if (err)
7671                         goto unlock;
7672
7673                 hci_update_scan(hdev);
7674
7675                 goto added;
7676         }
7677
7678         addr_type = le_addr_type(cp->addr.type);
7679
7680         if (cp->action == 0x02)
7681                 auto_conn = HCI_AUTO_CONN_ALWAYS;
7682         else if (cp->action == 0x01)
7683                 auto_conn = HCI_AUTO_CONN_DIRECT;
7684         else
7685                 auto_conn = HCI_AUTO_CONN_REPORT;
7686
7687         /* The kernel internally uses conn_params with resolvable private
7688          * addresses, but Add Device allows only identity addresses.
7689          * Make sure this is enforced before calling
7690          * hci_conn_params_lookup.
7691          */
7692         if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7693                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7694                                         MGMT_STATUS_INVALID_PARAMS,
7695                                         &cp->addr, sizeof(cp->addr));
7696                 goto unlock;
7697         }
7698
7699         /* If the connection parameters don't exist for this device,
7700          * they will be created and configured with defaults.
7701          */
7702         if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7703                                 auto_conn) < 0) {
7704                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7705                                         MGMT_STATUS_FAILED, &cp->addr,
7706                                         sizeof(cp->addr));
7707                 goto unlock;
7708         } else {
7709                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7710                                                 addr_type);
7711                 if (params)
7712                         current_flags = params->flags;
7713         }
7714
7715         err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7716         if (err < 0)
7717                 goto unlock;
7718
7719 added:
7720         device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7721         supported_flags = hdev->conn_flags;
7722         device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7723                              supported_flags, current_flags);
7724
7725         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7726                                 MGMT_STATUS_SUCCESS, &cp->addr,
7727                                 sizeof(cp->addr));
7728
7729 unlock:
7730         hci_dev_unlock(hdev);
7731         return err;
7732 }
7733
7734 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7735                            bdaddr_t *bdaddr, u8 type)
7736 {
7737         struct mgmt_ev_device_removed ev;
7738
7739         bacpy(&ev.addr.bdaddr, bdaddr);
7740         ev.addr.type = type;
7741
7742         mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7743 }
7744
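/* hci_sync callback for Remove Device: re-sync the passive scan now that the
 * entry is gone.
 */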
7745 static int remove_device_sync(struct hci_dev *hdev, void *data)
7746 {
7747         return hci_update_passive_scan_sync(hdev);
7748 }
7749
7750 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7751                          void *data, u16 len)
7752 {
7753         struct mgmt_cp_remove_device *cp = data;
7754         int err;
7755
7756         bt_dev_dbg(hdev, "sock %p", sk);
7757
7758         hci_dev_lock(hdev);
7759
7760         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7761                 struct hci_conn_params *params;
7762                 u8 addr_type;
7763
7764                 if (!bdaddr_type_is_valid(cp->addr.type)) {
7765                         err = mgmt_cmd_complete(sk, hdev->id,
7766                                                 MGMT_OP_REMOVE_DEVICE,
7767                                                 MGMT_STATUS_INVALID_PARAMS,
7768                                                 &cp->addr, sizeof(cp->addr));
7769                         goto unlock;
7770                 }
7771
7772                 if (cp->addr.type == BDADDR_BREDR) {
7773                         err = hci_bdaddr_list_del(&hdev->accept_list,
7774                                                   &cp->addr.bdaddr,
7775                                                   cp->addr.type);
7776                         if (err) {
7777                                 err = mgmt_cmd_complete(sk, hdev->id,
7778                                                         MGMT_OP_REMOVE_DEVICE,
7779                                                         MGMT_STATUS_INVALID_PARAMS,
7780                                                         &cp->addr,
7781                                                         sizeof(cp->addr));
7782                                 goto unlock;
7783                         }
7784
7785                         hci_update_scan(hdev);
7786
7787                         device_removed(sk, hdev, &cp->addr.bdaddr,
7788                                        cp->addr.type);
7789                         goto complete;
7790                 }
7791
7792                 addr_type = le_addr_type(cp->addr.type);
7793
7794                 /* The kernel internally uses conn_params with resolvable private
7795                  * addresses, but Remove Device allows only identity addresses.
7796                  * Make sure this is enforced before calling
7797                  * hci_conn_params_lookup.
7798                  */
7799                 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7800                         err = mgmt_cmd_complete(sk, hdev->id,
7801                                                 MGMT_OP_REMOVE_DEVICE,
7802                                                 MGMT_STATUS_INVALID_PARAMS,
7803                                                 &cp->addr, sizeof(cp->addr));
7804                         goto unlock;
7805                 }
7806
7807                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7808                                                 addr_type);
7809                 if (!params) {
7810                         err = mgmt_cmd_complete(sk, hdev->id,
7811                                                 MGMT_OP_REMOVE_DEVICE,
7812                                                 MGMT_STATUS_INVALID_PARAMS,
7813                                                 &cp->addr, sizeof(cp->addr));
7814                         goto unlock;
7815                 }
7816
7817                 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7818                     params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7819                         err = mgmt_cmd_complete(sk, hdev->id,
7820                                                 MGMT_OP_REMOVE_DEVICE,
7821                                                 MGMT_STATUS_INVALID_PARAMS,
7822                                                 &cp->addr, sizeof(cp->addr));
7823                         goto unlock;
7824                 }
7825
7826                 list_del(&params->action);
7827                 list_del(&params->list);
7828                 kfree(params);
7829
7830                 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7831         } else {
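                /* BDADDR_ANY: clear the whole accept list and remove all LE
                 * connection parameters, except entries that are disabled or
                 * still have an explicit connect in progress.
                 */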
7832                 struct hci_conn_params *p, *tmp;
7833                 struct bdaddr_list *b, *btmp;
7834
7835                 if (cp->addr.type) {
7836                         err = mgmt_cmd_complete(sk, hdev->id,
7837                                                 MGMT_OP_REMOVE_DEVICE,
7838                                                 MGMT_STATUS_INVALID_PARAMS,
7839                                                 &cp->addr, sizeof(cp->addr));
7840                         goto unlock;
7841                 }
7842
7843                 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7844                         device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7845                         list_del(&b->list);
7846                         kfree(b);
7847                 }
7848
7849                 hci_update_scan(hdev);
7850
7851                 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7852                         if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7853                                 continue;
7854                         device_removed(sk, hdev, &p->addr, p->addr_type);
7855                         if (p->explicit_connect) {
7856                                 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7857                                 continue;
7858                         }
7859                         list_del(&p->action);
7860                         list_del(&p->list);
7861                         kfree(p);
7862                 }
7863
7864                 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7865         }
7866
7867         hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7868
7869 complete:
7870         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7871                                 MGMT_STATUS_SUCCESS, &cp->addr,
7872                                 sizeof(cp->addr));
7873 unlock:
7874         hci_dev_unlock(hdev);
7875         return err;
7876 }
7877
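/* Load Connection Parameters: validate and store the LE connection parameter
 * list supplied by userspace, clearing previously disabled entries first.
 */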
7878 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7879                            u16 len)
7880 {
7881         struct mgmt_cp_load_conn_param *cp = data;
7882         const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7883                                      sizeof(struct mgmt_conn_param));
7884         u16 param_count, expected_len;
7885         int i;
7886
7887         if (!lmp_le_capable(hdev))
7888                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7889                                        MGMT_STATUS_NOT_SUPPORTED);
7890
7891         param_count = __le16_to_cpu(cp->param_count);
7892         if (param_count > max_param_count) {
7893                 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7894                            param_count);
7895                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7896                                        MGMT_STATUS_INVALID_PARAMS);
7897         }
7898
7899         expected_len = struct_size(cp, params, param_count);
7900         if (expected_len != len) {
7901                 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7902                            expected_len, len);
7903                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7904                                        MGMT_STATUS_INVALID_PARAMS);
7905         }
7906
7907         bt_dev_dbg(hdev, "param_count %u", param_count);
7908
7909         hci_dev_lock(hdev);
7910
7911         hci_conn_params_clear_disabled(hdev);
7912
7913         for (i = 0; i < param_count; i++) {
7914                 struct mgmt_conn_param *param = &cp->params[i];
7915                 struct hci_conn_params *hci_param;
7916                 u16 min, max, latency, timeout;
7917                 u8 addr_type;
7918
7919                 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7920                            param->addr.type);
7921
7922                 if (param->addr.type == BDADDR_LE_PUBLIC) {
7923                         addr_type = ADDR_LE_DEV_PUBLIC;
7924                 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7925                         addr_type = ADDR_LE_DEV_RANDOM;
7926                 } else {
7927                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7928                         continue;
7929                 }
7930
7931                 min = le16_to_cpu(param->min_interval);
7932                 max = le16_to_cpu(param->max_interval);
7933                 latency = le16_to_cpu(param->latency);
7934                 timeout = le16_to_cpu(param->timeout);
7935
7936                 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7937                            min, max, latency, timeout);
7938
7939                 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7940                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7941                         continue;
7942                 }
7943
7944                 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7945                                                 addr_type);
7946                 if (!hci_param) {
7947                         bt_dev_err(hdev, "failed to add connection parameters");
7948                         continue;
7949                 }
7950
7951                 hci_param->conn_min_interval = min;
7952                 hci_param->conn_max_interval = max;
7953                 hci_param->conn_latency = latency;
7954                 hci_param->supervision_timeout = timeout;
7955         }
7956
7957         hci_dev_unlock(hdev);
7958
7959         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7960                                  NULL, 0);
7961 }
7962
7963 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7964                                void *data, u16 len)
7965 {
7966         struct mgmt_cp_set_external_config *cp = data;
7967         bool changed;
7968         int err;
7969
7970         bt_dev_dbg(hdev, "sock %p", sk);
7971
7972         if (hdev_is_powered(hdev))
7973                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7974                                        MGMT_STATUS_REJECTED);
7975
7976         if (cp->config != 0x00 && cp->config != 0x01)
7977                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7978                                          MGMT_STATUS_INVALID_PARAMS);
7979
7980         if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7981                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7982                                        MGMT_STATUS_NOT_SUPPORTED);
7983
7984         hci_dev_lock(hdev);
7985
7986         if (cp->config)
7987                 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7988         else
7989                 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7990
7991         err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7992         if (err < 0)
7993                 goto unlock;
7994
7995         if (!changed)
7996                 goto unlock;
7997
7998         err = new_options(hdev, sk);
7999
8000         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8001                 mgmt_index_removed(hdev);
8002
8003                 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8004                         hci_dev_set_flag(hdev, HCI_CONFIG);
8005                         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8006
8007                         queue_work(hdev->req_workqueue, &hdev->power_on);
8008                 } else {
8009                         set_bit(HCI_RAW, &hdev->flags);
8010                         mgmt_index_added(hdev);
8011                 }
8012         }
8013
8014 unlock:
8015         hci_dev_unlock(hdev);
8016         return err;
8017 }
8018
8019 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8020                               void *data, u16 len)
8021 {
8022         struct mgmt_cp_set_public_address *cp = data;
8023         bool changed;
8024         int err;
8025
8026         bt_dev_dbg(hdev, "sock %p", sk);
8027
8028         if (hdev_is_powered(hdev))
8029                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8030                                        MGMT_STATUS_REJECTED);
8031
8032         if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8033                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8034                                        MGMT_STATUS_INVALID_PARAMS);
8035
8036         if (!hdev->set_bdaddr)
8037                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8038                                        MGMT_STATUS_NOT_SUPPORTED);
8039
8040         hci_dev_lock(hdev);
8041
8042         changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8043         bacpy(&hdev->public_addr, &cp->bdaddr);
8044
8045         err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8046         if (err < 0)
8047                 goto unlock;
8048
8049         if (!changed)
8050                 goto unlock;
8051
8052         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8053                 err = new_options(hdev, sk);
8054
8055         if (is_configured(hdev)) {
8056                 mgmt_index_removed(hdev);
8057
8058                 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8059
8060                 hci_dev_set_flag(hdev, HCI_CONFIG);
8061                 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8062
8063                 queue_work(hdev->req_workqueue, &hdev->power_on);
8064         }
8065
8066 unlock:
8067         hci_dev_unlock(hdev);
8068         return err;
8069 }
8070
8071 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8072                                              int err)
8073 {
8074         const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8075         struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8076         u8 *h192, *r192, *h256, *r256;
8077         struct mgmt_pending_cmd *cmd = data;
8078         struct sk_buff *skb = cmd->skb;
8079         u8 status = mgmt_status(err);
8080         u16 eir_len;
8081
8082         if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8083                 return;
8084
8085         if (!status) {
8086                 if (!skb)
8087                         status = MGMT_STATUS_FAILED;
8088                 else if (IS_ERR(skb))
8089                         status = mgmt_status(PTR_ERR(skb));
8090                 else
8091                         status = mgmt_status(skb->data[0]);
8092         }
8093
8094         bt_dev_dbg(hdev, "status %u", status);
8095
8096         mgmt_cp = cmd->param;
8097
8098         if (status) {
8099                 status = mgmt_status(status);
8100                 eir_len = 0;
8101
8102                 h192 = NULL;
8103                 r192 = NULL;
8104                 h256 = NULL;
8105                 r256 = NULL;
8106         } else if (!bredr_sc_enabled(hdev)) {
8107                 struct hci_rp_read_local_oob_data *rp;
8108
8109                 if (skb->len != sizeof(*rp)) {
8110                         status = MGMT_STATUS_FAILED;
8111                         eir_len = 0;
8112                 } else {
8113                         status = MGMT_STATUS_SUCCESS;
8114                         rp = (void *)skb->data;
8115
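                        /* Class of Device field (3 bytes plus 2 byte header)
                         * and the 16 byte C-192 hash and R-192 randomizer,
                         * each with a 2 byte EIR header.
                         */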
8116                         eir_len = 5 + 18 + 18;
8117                         h192 = rp->hash;
8118                         r192 = rp->rand;
8119                         h256 = NULL;
8120                         r256 = NULL;
8121                 }
8122         } else {
8123                 struct hci_rp_read_local_oob_ext_data *rp;
8124
8125                 if (skb->len != sizeof(*rp)) {
8126                         status = MGMT_STATUS_FAILED;
8127                         eir_len = 0;
8128                 } else {
8129                         status = MGMT_STATUS_SUCCESS;
8130                         rp = (void *)skb->data;
8131
8132                         if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8133                                 eir_len = 5 + 18 + 18;
8134                                 h192 = NULL;
8135                                 r192 = NULL;
8136                         } else {
8137                                 eir_len = 5 + 18 + 18 + 18 + 18;
8138                                 h192 = rp->hash192;
8139                                 r192 = rp->rand192;
8140                         }
8141
8142                         h256 = rp->hash256;
8143                         r256 = rp->rand256;
8144                 }
8145         }
8146
8147         mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8148         if (!mgmt_rp)
8149                 goto done;
8150
8151         if (eir_len == 0)
8152                 goto send_rsp;
8153
8154         eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8155                                   hdev->dev_class, 3);
8156
8157         if (h192 && r192) {
8158                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8159                                           EIR_SSP_HASH_C192, h192, 16);
8160                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8161                                           EIR_SSP_RAND_R192, r192, 16);
8162         }
8163
8164         if (h256 && r256) {
8165                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8166                                           EIR_SSP_HASH_C256, h256, 16);
8167                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8168                                           EIR_SSP_RAND_R256, r256, 16);
8169         }
8170
8171 send_rsp:
8172         mgmt_rp->type = mgmt_cp->type;
8173         mgmt_rp->eir_len = cpu_to_le16(eir_len);
8174
8175         err = mgmt_cmd_complete(cmd->sk, hdev->id,
8176                                 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8177                                 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8178         if (err < 0 || status)
8179                 goto done;
8180
8181         hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8182
8183         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8184                                  mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8185                                  HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8186 done:
8187         if (skb && !IS_ERR(skb))
8188                 kfree_skb(skb);
8189
8190         kfree(mgmt_rp);
8191         mgmt_pending_remove(cmd);
8192 }
8193
8194 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8195                                   struct mgmt_cp_read_local_oob_ext_data *cp)
8196 {
8197         struct mgmt_pending_cmd *cmd;
8198         int err;
8199
8200         cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8201                                cp, sizeof(*cp));
8202         if (!cmd)
8203                 return -ENOMEM;
8204
8205         err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8206                                  read_local_oob_ext_data_complete);
8207
8208         if (err < 0) {
8209                 mgmt_pending_remove(cmd);
8210                 return err;
8211         }
8212
8213         return 0;
8214 }
8215
8216 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8217                                    void *data, u16 data_len)
8218 {
8219         struct mgmt_cp_read_local_oob_ext_data *cp = data;
8220         struct mgmt_rp_read_local_oob_ext_data *rp;
8221         size_t rp_len;
8222         u16 eir_len;
8223         u8 status, flags, role, addr[7], hash[16], rand[16];
8224         int err;
8225
8226         bt_dev_dbg(hdev, "sock %p", sk);
8227
8228         if (hdev_is_powered(hdev)) {
8229                 switch (cp->type) {
8230                 case BIT(BDADDR_BREDR):
8231                         status = mgmt_bredr_support(hdev);
8232                         if (status)
8233                                 eir_len = 0;
8234                         else
8235                                 eir_len = 5;
8236                         break;
8237                 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8238                         status = mgmt_le_support(hdev);
8239                         if (status)
8240                                 eir_len = 0;
8241                         else
8242                                 eir_len = 9 + 3 + 18 + 18 + 3;
8243                         break;
8244                 default:
8245                         status = MGMT_STATUS_INVALID_PARAMS;
8246                         eir_len = 0;
8247                         break;
8248                 }
8249         } else {
8250                 status = MGMT_STATUS_NOT_POWERED;
8251                 eir_len = 0;
8252         }
8253
8254         rp_len = sizeof(*rp) + eir_len;
8255         rp = kmalloc(rp_len, GFP_ATOMIC);
8256         if (!rp)
8257                 return -ENOMEM;
8258
8259         if (!status && !lmp_ssp_capable(hdev)) {
8260                 status = MGMT_STATUS_NOT_SUPPORTED;
8261                 eir_len = 0;
8262         }
8263
8264         if (status)
8265                 goto complete;
8266
8267         hci_dev_lock(hdev);
8268
8269         eir_len = 0;
8270         switch (cp->type) {
8271         case BIT(BDADDR_BREDR):
8272                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8273                         err = read_local_ssp_oob_req(hdev, sk, cp);
8274                         hci_dev_unlock(hdev);
8275                         if (!err)
8276                                 goto done;
8277
8278                         status = MGMT_STATUS_FAILED;
8279                         goto complete;
8280                 } else {
8281                         eir_len = eir_append_data(rp->eir, eir_len,
8282                                                   EIR_CLASS_OF_DEV,
8283                                                   hdev->dev_class, 3);
8284                 }
8285                 break;
8286         case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8287                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8288                     smp_generate_oob(hdev, hash, rand) < 0) {
8289                         hci_dev_unlock(hdev);
8290                         status = MGMT_STATUS_FAILED;
8291                         goto complete;
8292                 }
8293
8294                 /* This should return the active RPA, but since the RPA
8295                  * is only programmed on demand, it is really hard to fill
8296                  * this in at the moment. For now disallow retrieving
8297                  * local out-of-band data when privacy is in use.
8298                  *
8299                  * Returning the identity address will not help here since
8300                  * pairing happens before the identity resolving key is
8301                  * known and thus the connection establishment happens
8302                  * based on the RPA and not the identity address.
8303                  */
8304                 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8305                         hci_dev_unlock(hdev);
8306                         status = MGMT_STATUS_REJECTED;
8307                         goto complete;
8308                 }
8309
8310                 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8311                    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8312                    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8313                     bacmp(&hdev->static_addr, BDADDR_ANY))) {
8314                         memcpy(addr, &hdev->static_addr, 6);
8315                         addr[6] = 0x01;
8316                 } else {
8317                         memcpy(addr, &hdev->bdaddr, 6);
8318                         addr[6] = 0x00;
8319                 }
8320
8321                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8322                                           addr, sizeof(addr));
8323
8324                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8325                         role = 0x02;
8326                 else
8327                         role = 0x01;
8328
8329                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8330                                           &role, sizeof(role));
8331
8332                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8333                         eir_len = eir_append_data(rp->eir, eir_len,
8334                                                   EIR_LE_SC_CONFIRM,
8335                                                   hash, sizeof(hash));
8336
8337                         eir_len = eir_append_data(rp->eir, eir_len,
8338                                                   EIR_LE_SC_RANDOM,
8339                                                   rand, sizeof(rand));
8340                 }
8341
8342                 flags = mgmt_get_adv_discov_flags(hdev);
8343
8344                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8345                         flags |= LE_AD_NO_BREDR;
8346
8347                 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8348                                           &flags, sizeof(flags));
8349                 break;
8350         }
8351
8352         hci_dev_unlock(hdev);
8353
8354         hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8355
8356         status = MGMT_STATUS_SUCCESS;
8357
8358 complete:
8359         rp->type = cp->type;
8360         rp->eir_len = cpu_to_le16(eir_len);
8361
8362         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8363                                 status, rp, sizeof(*rp) + eir_len);
8364         if (err < 0 || status)
8365                 goto done;
8366
8367         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8368                                  rp, sizeof(*rp) + eir_len,
8369                                  HCI_MGMT_OOB_DATA_EVENTS, sk);
8370
8371 done:
8372         kfree(rp);
8373
8374         return err;
8375 }
8376
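/* Build the bitmask of advertising flags that this controller supports,
 * adding the extended advertising specific ones when available.
 */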
8377 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8378 {
8379         u32 flags = 0;
8380
8381         flags |= MGMT_ADV_FLAG_CONNECTABLE;
8382         flags |= MGMT_ADV_FLAG_DISCOV;
8383         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8384         flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8385         flags |= MGMT_ADV_FLAG_APPEARANCE;
8386         flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8387         flags |= MGMT_ADV_PARAM_DURATION;
8388         flags |= MGMT_ADV_PARAM_TIMEOUT;
8389         flags |= MGMT_ADV_PARAM_INTERVALS;
8390         flags |= MGMT_ADV_PARAM_TX_POWER;
8391         flags |= MGMT_ADV_PARAM_SCAN_RSP;
8392
8393         /* With extended advertising the TX_POWER returned from Set Adv Param
8394          * will always be valid.
8395          */
8396         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8397                 flags |= MGMT_ADV_FLAG_TX_POWER;
8398
8399         if (ext_adv_capable(hdev)) {
8400                 flags |= MGMT_ADV_FLAG_SEC_1M;
8401                 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8402                 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8403
8404                 if (le_2m_capable(hdev))
8405                         flags |= MGMT_ADV_FLAG_SEC_2M;
8406
8407                 if (le_coded_capable(hdev))
8408                         flags |= MGMT_ADV_FLAG_SEC_CODED;
8409         }
8410
8411         return flags;
8412 }
8413
8414 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8415                              void *data, u16 data_len)
8416 {
8417         struct mgmt_rp_read_adv_features *rp;
8418         size_t rp_len;
8419         int err;
8420         struct adv_info *adv_instance;
8421         u32 supported_flags;
8422         u8 *instance;
8423
8424         bt_dev_dbg(hdev, "sock %p", sk);
8425
8426         if (!lmp_le_capable(hdev))
8427                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8428                                        MGMT_STATUS_REJECTED);
8429
8430         hci_dev_lock(hdev);
8431
8432         rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8433         rp = kmalloc(rp_len, GFP_ATOMIC);
8434         if (!rp) {
8435                 hci_dev_unlock(hdev);
8436                 return -ENOMEM;
8437         }
8438
8439         supported_flags = get_supported_adv_flags(hdev);
8440
8441         rp->supported_flags = cpu_to_le32(supported_flags);
8442         rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
8443         rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
8444         rp->max_instances = hdev->le_num_of_adv_sets;
8445         rp->num_instances = hdev->adv_instance_cnt;
8446
8447         instance = rp->instance;
8448         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8449                 /* Only instances 1-le_num_of_adv_sets are externally visible */
8450                 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8451                         *instance = adv_instance->instance;
8452                         instance++;
8453                 } else {
8454                         rp->num_instances--;
8455                         rp_len--;
8456                 }
8457         }
8458
8459         hci_dev_unlock(hdev);
8460
8461         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8462                                 MGMT_STATUS_SUCCESS, rp, rp_len);
8463
8464         kfree(rp);
8465
8466         return err;
8467 }
8468
8469 static u8 calculate_name_len(struct hci_dev *hdev)
8470 {
8471         u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8472
8473         return eir_append_local_name(hdev, buf, 0);
8474 }
8475
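/* Return how many bytes of advertising (or scan response) data remain for
 * userspace once the fields implied by adv_flags are reserved by the kernel.
 */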
8476 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8477                            bool is_adv_data)
8478 {
8479         u8 max_len = HCI_MAX_AD_LENGTH;
8480
8481         if (is_adv_data) {
8482                 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8483                                  MGMT_ADV_FLAG_LIMITED_DISCOV |
8484                                  MGMT_ADV_FLAG_MANAGED_FLAGS))
8485                         max_len -= 3;
8486
8487                 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8488                         max_len -= 3;
8489         } else {
8490                 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8491                         max_len -= calculate_name_len(hdev);
8492
8493                 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8494                         max_len -= 4;
8495         }
8496
8497         return max_len;
8498 }
8499
8500 static bool flags_managed(u32 adv_flags)
8501 {
8502         return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8503                             MGMT_ADV_FLAG_LIMITED_DISCOV |
8504                             MGMT_ADV_FLAG_MANAGED_FLAGS);
8505 }
8506
8507 static bool tx_power_managed(u32 adv_flags)
8508 {
8509         return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8510 }
8511
8512 static bool name_managed(u32 adv_flags)
8513 {
8514         return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8515 }
8516
8517 static bool appearance_managed(u32 adv_flags)
8518 {
8519         return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8520 }
8521
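/* Validate user supplied AD data: it must fit within the available space and
 * must not contain fields that the kernel manages for this instance.
 */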
8522 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8523                               u8 len, bool is_adv_data)
8524 {
8525         int i, cur_len;
8526         u8 max_len;
8527
8528         max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8529
8530         if (len > max_len)
8531                 return false;
8532
8533         /* Make sure that the data is correctly formatted. */
8534         for (i = 0; i < len; i += (cur_len + 1)) {
8535                 cur_len = data[i];
8536
8537                 if (!cur_len)
8538                         continue;
8539
8540                 if (data[i + 1] == EIR_FLAGS &&
8541                     (!is_adv_data || flags_managed(adv_flags)))
8542                         return false;
8543
8544                 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8545                         return false;
8546
8547                 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8548                         return false;
8549
8550                 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8551                         return false;
8552
8553                 if (data[i + 1] == EIR_APPEARANCE &&
8554                     appearance_managed(adv_flags))
8555                         return false;
8556
8557                 /* If the current field length would exceed the total data
8558                  * length, then it's invalid.
8559                  */
8560                 if (i + cur_len >= len)
8561                         return false;
8562         }
8563
8564         return true;
8565 }
8566
8567 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8568 {
8569         u32 supported_flags, phy_flags;
8570
8571         /* The current implementation only supports a subset of the specified
8572          * flags. Also check that the sec (secondary PHY) flags are mutually exclusive.
8573          */
8574         supported_flags = get_supported_adv_flags(hdev);
8575         phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
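        /* phy_flags & -phy_flags isolates the lowest set bit, so the XOR in
         * the check below is non-zero only when more than one secondary PHY
         * flag is set.
         */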
8576         if (adv_flags & ~supported_flags ||
8577             ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8578                 return false;
8579
8580         return true;
8581 }
8582
8583 static bool adv_busy(struct hci_dev *hdev)
8584 {
8585         return pending_find(MGMT_OP_SET_LE, hdev);
8586 }
8587
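/* Walk the advertising instances that were still pending: clear the pending
 * flag on success, or remove the instance entirely on failure.
 */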
8588 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8589                              int err)
8590 {
8591         struct adv_info *adv, *n;
8592
8593         bt_dev_dbg(hdev, "err %d", err);
8594
8595         hci_dev_lock(hdev);
8596
8597         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8598                 u8 instance;
8599
8600                 if (!adv->pending)
8601                         continue;
8602
8603                 if (!err) {
8604                         adv->pending = false;
8605                         continue;
8606                 }
8607
8608                 instance = adv->instance;
8609
8610                 if (hdev->cur_adv_instance == instance)
8611                         cancel_adv_timeout(hdev);
8612
8613                 hci_remove_adv_instance(hdev, instance);
8614                 mgmt_advertising_removed(sk, hdev, instance);
8615         }
8616
8617         hci_dev_unlock(hdev);
8618 }
8619
8620 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8621 {
8622         struct mgmt_pending_cmd *cmd = data;
8623         struct mgmt_cp_add_advertising *cp = cmd->param;
8624         struct mgmt_rp_add_advertising rp;
8625
8626         memset(&rp, 0, sizeof(rp));
8627
8628         rp.instance = cp->instance;
8629
8630         if (err)
8631                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8632                                 mgmt_status(err));
8633         else
8634                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8635                                   mgmt_status(err), &rp, sizeof(rp));
8636
8637         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8638
8639         mgmt_pending_free(cmd);
8640 }
8641
8642 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8643 {
8644         struct mgmt_pending_cmd *cmd = data;
8645         struct mgmt_cp_add_advertising *cp = cmd->param;
8646
8647         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8648 }
8649
8650 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8651                            void *data, u16 data_len)
8652 {
8653         struct mgmt_cp_add_advertising *cp = data;
8654         struct mgmt_rp_add_advertising rp;
8655         u32 flags;
8656         u8 status;
8657         u16 timeout, duration;
8658         unsigned int prev_instance_cnt;
8659         u8 schedule_instance = 0;
8660         struct adv_info *adv, *next_instance;
8661         int err;
8662         struct mgmt_pending_cmd *cmd;
8663
8664         bt_dev_dbg(hdev, "sock %p", sk);
8665
8666         status = mgmt_le_support(hdev);
8667         if (status)
8668                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8669                                        status);
8670
8671         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8672                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8673                                        MGMT_STATUS_INVALID_PARAMS);
8674
8675         if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8676                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8677                                        MGMT_STATUS_INVALID_PARAMS);
8678
8679         flags = __le32_to_cpu(cp->flags);
8680         timeout = __le16_to_cpu(cp->timeout);
8681         duration = __le16_to_cpu(cp->duration);
8682
8683         if (!requested_adv_flags_are_valid(hdev, flags))
8684                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8685                                        MGMT_STATUS_INVALID_PARAMS);
8686
8687         hci_dev_lock(hdev);
8688
8689         if (timeout && !hdev_is_powered(hdev)) {
8690                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8691                                       MGMT_STATUS_REJECTED);
8692                 goto unlock;
8693         }
8694
8695         if (adv_busy(hdev)) {
8696                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8697                                       MGMT_STATUS_BUSY);
8698                 goto unlock;
8699         }
8700
8701         if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8702             !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8703                                cp->scan_rsp_len, false)) {
8704                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8705                                       MGMT_STATUS_INVALID_PARAMS);
8706                 goto unlock;
8707         }
8708
8709         prev_instance_cnt = hdev->adv_instance_cnt;
8710
8711         adv = hci_add_adv_instance(hdev, cp->instance, flags,
8712                                    cp->adv_data_len, cp->data,
8713                                    cp->scan_rsp_len,
8714                                    cp->data + cp->adv_data_len,
8715                                    timeout, duration,
8716                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
8717                                    hdev->le_adv_min_interval,
8718                                    hdev->le_adv_max_interval, 0);
8719         if (IS_ERR(adv)) {
8720                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8721                                       MGMT_STATUS_FAILED);
8722                 goto unlock;
8723         }
8724
8725         /* Only trigger an advertising added event if a new instance was
8726          * actually added.
8727          */
8728         if (hdev->adv_instance_cnt > prev_instance_cnt)
8729                 mgmt_advertising_added(sk, hdev, cp->instance);
8730
8731         if (hdev->cur_adv_instance == cp->instance) {
8732                 /* If the currently advertised instance is being changed then
8733                  * cancel the current advertising and schedule the next
8734                  * instance. If there is only one instance then the overridden
8735                  * advertising data will be visible right away.
8736                  */
8737                 cancel_adv_timeout(hdev);
8738
8739                 next_instance = hci_get_next_instance(hdev, cp->instance);
8740                 if (next_instance)
8741                         schedule_instance = next_instance->instance;
8742         } else if (!hdev->adv_instance_timeout) {
8743                 /* Immediately advertise the new instance if no other
8744                  * instance is currently being advertised.
8745                  */
8746                 schedule_instance = cp->instance;
8747         }
8748
8749         /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8750          * there is no instance to be advertised then we have no HCI
8751          * communication to make. Simply return.
8752          */
8753         if (!hdev_is_powered(hdev) ||
8754             hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8755             !schedule_instance) {
8756                 rp.instance = cp->instance;
8757                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8758                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8759                 goto unlock;
8760         }
8761
8762         /* We're good to go, update advertising data, parameters, and start
8763          * advertising.
8764          */
8765         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8766                                data_len);
8767         if (!cmd) {
8768                 err = -ENOMEM;
8769                 goto unlock;
8770         }
8771
8772         cp->instance = schedule_instance;
8773
8774         err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8775                                  add_advertising_complete);
8776         if (err < 0)
8777                 mgmt_pending_free(cmd);
8778
8779 unlock:
8780         hci_dev_unlock(hdev);
8781
8782         return err;
8783 }
8784
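/* Completion callback for the MGMT_OP_ADD_EXT_ADV_PARAMS request: on failure
 * the advertising instance is removed again (and its removal is signalled to
 * userspace if it was already advertising); on success the reply carries the
 * selected TX power and the remaining advertising/scan response data capacity.
 */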
8785 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8786                                         int err)
8787 {
8788         struct mgmt_pending_cmd *cmd = data;
8789         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8790         struct mgmt_rp_add_ext_adv_params rp;
8791         struct adv_info *adv;
8792         u32 flags;
8793
8794         BT_DBG("%s", hdev->name);
8795
8796         hci_dev_lock(hdev);
8797
8798         adv = hci_find_adv_instance(hdev, cp->instance);
8799         if (!adv)
8800                 goto unlock;
8801
8802         rp.instance = cp->instance;
8803         rp.tx_power = adv->tx_power;
8804
8805         /* While we're at it, inform userspace of the available space for this
8806          * advertisement, given the flags that will be used.
8807          */
8808         flags = __le32_to_cpu(cp->flags);
8809         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8810         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8811
8812         if (err) {
8813                 /* If this advertisement was previously advertising and we
8814                  * failed to update it, signal that it has been removed and
8815                  * delete its structure.
8816                  */
8817                 if (!adv->pending)
8818                         mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8819
8820                 hci_remove_adv_instance(hdev, cp->instance);
8821
8822                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8823                                 mgmt_status(err));
8824         } else {
8825                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8826                                   mgmt_status(err), &rp, sizeof(rp));
8827         }
8828
8829 unlock:
8830         if (cmd)
8831                 mgmt_pending_free(cmd);
8832
8833         hci_dev_unlock(hdev);
8834 }
8835
8836 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8837 {
8838         struct mgmt_pending_cmd *cmd = data;
8839         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8840
8841         return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8842 }
8843
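/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: creates an advertising instance
 * from the supplied parameters only, without any advertising or scan response
 * data. The data is provided separately through MGMT_OP_ADD_EXT_ADV_DATA.
 */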
8844 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8845                               void *data, u16 data_len)
8846 {
8847         struct mgmt_cp_add_ext_adv_params *cp = data;
8848         struct mgmt_rp_add_ext_adv_params rp;
8849         struct mgmt_pending_cmd *cmd = NULL;
8850         struct adv_info *adv;
8851         u32 flags, min_interval, max_interval;
8852         u16 timeout, duration;
8853         u8 status;
8854         s8 tx_power;
8855         int err;
8856
8857         BT_DBG("%s", hdev->name);
8858
8859         status = mgmt_le_support(hdev);
8860         if (status)
8861                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8862                                        status);
8863
8864         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8865                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8866                                        MGMT_STATUS_INVALID_PARAMS);
8867
8868         /* The purpose of breaking add_advertising into two separate MGMT calls
8869          * for params and data is to allow more parameters to be added to this
8870          * structure in the future. For this reason, we verify that we have the
8871          * bare minimum structure we know of when the interface was defined. Any
8872          * extra parameters we don't know about will be ignored in this request.
8873          */
8874         if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8875                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8876                                        MGMT_STATUS_INVALID_PARAMS);
8877
8878         flags = __le32_to_cpu(cp->flags);
8879
8880         if (!requested_adv_flags_are_valid(hdev, flags))
8881                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8882                                        MGMT_STATUS_INVALID_PARAMS);
8883
8884         hci_dev_lock(hdev);
8885
8886         /* In the new interface, we require that the device is powered to register */
8887         if (!hdev_is_powered(hdev)) {
8888                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8889                                       MGMT_STATUS_REJECTED);
8890                 goto unlock;
8891         }
8892
8893         if (adv_busy(hdev)) {
8894                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8895                                       MGMT_STATUS_BUSY);
8896                 goto unlock;
8897         }
8898
8899         /* Parse defined parameters from request, use defaults otherwise */
8900         timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8901                   __le16_to_cpu(cp->timeout) : 0;
8902
8903         duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8904                    __le16_to_cpu(cp->duration) :
8905                    hdev->def_multi_adv_rotation_duration;
8906
8907         min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8908                        __le32_to_cpu(cp->min_interval) :
8909                        hdev->le_adv_min_interval;
8910
8911         max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8912                        __le32_to_cpu(cp->max_interval) :
8913                        hdev->le_adv_max_interval;
8914
8915         tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8916                    cp->tx_power :
8917                    HCI_ADV_TX_POWER_NO_PREFERENCE;
8918
8919         /* Create advertising instance with no advertising or response data */
8920         adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8921                                    timeout, duration, tx_power, min_interval,
8922                                    max_interval, 0);
8923
8924         if (IS_ERR(adv)) {
8925                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8926                                       MGMT_STATUS_FAILED);
8927                 goto unlock;
8928         }
8929
8930         /* Submit request for advertising params if ext adv available */
8931         if (ext_adv_capable(hdev)) {
8932                 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8933                                        data, data_len);
8934                 if (!cmd) {
8935                         err = -ENOMEM;
8936                         hci_remove_adv_instance(hdev, cp->instance);
8937                         goto unlock;
8938                 }
8939
8940                 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8941                                          add_ext_adv_params_complete);
8942                 if (err < 0)
8943                         mgmt_pending_free(cmd);
8944         } else {
8945                 rp.instance = cp->instance;
8946                 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8947                 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8948                 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8949                 err = mgmt_cmd_complete(sk, hdev->id,
8950                                         MGMT_OP_ADD_EXT_ADV_PARAMS,
8951                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8952         }
8953
8954 unlock:
8955         hci_dev_unlock(hdev);
8956
8957         return err;
8958 }
8959
8960 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8961 {
8962         struct mgmt_pending_cmd *cmd = data;
8963         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8964         struct mgmt_rp_add_advertising rp;
8965
8966         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8967
8968         memset(&rp, 0, sizeof(rp));
8969
8970         rp.instance = cp->instance;
8971
8972         if (err)
8973                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8974                                 mgmt_status(err));
8975         else
8976                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8977                                   mgmt_status(err), &rp, sizeof(rp));
8978
8979         mgmt_pending_free(cmd);
8980 }
8981
8982 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8983 {
8984         struct mgmt_pending_cmd *cmd = data;
8985         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8986         int err;
8987
8988         if (ext_adv_capable(hdev)) {
8989                 err = hci_update_adv_data_sync(hdev, cp->instance);
8990                 if (err)
8991                         return err;
8992
8993                 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8994                 if (err)
8995                         return err;
8996
8997                 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8998         }
8999
9000         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9001 }
9002
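/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: validates and attaches advertising
 * and scan response data to an instance previously created with
 * MGMT_OP_ADD_EXT_ADV_PARAMS and, when required, queues the work to update
 * the controller and start advertising the instance.
 */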
9003 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9004                             u16 data_len)
9005 {
9006         struct mgmt_cp_add_ext_adv_data *cp = data;
9007         struct mgmt_rp_add_ext_adv_data rp;
9008         u8 schedule_instance = 0;
9009         struct adv_info *next_instance;
9010         struct adv_info *adv_instance;
9011         int err = 0;
9012         struct mgmt_pending_cmd *cmd;
9013
9014         BT_DBG("%s", hdev->name);
9015
9016         hci_dev_lock(hdev);
9017
9018         adv_instance = hci_find_adv_instance(hdev, cp->instance);
9019
9020         if (!adv_instance) {
9021                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9022                                       MGMT_STATUS_INVALID_PARAMS);
9023                 goto unlock;
9024         }
9025
9026         /* In the new interface, we require that the device is powered to register */
9027         if (!hdev_is_powered(hdev)) {
9028                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9029                                       MGMT_STATUS_REJECTED);
9030                 goto clear_new_instance;
9031         }
9032
9033         if (adv_busy(hdev)) {
9034                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9035                                       MGMT_STATUS_BUSY);
9036                 goto clear_new_instance;
9037         }
9038
9039         /* Validate new data */
9040         if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9041                                cp->adv_data_len, true) ||
9042             !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9043                                cp->adv_data_len, cp->scan_rsp_len, false)) {
9044                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9045                                       MGMT_STATUS_INVALID_PARAMS);
9046                 goto clear_new_instance;
9047         }
9048
9049         /* Set the data in the advertising instance */
9050         hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9051                                   cp->data, cp->scan_rsp_len,
9052                                   cp->data + cp->adv_data_len);
9053
9054         /* If using software rotation, determine next instance to use */
9055         if (hdev->cur_adv_instance == cp->instance) {
9056                 /* If the currently advertised instance is being changed
9057                  * then cancel the current advertising and schedule the
9058                  * next instance. If there is only one instance then the
9059                  * overridden advertising data will be visible right
9060                  * away.
9061                  */
9062                 cancel_adv_timeout(hdev);
9063
9064                 next_instance = hci_get_next_instance(hdev, cp->instance);
9065                 if (next_instance)
9066                         schedule_instance = next_instance->instance;
9067         } else if (!hdev->adv_instance_timeout) {
9068                 /* Immediately advertise the new instance if no other
9069                  * instance is currently being advertised.
9070                  */
9071                 schedule_instance = cp->instance;
9072         }
9073
9074         /* If the HCI_ADVERTISING flag is set or there is no instance to
9075          * be advertised, then we have no HCI communication to make.
9076          * Simply return.
9077          */
9078         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9079                 if (adv_instance->pending) {
9080                         mgmt_advertising_added(sk, hdev, cp->instance);
9081                         adv_instance->pending = false;
9082                 }
9083                 rp.instance = cp->instance;
9084                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9085                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9086                 goto unlock;
9087         }
9088
9089         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9090                                data_len);
9091         if (!cmd) {
9092                 err = -ENOMEM;
9093                 goto clear_new_instance;
9094         }
9095
9096         err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9097                                  add_ext_adv_data_complete);
9098         if (err < 0) {
9099                 mgmt_pending_free(cmd);
9100                 goto clear_new_instance;
9101         }
9102
9103         /* We were successful in updating the data, so trigger the
9104          * advertising_added event if this instance wasn't previously
9105          * advertising. If a failure occurs in the requests we initiated,
9106          * the instance will be removed again in the completion handler.
9107          */
9108         if (adv_instance->pending)
9109                 mgmt_advertising_added(sk, hdev, cp->instance);
9110
9111         goto unlock;
9112
9113 clear_new_instance:
9114         hci_remove_adv_instance(hdev, cp->instance);
9115
9116 unlock:
9117         hci_dev_unlock(hdev);
9118
9119         return err;
9120 }
9121
9122 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9123                                         int err)
9124 {
9125         struct mgmt_pending_cmd *cmd = data;
9126         struct mgmt_cp_remove_advertising *cp = cmd->param;
9127         struct mgmt_rp_remove_advertising rp;
9128
9129         bt_dev_dbg(hdev, "err %d", err);
9130
9131         memset(&rp, 0, sizeof(rp));
9132         rp.instance = cp->instance;
9133
9134         if (err)
9135                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9136                                 mgmt_status(err));
9137         else
9138                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9139                                   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9140
9141         mgmt_pending_free(cmd);
9142 }
9143
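/* Remove the requested advertising instance and disable advertising
 * altogether once no instances are left.
 */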
9144 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9145 {
9146         struct mgmt_pending_cmd *cmd = data;
9147         struct mgmt_cp_remove_advertising *cp = cmd->param;
9148         int err;
9149
9150         err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9151         if (err)
9152                 return err;
9153
9154         if (list_empty(&hdev->adv_instances))
9155                 err = hci_disable_advertising_sync(hdev);
9156
9157         return err;
9158 }
9159
9160 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9161                               void *data, u16 data_len)
9162 {
9163         struct mgmt_cp_remove_advertising *cp = data;
9164         struct mgmt_pending_cmd *cmd;
9165         int err;
9166
9167         bt_dev_dbg(hdev, "sock %p", sk);
9168
9169         hci_dev_lock(hdev);
9170
9171         if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9172                 err = mgmt_cmd_status(sk, hdev->id,
9173                                       MGMT_OP_REMOVE_ADVERTISING,
9174                                       MGMT_STATUS_INVALID_PARAMS);
9175                 goto unlock;
9176         }
9177
9178         if (pending_find(MGMT_OP_SET_LE, hdev)) {
9179                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9180                                       MGMT_STATUS_BUSY);
9181                 goto unlock;
9182         }
9183
9184         if (list_empty(&hdev->adv_instances)) {
9185                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9186                                       MGMT_STATUS_INVALID_PARAMS);
9187                 goto unlock;
9188         }
9189
9190         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9191                                data_len);
9192         if (!cmd) {
9193                 err = -ENOMEM;
9194                 goto unlock;
9195         }
9196
9197         err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9198                                  remove_advertising_complete);
9199         if (err < 0)
9200                 mgmt_pending_free(cmd);
9201
9202 unlock:
9203         hci_dev_unlock(hdev);
9204
9205         return err;
9206 }
9207
9208 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9209                              void *data, u16 data_len)
9210 {
9211         struct mgmt_cp_get_adv_size_info *cp = data;
9212         struct mgmt_rp_get_adv_size_info rp;
9213         u32 flags, supported_flags;
9214
9215         bt_dev_dbg(hdev, "sock %p", sk);
9216
9217         if (!lmp_le_capable(hdev))
9218                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9219                                        MGMT_STATUS_REJECTED);
9220
9221         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9222                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9223                                        MGMT_STATUS_INVALID_PARAMS);
9224
9225         flags = __le32_to_cpu(cp->flags);
9226
9227         /* The current implementation only supports a subset of the specified
9228          * flags.
9229          */
9230         supported_flags = get_supported_adv_flags(hdev);
9231         if (flags & ~supported_flags)
9232                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9233                                        MGMT_STATUS_INVALID_PARAMS);
9234
9235         rp.instance = cp->instance;
9236         rp.flags = cp->flags;
9237         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9238         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9239
9240         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9241                                  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9242 }
9243
9244 static const struct hci_mgmt_handler mgmt_handlers[] = {
9245         { NULL }, /* 0x0000 (no command) */
9246         { read_version,            MGMT_READ_VERSION_SIZE,
9247                                                 HCI_MGMT_NO_HDEV |
9248                                                 HCI_MGMT_UNTRUSTED },
9249         { read_commands,           MGMT_READ_COMMANDS_SIZE,
9250                                                 HCI_MGMT_NO_HDEV |
9251                                                 HCI_MGMT_UNTRUSTED },
9252         { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
9253                                                 HCI_MGMT_NO_HDEV |
9254                                                 HCI_MGMT_UNTRUSTED },
9255         { read_controller_info,    MGMT_READ_INFO_SIZE,
9256                                                 HCI_MGMT_UNTRUSTED },
9257         { set_powered,             MGMT_SETTING_SIZE },
9258         { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
9259         { set_connectable,         MGMT_SETTING_SIZE },
9260         { set_fast_connectable,    MGMT_SETTING_SIZE },
9261         { set_bondable,            MGMT_SETTING_SIZE },
9262         { set_link_security,       MGMT_SETTING_SIZE },
9263         { set_ssp,                 MGMT_SETTING_SIZE },
9264         { set_hs,                  MGMT_SETTING_SIZE },
9265         { set_le,                  MGMT_SETTING_SIZE },
9266         { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
9267         { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
9268         { add_uuid,                MGMT_ADD_UUID_SIZE },
9269         { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
9270         { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
9271                                                 HCI_MGMT_VAR_LEN },
9272         { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9273                                                 HCI_MGMT_VAR_LEN },
9274         { disconnect,              MGMT_DISCONNECT_SIZE },
9275         { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
9276         { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
9277         { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
9278         { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
9279         { pair_device,             MGMT_PAIR_DEVICE_SIZE },
9280         { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
9281         { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
9282         { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
9283         { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9284         { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
9285         { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9286         { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
9287         { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9288                                                 HCI_MGMT_VAR_LEN },
9289         { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9290         { start_discovery,         MGMT_START_DISCOVERY_SIZE },
9291         { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
9292         { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
9293         { block_device,            MGMT_BLOCK_DEVICE_SIZE },
9294         { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
9295         { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
9296         { set_advertising,         MGMT_SETTING_SIZE },
9297         { set_bredr,               MGMT_SETTING_SIZE },
9298         { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
9299         { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
9300         { set_secure_conn,         MGMT_SETTING_SIZE },
9301         { set_debug_keys,          MGMT_SETTING_SIZE },
9302         { set_privacy,             MGMT_SET_PRIVACY_SIZE },
9303         { load_irks,               MGMT_LOAD_IRKS_SIZE,
9304                                                 HCI_MGMT_VAR_LEN },
9305         { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
9306         { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
9307         { add_device,              MGMT_ADD_DEVICE_SIZE },
9308         { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
9309         { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
9310                                                 HCI_MGMT_VAR_LEN },
9311         { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9312                                                 HCI_MGMT_NO_HDEV |
9313                                                 HCI_MGMT_UNTRUSTED },
9314         { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
9315                                                 HCI_MGMT_UNCONFIGURED |
9316                                                 HCI_MGMT_UNTRUSTED },
9317         { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
9318                                                 HCI_MGMT_UNCONFIGURED },
9319         { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
9320                                                 HCI_MGMT_UNCONFIGURED },
9321         { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9322                                                 HCI_MGMT_VAR_LEN },
9323         { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9324         { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
9325                                                 HCI_MGMT_NO_HDEV |
9326                                                 HCI_MGMT_UNTRUSTED },
9327         { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
9328         { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
9329                                                 HCI_MGMT_VAR_LEN },
9330         { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
9331         { get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
9332         { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9333         { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9334                                                 HCI_MGMT_UNTRUSTED },
9335         { set_appearance,          MGMT_SET_APPEARANCE_SIZE },
9336         { get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
9337         { set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
9338         { set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9339                                                 HCI_MGMT_VAR_LEN },
9340         { set_wideband_speech,     MGMT_SETTING_SIZE },
9341         { read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
9342                                                 HCI_MGMT_UNTRUSTED },
9343         { read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
9344                                                 HCI_MGMT_UNTRUSTED |
9345                                                 HCI_MGMT_HDEV_OPTIONAL },
9346         { set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
9347                                                 HCI_MGMT_VAR_LEN |
9348                                                 HCI_MGMT_HDEV_OPTIONAL },
9349         { read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9350                                                 HCI_MGMT_UNTRUSTED },
9351         { set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9352                                                 HCI_MGMT_VAR_LEN },
9353         { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9354                                                 HCI_MGMT_UNTRUSTED },
9355         { set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9356                                                 HCI_MGMT_VAR_LEN },
9357         { get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
9358         { set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
9359         { read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9360         { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9361                                                 HCI_MGMT_VAR_LEN },
9362         { remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
9363         { add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9364                                                 HCI_MGMT_VAR_LEN },
9365         { add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
9366                                                 HCI_MGMT_VAR_LEN },
9367         { add_adv_patterns_monitor_rssi,
9368                                    MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9369                                                 HCI_MGMT_VAR_LEN },
9370         { set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
9371                                                 HCI_MGMT_VAR_LEN },
9372         { mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
9373         { mesh_send,               MGMT_MESH_SEND_SIZE,
9374                                                 HCI_MGMT_VAR_LEN },
9375         { mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
9376 };
9377
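/* Notify userspace of a newly registered controller index, using the
 * unconfigured or regular Index Added event plus the Extended Index Added
 * event depending on the controller type and state.
 */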
9378 void mgmt_index_added(struct hci_dev *hdev)
9379 {
9380         struct mgmt_ev_ext_index ev;
9381
9382         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9383                 return;
9384
9385         switch (hdev->dev_type) {
9386         case HCI_PRIMARY:
9387                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9388                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9389                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9390                         ev.type = 0x01;
9391                 } else {
9392                         mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9393                                          HCI_MGMT_INDEX_EVENTS);
9394                         ev.type = 0x00;
9395                 }
9396                 break;
9397         case HCI_AMP:
9398                 ev.type = 0x02;
9399                 break;
9400         default:
9401                 return;
9402         }
9403
9404         ev.bus = hdev->bus;
9405
9406         mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9407                          HCI_MGMT_EXT_INDEX_EVENTS);
9408 }
9409
9410 void mgmt_index_removed(struct hci_dev *hdev)
9411 {
9412         struct mgmt_ev_ext_index ev;
9413         u8 status = MGMT_STATUS_INVALID_INDEX;
9414
9415         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9416                 return;
9417
9418         switch (hdev->dev_type) {
9419         case HCI_PRIMARY:
9420                 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9421
9422                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9423                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9424                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9425                         ev.type = 0x01;
9426                 } else {
9427                         mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9428                                          HCI_MGMT_INDEX_EVENTS);
9429                         ev.type = 0x00;
9430                 }
9431                 break;
9432         case HCI_AMP:
9433                 ev.type = 0x02;
9434                 break;
9435         default:
9436                 return;
9437         }
9438
9439         ev.bus = hdev->bus;
9440
9441         mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9442                          HCI_MGMT_EXT_INDEX_EVENTS);
9443
9444         /* Cancel any remaining timed work */
9445         if (!hci_dev_test_flag(hdev, HCI_MGMT))
9446                 return;
9447         cancel_delayed_work_sync(&hdev->discov_off);
9448         cancel_delayed_work_sync(&hdev->service_cache);
9449         cancel_delayed_work_sync(&hdev->rpa_expired);
9450 }
9451
9452 void mgmt_power_on(struct hci_dev *hdev, int err)
9453 {
9454         struct cmd_lookup match = { NULL, hdev };
9455
9456         bt_dev_dbg(hdev, "err %d", err);
9457
9458         hci_dev_lock(hdev);
9459
9460         if (!err) {
9461                 restart_le_actions(hdev);
9462                 hci_update_passive_scan(hdev);
9463         }
9464
9465         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9466
9467         new_settings(hdev, match.sk);
9468
9469         if (match.sk)
9470                 sock_put(match.sk);
9471
9472         hci_dev_unlock(hdev);
9473 }
9474
9475 void __mgmt_power_off(struct hci_dev *hdev)
9476 {
9477         struct cmd_lookup match = { NULL, hdev };
9478         u8 status, zero_cod[] = { 0, 0, 0 };
9479
9480         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9481
9482         /* If the power off is because of hdev unregistration, use
9483          * the appropriate INVALID_INDEX status. Otherwise use
9484          * NOT_POWERED. We cover both scenarios here since later in
9485          * mgmt_index_removed() any hci_conn callbacks will have already
9486          * been triggered, potentially causing misleading DISCONNECTED
9487          * status responses.
9488          */
9489         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9490                 status = MGMT_STATUS_INVALID_INDEX;
9491         else
9492                 status = MGMT_STATUS_NOT_POWERED;
9493
9494         mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9495
9496         if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9497                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9498                                    zero_cod, sizeof(zero_cod),
9499                                    HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9500                 ext_info_changed(hdev, NULL);
9501         }
9502
9503         new_settings(hdev, match.sk);
9504
9505         if (match.sk)
9506                 sock_put(match.sk);
9507 }
9508
9509 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9510 {
9511         struct mgmt_pending_cmd *cmd;
9512         u8 status;
9513
9514         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9515         if (!cmd)
9516                 return;
9517
9518         if (err == -ERFKILL)
9519                 status = MGMT_STATUS_RFKILLED;
9520         else
9521                 status = MGMT_STATUS_FAILED;
9522
9523         mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9524
9525         mgmt_pending_remove(cmd);
9526 }
9527
9528 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9529                        bool persistent)
9530 {
9531         struct mgmt_ev_new_link_key ev;
9532
9533         memset(&ev, 0, sizeof(ev));
9534
9535         ev.store_hint = persistent;
9536         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9537         ev.key.addr.type = BDADDR_BREDR;
9538         ev.key.type = key->type;
9539         memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9540         ev.key.pin_len = key->pin_len;
9541
9542         mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9543 }
9544
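/* Map an SMP long term key to the key type reported to userspace, taking
 * the authentication level into account.
 */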
9545 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9546 {
9547         switch (ltk->type) {
9548         case SMP_LTK:
9549         case SMP_LTK_RESPONDER:
9550                 if (ltk->authenticated)
9551                         return MGMT_LTK_AUTHENTICATED;
9552                 return MGMT_LTK_UNAUTHENTICATED;
9553         case SMP_LTK_P256:
9554                 if (ltk->authenticated)
9555                         return MGMT_LTK_P256_AUTH;
9556                 return MGMT_LTK_P256_UNAUTH;
9557         case SMP_LTK_P256_DEBUG:
9558                 return MGMT_LTK_P256_DEBUG;
9559         }
9560
9561         return MGMT_LTK_UNAUTHENTICATED;
9562 }
9563
9564 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9565 {
9566         struct mgmt_ev_new_long_term_key ev;
9567
9568         memset(&ev, 0, sizeof(ev));
9569
9570         /* Devices using resolvable or non-resolvable random addresses
9571          * without providing an identity resolving key don't require
9572          * their long term keys to be stored. Their addresses will
9573          * change the next time around.
9574          *
9575          * Only store the long term key when a remote device provides
9576          * an identity address. If the remote identity is known, the
9577          * long term keys are internally mapped to the identity
9578          * address. So allow static random and public addresses
9579          * here.
9580          */
9581         if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9582             (key->bdaddr.b[5] & 0xc0) != 0xc0)
9583                 ev.store_hint = 0x00;
9584         else
9585                 ev.store_hint = persistent;
9586
9587         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9588         ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9589         ev.key.type = mgmt_ltk_type(key);
9590         ev.key.enc_size = key->enc_size;
9591         ev.key.ediv = key->ediv;
9592         ev.key.rand = key->rand;
9593
9594         if (key->type == SMP_LTK)
9595                 ev.key.initiator = 1;
9596
9597         /* Make sure we copy only the significant bytes based on the
9598          * encryption key size, and set the rest of the value to zeroes.
9599          */
9600         memcpy(ev.key.val, key->val, key->enc_size);
9601         memset(ev.key.val + key->enc_size, 0,
9602                sizeof(ev.key.val) - key->enc_size);
9603
9604         mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9605 }
9606
9607 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9608 {
9609         struct mgmt_ev_new_irk ev;
9610
9611         memset(&ev, 0, sizeof(ev));
9612
9613         ev.store_hint = persistent;
9614
9615         bacpy(&ev.rpa, &irk->rpa);
9616         bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9617         ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9618         memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9619
9620         mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9621 }
9622
9623 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9624                    bool persistent)
9625 {
9626         struct mgmt_ev_new_csrk ev;
9627
9628         memset(&ev, 0, sizeof(ev));
9629
9630         /* Devices using resolvable or non-resolvable random addresses
9631          * without providing an identity resolving key don't require
9632          * their signature resolving keys to be stored. Their addresses
9633          * will change the next time around.
9634          *
9635          * Only store the signature resolving key when a remote device
9636          * provides an identity address. So allow static random and
9637          * public addresses here.
9638          */
9639         if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9640             (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9641                 ev.store_hint = 0x00;
9642         else
9643                 ev.store_hint = persistent;
9644
9645         bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9646         ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9647         ev.key.type = csrk->type;
9648         memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9649
9650         mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9651 }
9652
9653 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9654                          u8 bdaddr_type, u8 store_hint, u16 min_interval,
9655                          u16 max_interval, u16 latency, u16 timeout)
9656 {
9657         struct mgmt_ev_new_conn_param ev;
9658
9659         if (!hci_is_identity_address(bdaddr, bdaddr_type))
9660                 return;
9661
9662         memset(&ev, 0, sizeof(ev));
9663         bacpy(&ev.addr.bdaddr, bdaddr);
9664         ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9665         ev.store_hint = store_hint;
9666         ev.min_interval = cpu_to_le16(min_interval);
9667         ev.max_interval = cpu_to_le16(max_interval);
9668         ev.latency = cpu_to_le16(latency);
9669         ev.timeout = cpu_to_le16(timeout);
9670
9671         mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9672 }
9673
9674 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9675                            u8 *name, u8 name_len)
9676 {
9677         struct sk_buff *skb;
9678         struct mgmt_ev_device_connected *ev;
9679         u16 eir_len = 0;
9680         u32 flags = 0;
9681
9682         /* Allocate buffer for LE or BR/EDR adv data */
9683         if (conn->le_adv_data_len > 0)
9684                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9685                                      sizeof(*ev) + conn->le_adv_data_len);
9686         else
9687                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9688                                      sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9689                                      eir_precalc_len(sizeof(conn->dev_class)));
9690
9691         ev = skb_put(skb, sizeof(*ev));
9692         bacpy(&ev->addr.bdaddr, &conn->dst);
9693         ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9694
9695         if (conn->out)
9696                 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9697
9698         ev->flags = __cpu_to_le32(flags);
9699
9700         /* We must ensure that the EIR Data fields are ordered and
9701          * unique. Keep it simple for now and avoid the problem by not
9702          * adding any BR/EDR data to the LE adv.
9703          */
9704         if (conn->le_adv_data_len > 0) {
9705                 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9706                 eir_len = conn->le_adv_data_len;
9707         } else {
9708                 if (name)
9709                         eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9710
9711                 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9712                         eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9713                                                     conn->dev_class, sizeof(conn->dev_class));
9714         }
9715
9716         ev->eir_len = cpu_to_le16(eir_len);
9717
9718         mgmt_event_skb(skb, NULL);
9719 }
9720
9721 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9722 {
9723         struct sock **sk = data;
9724
9725         cmd->cmd_complete(cmd, 0);
9726
9727         *sk = cmd->sk;
9728         sock_hold(*sk);
9729
9730         mgmt_pending_remove(cmd);
9731 }
9732
9733 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9734 {
9735         struct hci_dev *hdev = data;
9736         struct mgmt_cp_unpair_device *cp = cmd->param;
9737
9738         device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9739
9740         cmd->cmd_complete(cmd, 0);
9741         mgmt_pending_remove(cmd);
9742 }
9743
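/* Return true if a Set Powered (off) command is currently pending, i.e. the
 * controller is in the process of powering down.
 */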
9744 bool mgmt_powering_down(struct hci_dev *hdev)
9745 {
9746         struct mgmt_pending_cmd *cmd;
9747         struct mgmt_mode *cp;
9748
9749         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9750         if (!cmd)
9751                 return false;
9752
9753         cp = cmd->param;
9754         if (!cp->val)
9755                 return true;
9756
9757         return false;
9758 }
9759
9760 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9761                               u8 link_type, u8 addr_type, u8 reason,
9762                               bool mgmt_connected)
9763 {
9764         struct mgmt_ev_device_disconnected ev;
9765         struct sock *sk = NULL;
9766
9767         /* The connection is still in hci_conn_hash so test for 1
9768          * instead of 0 to know if this is the last one.
9769          */
9770         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9771                 cancel_delayed_work(&hdev->power_off);
9772                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9773         }
9774
9775         if (!mgmt_connected)
9776                 return;
9777
9778         if (link_type != ACL_LINK && link_type != LE_LINK)
9779                 return;
9780
9781         mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9782
9783         bacpy(&ev.addr.bdaddr, bdaddr);
9784         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9785         ev.reason = reason;
9786
9787         /* Report disconnects due to suspend */
9788         if (hdev->suspended)
9789                 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9790
9791         mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9792
9793         if (sk)
9794                 sock_put(sk);
9795
9796         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9797                              hdev);
9798 }
9799
9800 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9801                             u8 link_type, u8 addr_type, u8 status)
9802 {
9803         u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9804         struct mgmt_cp_disconnect *cp;
9805         struct mgmt_pending_cmd *cmd;
9806
9807         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9808                              hdev);
9809
9810         cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9811         if (!cmd)
9812                 return;
9813
9814         cp = cmd->param;
9815
9816         if (bacmp(bdaddr, &cp->addr.bdaddr))
9817                 return;
9818
9819         if (cp->addr.type != bdaddr_type)
9820                 return;
9821
9822         cmd->cmd_complete(cmd, mgmt_status(status));
9823         mgmt_pending_remove(cmd);
9824 }
9825
9826 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9827                          u8 addr_type, u8 status)
9828 {
9829         struct mgmt_ev_connect_failed ev;
9830
9831         /* The connection is still in hci_conn_hash so test for 1
9832          * instead of 0 to know if this is the last one.
9833          */
9834         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9835                 cancel_delayed_work(&hdev->power_off);
9836                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9837         }
9838
9839         bacpy(&ev.addr.bdaddr, bdaddr);
9840         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9841         ev.status = mgmt_status(status);
9842
9843         mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9844 }
9845
9846 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9847 {
9848         struct mgmt_ev_pin_code_request ev;
9849
9850         bacpy(&ev.addr.bdaddr, bdaddr);
9851         ev.addr.type = BDADDR_BREDR;
9852         ev.secure = secure;
9853
9854         mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9855 }
9856
9857 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9858                                   u8 status)
9859 {
9860         struct mgmt_pending_cmd *cmd;
9861
9862         cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9863         if (!cmd)
9864                 return;
9865
9866         cmd->cmd_complete(cmd, mgmt_status(status));
9867         mgmt_pending_remove(cmd);
9868 }
9869
9870 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9871                                       u8 status)
9872 {
9873         struct mgmt_pending_cmd *cmd;
9874
9875         cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9876         if (!cmd)
9877                 return;
9878
9879         cmd->cmd_complete(cmd, mgmt_status(status));
9880         mgmt_pending_remove(cmd);
9881 }
9882
9883 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9884                               u8 link_type, u8 addr_type, u32 value,
9885                               u8 confirm_hint)
9886 {
9887         struct mgmt_ev_user_confirm_request ev;
9888
9889         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9890
9891         bacpy(&ev.addr.bdaddr, bdaddr);
9892         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9893         ev.confirm_hint = confirm_hint;
9894         ev.value = cpu_to_le32(value);
9895
9896         return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9897                           NULL);
9898 }
9899
9900 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9901                               u8 link_type, u8 addr_type)
9902 {
9903         struct mgmt_ev_user_passkey_request ev;
9904
9905         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9906
9907         bacpy(&ev.addr.bdaddr, bdaddr);
9908         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9909
9910         return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9911                           NULL);
9912 }
9913
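/* Complete a pending user pairing response command (confirm or passkey
 * reply, positive or negative) with the translated HCI status.
 */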
9914 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9915                                       u8 link_type, u8 addr_type, u8 status,
9916                                       u8 opcode)
9917 {
9918         struct mgmt_pending_cmd *cmd;
9919
9920         cmd = pending_find(opcode, hdev);
9921         if (!cmd)
9922                 return -ENOENT;
9923
9924         cmd->cmd_complete(cmd, mgmt_status(status));
9925         mgmt_pending_remove(cmd);
9926
9927         return 0;
9928 }
9929
9930 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9931                                      u8 link_type, u8 addr_type, u8 status)
9932 {
9933         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9934                                           status, MGMT_OP_USER_CONFIRM_REPLY);
9935 }
9936
9937 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9938                                          u8 link_type, u8 addr_type, u8 status)
9939 {
9940         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9941                                           status,
9942                                           MGMT_OP_USER_CONFIRM_NEG_REPLY);
9943 }
9944
9945 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9946                                      u8 link_type, u8 addr_type, u8 status)
9947 {
9948         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9949                                           status, MGMT_OP_USER_PASSKEY_REPLY);
9950 }
9951
9952 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9953                                          u8 link_type, u8 addr_type, u8 status)
9954 {
9955         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9956                                           status,
9957                                           MGMT_OP_USER_PASSKEY_NEG_REPLY);
9958 }
9959
9960 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9961                              u8 link_type, u8 addr_type, u32 passkey,
9962                              u8 entered)
9963 {
9964         struct mgmt_ev_passkey_notify ev;
9965
9966         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9967
9968         bacpy(&ev.addr.bdaddr, bdaddr);
9969         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9970         ev.passkey = __cpu_to_le32(passkey);
9971         ev.entered = entered;
9972
9973         return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9974 }
9975
9976 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9977 {
9978         struct mgmt_ev_auth_failed ev;
9979         struct mgmt_pending_cmd *cmd;
9980         u8 status = mgmt_status(hci_status);
9981
9982         bacpy(&ev.addr.bdaddr, &conn->dst);
9983         ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9984         ev.status = status;
9985
9986         cmd = find_pairing(conn);
9987
9988         mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9989                     cmd ? cmd->sk : NULL);
9990
9991         if (cmd) {
9992                 cmd->cmd_complete(cmd, status);
9993                 mgmt_pending_remove(cmd);
9994         }
9995 }
9996
9997 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9998 {
9999         struct cmd_lookup match = { NULL, hdev };
10000         bool changed;
10001
10002         if (status) {
10003                 u8 mgmt_err = mgmt_status(status);
10004                 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10005                                      cmd_status_rsp, &mgmt_err);
10006                 return;
10007         }
10008
10009         if (test_bit(HCI_AUTH, &hdev->flags))
10010                 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10011         else
10012                 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10013
10014         mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10015                              &match);
10016
10017         if (changed)
10018                 new_settings(hdev, match.sk);
10019
10020         if (match.sk)
10021                 sock_put(match.sk);
10022 }
10023
10024 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10025 {
10026         struct cmd_lookup *match = data;
10027
10028         if (match->sk == NULL) {
10029                 match->sk = cmd->sk;
10030                 sock_hold(match->sk);
10031         }
10032 }
10033
10034 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10035                                     u8 status)
10036 {
10037         struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10038
10039         mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10040         mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10041         mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10042
10043         if (!status) {
10044                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10045                                    3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10046                 ext_info_changed(hdev, NULL);
10047         }
10048
10049         if (match.sk)
10050                 sock_put(match.sk);
10051 }
10052
10053 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10054 {
10055         struct mgmt_cp_set_local_name ev;
10056         struct mgmt_pending_cmd *cmd;
10057
10058         if (status)
10059                 return;
10060
10061         memset(&ev, 0, sizeof(ev));
10062         memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10063         memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10064
10065         cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10066         if (!cmd) {
10067                 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10068
10069                 /* If this is an HCI command related to powering on the
10070                  * HCI dev, don't send any mgmt signals.
10071                  */
10072                 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10073                         return;
10074         }
10075
10076         mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10077                            HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10078         ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10079 }
10080
10081 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10082 {
10083         int i;
10084
10085         for (i = 0; i < uuid_count; i++) {
10086                 if (!memcmp(uuid, uuids[i], 16))
10087                         return true;
10088         }
10089
10090         return false;
10091 }
10092
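/* Walk the EIR/advertising data looking for 16-bit, 32-bit or 128-bit
 * service UUID fields, expanding 16- and 32-bit UUIDs against the Bluetooth
 * base UUID, and check each against the given UUID list.
 */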
10093 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10094 {
10095         u16 parsed = 0;
10096
10097         while (parsed < eir_len) {
10098                 u8 field_len = eir[0];
10099                 u8 uuid[16];
10100                 int i;
10101
10102                 if (field_len == 0)
10103                         break;
10104
10105                 if (eir_len - parsed < field_len + 1)
10106                         break;
10107
10108                 switch (eir[1]) {
10109                 case EIR_UUID16_ALL:
10110                 case EIR_UUID16_SOME:
10111                         for (i = 0; i + 3 <= field_len; i += 2) {
10112                                 memcpy(uuid, bluetooth_base_uuid, 16);
10113                                 uuid[13] = eir[i + 3];
10114                                 uuid[12] = eir[i + 2];
10115                                 if (has_uuid(uuid, uuid_count, uuids))
10116                                         return true;
10117                         }
10118                         break;
10119                 case EIR_UUID32_ALL:
10120                 case EIR_UUID32_SOME:
10121                         for (i = 0; i + 5 <= field_len; i += 4) {
10122                                 memcpy(uuid, bluetooth_base_uuid, 16);
10123                                 uuid[15] = eir[i + 5];
10124                                 uuid[14] = eir[i + 4];
10125                                 uuid[13] = eir[i + 3];
10126                                 uuid[12] = eir[i + 2];
10127                                 if (has_uuid(uuid, uuid_count, uuids))
10128                                         return true;
10129                         }
10130                         break;
10131                 case EIR_UUID128_ALL:
10132                 case EIR_UUID128_SOME:
10133                         for (i = 0; i + 17 <= field_len; i += 16) {
10134                                 memcpy(uuid, eir + i + 2, 16);
10135                                 if (has_uuid(uuid, uuid_count, uuids))
10136                                         return true;
10137                         }
10138                         break;
10139                 }
10140
10141                 parsed += field_len + 1;
10142                 eir += field_len + 1;
10143         }
10144
10145         return false;
10146 }
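/* Worked example (illustrative): the 16-bit and 32-bit branches above expand
 * a shortened UUID onto the Bluetooth Base UUID
 * 00000000-0000-1000-8000-00805F9B34FB, which bluetooth_base_uuid stores in
 * little-endian byte order. An EIR field of { 0x03, EIR_UUID16_ALL, 0x0B,
 * 0x11 } carries the 16-bit Audio Sink UUID 0x110B, so the code sets
 * uuid[12] = 0x0B and uuid[13] = 0x11, producing the 128-bit value
 * 0000110B-0000-1000-8000-00805F9B34FB that has_uuid() then compares with
 * memcmp() against the filter list.
 */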
10147
10148 static void restart_le_scan(struct hci_dev *hdev)
10149 {
10150         /* If the controller is not scanning, we are done. */
10151         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
10152                 return;
10153
10154         if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
10155                        hdev->discovery.scan_start +
10156                        hdev->discovery.scan_duration))
10157                 return;
10158
10159         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
10160                            DISCOV_LE_RESTART_DELAY);
10161 }
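/* Example of the timing check above: the restart is only queued when
 * jiffies + DISCOV_LE_RESTART_DELAY is still before scan_start +
 * scan_duration, i.e. when the delayed le_scan_restart work would still land
 * inside the current scan window; otherwise the scan is about to end anyway
 * and restarting it would be pointless.
 */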
10162
10163 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10164                             u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10165 {
10166         /* If an RSSI threshold has been specified, and
10167          * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10168          * an RSSI smaller than the threshold will be dropped. If the quirk
10169          * is set, let the result through for further processing, as we might
10170          * need to restart the scan.
10171          *
10172          * For BR/EDR devices (pre 1.2) that provide no RSSI during inquiry,
10173          * the results are also dropped.
10174          */
10175         if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10176             (rssi == HCI_RSSI_INVALID ||
10177             (rssi < hdev->discovery.rssi &&
10178              !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10179                 return false;
10180
10181         if (hdev->discovery.uuid_count != 0) {
10182                 /* If a list of UUIDs is provided in the filter, results
10183                  * with no matching UUID should be dropped.
10184                  */
10185                 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10186                                    hdev->discovery.uuids) &&
10187                     !eir_has_uuids(scan_rsp, scan_rsp_len,
10188                                    hdev->discovery.uuid_count,
10189                                    hdev->discovery.uuids))
10190                         return false;
10191         }
10192
10193         /* If duplicate filtering does not report RSSI changes, then restart
10194          * scanning to ensure results are reported with updated RSSI values.
10195          */
10196         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10197                 restart_le_scan(hdev);
10198
10199                 /* Validate RSSI value against the RSSI threshold once more. */
10200                 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10201                     rssi < hdev->discovery.rssi)
10202                         return false;
10203         }
10204
10205         return true;
10206 }
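/* Example (a sketch of how the filter is typically populated): after
 * MGMT_OP_START_SERVICE_DISCOVERY with an RSSI threshold of -70 dBm and a
 * single UUID 0x110B, an advertisement received at -80 dBm is dropped by the
 * RSSI check (with the strict-duplicate-filter quirk set it is only let
 * through far enough to possibly restart the scan and is then dropped by the
 * second RSSI check), while a report at -60 dBm is still dropped unless its
 * EIR or scan response data contains a UUID matching 0x110B.
 */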
10207
10208 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10209                                   bdaddr_t *bdaddr, u8 addr_type)
10210 {
10211         struct mgmt_ev_adv_monitor_device_lost ev;
10212
10213         ev.monitor_handle = cpu_to_le16(handle);
10214         bacpy(&ev.addr.bdaddr, bdaddr);
10215         ev.addr.type = addr_type;
10216
10217         mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10218                    NULL);
10219 }
10220
10221 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10222                                                struct sk_buff *skb,
10223                                                struct sock *skip_sk,
10224                                                u16 handle)
10225 {
10226         struct sk_buff *advmon_skb;
10227         size_t advmon_skb_len;
10228         __le16 *monitor_handle;
10229
10230         if (!skb)
10231                 return;
10232
10233         advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10234                           sizeof(struct mgmt_ev_device_found)) + skb->len;
10235         advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10236                                     advmon_skb_len);
10237         if (!advmon_skb)
10238                 return;
10239
10240         /* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event
10241          * except that it also carries a 'monitor_handle'. Prepend the
10242          * monitor_handle of the matched monitor to a copy of DEVICE_FOUND.
10243          */
10244         monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10245         *monitor_handle = cpu_to_le16(handle);
10246         skb_put_data(advmon_skb, skb->data, skb->len);
10247
10248         mgmt_event_skb(advmon_skb, skip_sk);
10249 }
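/* Resulting event layout (sketch):
 *
 *   +----------------+---------------------------------------------+
 *   | monitor_handle |        original DEVICE_FOUND payload        |
 *   |    (__le16)    |   (addr, rssi, flags, eir_len, eir[] ...)   |
 *   +----------------+---------------------------------------------+
 *
 * which is why advmon_skb_len is skb->len plus the size difference between
 * the two event structures, i.e. just the two handle bytes.
 */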
10250
10251 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10252                                           bdaddr_t *bdaddr, bool report_device,
10253                                           struct sk_buff *skb,
10254                                           struct sock *skip_sk)
10255 {
10256         struct monitored_device *dev, *tmp;
10257         bool matched = false;
10258         bool notified = false;
10259
10260         /* We have received the Advertisement Report because:
10261          * 1. the kernel has initiated active discovery
10262          * 2. if not, we have pend_le_reports > 0 in which case we are doing
10263          *    passive scanning
10264          * 3. if none of the above is true, we have one or more active
10265          *    Advertisement Monitors
10266          *
10267          * For cases 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10268          * and report ONLY one advertisement per device for the matched Monitor
10269          * via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10270          *
10271          * For case 3, since we are not actively scanning and all advertisements
10272          * received are due to a matched Advertisement Monitor, report all
10273          * advertisements ONLY via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10274          */
10275         if (report_device && !hdev->advmon_pend_notify) {
10276                 mgmt_event_skb(skb, skip_sk);
10277                 return;
10278         }
10279
10280         hdev->advmon_pend_notify = false;
10281
10282         list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10283                 if (!bacmp(&dev->bdaddr, bdaddr)) {
10284                         matched = true;
10285
10286                         if (!dev->notified) {
10287                                 mgmt_send_adv_monitor_device_found(hdev, skb,
10288                                                                    skip_sk,
10289                                                                    dev->handle);
10290                                 notified = true;
10291                                 dev->notified = true;
10292                         }
10293                 }
10294
10295                 if (!dev->notified)
10296                         hdev->advmon_pend_notify = true;
10297         }
10298
10299         if (!report_device &&
10300             ((matched && !notified) || !msft_monitor_supported(hdev))) {
10301                 /* Handle 0 indicates that we are not actively scanning and
10302                  * this is a subsequent advertisement report for an already
10303                  * matched Advertisement Monitor, or that controller
10304                  * offloading support is not available.
10305                  */
10306                 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10307         }
10308
10309         if (report_device)
10310                 mgmt_event_skb(skb, skip_sk);
10311         else
10312                 kfree_skb(skb);
10313 }
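/* Dispatch summary for the function above (illustrative):
 *
 *   report_device && !advmon_pend_notify   -> DEVICE_FOUND only (fast path)
 *   first report for a matched monitor     -> ADV_MONITOR_DEVICE_FOUND with
 *                                             the monitor's handle
 *   repeat match or no offloading support  -> ADV_MONITOR_DEVICE_FOUND with
 *     (only when report_device is false)      handle 0
 *   report_device                          -> DEVICE_FOUND as well
 *
 * The original skb is always consumed, either by mgmt_event_skb() or by
 * kfree_skb().
 */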
10314
10315 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10316                               u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10317                               u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10318                               u64 instant)
10319 {
10320         struct sk_buff *skb;
10321         struct mgmt_ev_mesh_device_found *ev;
10322         int i, j;
10323
10324         if (!hdev->mesh_ad_types[0])
10325                 goto accepted;
10326
10327         /* Scan for requested AD types */
10328         if (eir_len > 0) {
10329                 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10330                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10331                                 if (!hdev->mesh_ad_types[j])
10332                                         break;
10333
10334                                 if (hdev->mesh_ad_types[j] == eir[i + 1])
10335                                         goto accepted;
10336                         }
10337                 }
10338         }
10339
10340         if (scan_rsp_len > 0) {
10341                 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10342                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10343                                 if (!hdev->mesh_ad_types[j])
10344                                         break;
10345
10346                                 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10347                                         goto accepted;
10348                         }
10349                 }
10350         }
10351
10352         return;
10353
10354 accepted:
10355         skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10356                              sizeof(*ev) + eir_len + scan_rsp_len);
10357         if (!skb)
10358                 return;
10359
10360         ev = skb_put(skb, sizeof(*ev));
10361
10362         bacpy(&ev->addr.bdaddr, bdaddr);
10363         ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10364         ev->rssi = rssi;
10365         ev->flags = cpu_to_le32(flags);
10366         ev->instant = cpu_to_le64(instant);
10367
10368         if (eir_len > 0)
10369                 /* Copy EIR or advertising data into event */
10370                 skb_put_data(skb, eir, eir_len);
10371
10372         if (scan_rsp_len > 0)
10373                 /* Append scan response data to event */
10374                 skb_put_data(skb, scan_rsp, scan_rsp_len);
10375
10376         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10377
10378         mgmt_event_skb(skb, NULL);
10379 }
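/* Example: a mesh daemon that configured hdev->mesh_ad_types with
 * { 0x2A, 0x2B } (the Mesh Message and Mesh Beacon AD types, e.g. via the
 * Set Mesh Receiver command) only gets MGMT_EV_MESH_DEVICE_FOUND for reports
 * whose advertising or scan response data contains at least one field of
 * those types; anything else returns early above without allocating an
 * event. An empty mesh_ad_types list accepts everything.
 */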
10380
10381 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10382                        u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10383                        u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10384                        u64 instant)
10385 {
10386         struct sk_buff *skb;
10387         struct mgmt_ev_device_found *ev;
10388         bool report_device = hci_discovery_active(hdev);
10389
10390         if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10391                 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10392                                   eir, eir_len, scan_rsp, scan_rsp_len,
10393                                   instant);
10394
10395         /* Don't send events for a non-kernel initiated discovery. For LE,
10396          * exceptions are passive scanning (pend_le_reports > 0) and active
10397          * Advertisement Monitoring, both of which still need these reports.
10398          */
10399         if (!hci_discovery_active(hdev)) {
10400                 if (link_type == ACL_LINK)
10401                         return;
10402                 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10403                         report_device = true;
10404                 else if (!hci_is_adv_monitoring(hdev))
10405                         return;
10406         }
10407
10408         if (hdev->discovery.result_filtering) {
10409                 /* We are using service discovery */
10410                 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10411                                      scan_rsp_len))
10412                         return;
10413         }
10414
10415         if (hdev->discovery.limited) {
10416                 /* Check for limited discoverable bit */
10417                 if (dev_class) {
10418                         if (!(dev_class[1] & 0x20))
10419                                 return;
10420                 } else {
10421                         u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10422                         if (!flags || !(flags[0] & LE_AD_LIMITED))
10423                                 return;
10424                 }
10425         }
10426
10427         /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10428         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10429                              sizeof(*ev) + eir_len + scan_rsp_len + 5);
10430         if (!skb)
10431                 return;
10432
10433         ev = skb_put(skb, sizeof(*ev));
10434
10435         /* In case of device discovery with BR/EDR devices (pre 1.2), the
10436          * RSSI value was reported as 0 when not available. This behavior
10437          * is kept when using device discovery. This is required for full
10438          * backwards compatibility with the API.
10439          *
10440          * However when using service discovery, the value 127 will be
10441          * returned when the RSSI is not available.
10442          */
10443         if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10444             link_type == ACL_LINK)
10445                 rssi = 0;
10446
10447         bacpy(&ev->addr.bdaddr, bdaddr);
10448         ev->addr.type = link_to_bdaddr(link_type, addr_type);
10449         ev->rssi = rssi;
10450         ev->flags = cpu_to_le32(flags);
10451
10452         if (eir_len > 0)
10453                 /* Copy EIR or advertising data into event */
10454                 skb_put_data(skb, eir, eir_len);
10455
10456         if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10457                 u8 eir_cod[5];
10458
10459                 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10460                                            dev_class, 3);
10461                 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10462         }
10463
10464         if (scan_rsp_len > 0)
10465                 /* Append scan response data to event */
10466                 skb_put_data(skb, scan_rsp, scan_rsp_len);
10467
10468         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10469
10470         mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10471 }
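/* Notes on the checks above (illustrative):
 *
 * - Limited discovery: for BR/EDR, dev_class[1] & 0x20 tests bit 13 of the
 *   Class of Device (Limited Discoverable Mode); for LE, the LE_AD_LIMITED
 *   bit of the Flags AD field is used instead.
 *
 * - The "+ 5" in the skb allocation covers a synthesized EIR_CLASS_OF_DEV
 *   field when dev_class is present but the EIR data does not already carry
 *   one: 1 length byte + 1 type byte + 3 CoD bytes = 5 bytes, matching the
 *   eir_cod[5] buffer used above.
 */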
10472
10473 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10474                       u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10475 {
10476         struct sk_buff *skb;
10477         struct mgmt_ev_device_found *ev;
10478         u16 eir_len = 0;
10479         u32 flags = 0;
10480
10481         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10482                              sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
              if (!skb)
                      return;
10483
10484         ev = skb_put(skb, sizeof(*ev));
10485         bacpy(&ev->addr.bdaddr, bdaddr);
10486         ev->addr.type = link_to_bdaddr(link_type, addr_type);
10487         ev->rssi = rssi;
10488
10489         if (name)
10490                 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10491         else
10492                 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10493
10494         ev->eir_len = cpu_to_le16(eir_len);
10495         ev->flags = cpu_to_le32(flags);
10496
10497         mgmt_event_skb(skb, NULL);
10498 }
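/* Example: for a resolved name "Mouse" (5 bytes) the event carries a single
 * EIR field { 0x06, EIR_NAME_COMPLETE, 'M', 'o', 'u', 's', 'e' }, i.e. a
 * length byte covering the type and data, the 0x09 Complete Local Name tag
 * and the raw name bytes, so eir_len ends up as name_len + 2. When the name
 * request failed, eir_len stays 0 and only the
 * MGMT_DEV_FOUND_NAME_REQUEST_FAILED flag is set.
 */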
10499
10500 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10501 {
10502         struct mgmt_ev_discovering ev;
10503
10504         bt_dev_dbg(hdev, "discovering %u", discovering);
10505
10506         memset(&ev, 0, sizeof(ev));
10507         ev.type = hdev->discovery.type;
10508         ev.discovering = discovering;
10509
10510         mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10511 }
10512
10513 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10514 {
10515         struct mgmt_ev_controller_suspend ev;
10516
10517         ev.suspend_state = state;
10518         mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10519 }
10520
10521 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10522                    u8 addr_type)
10523 {
10524         struct mgmt_ev_controller_resume ev;
10525
10526         ev.wake_reason = reason;
10527         if (bdaddr) {
10528                 bacpy(&ev.addr.bdaddr, bdaddr);
10529                 ev.addr.type = addr_type;
10530         } else {
10531                 memset(&ev.addr, 0, sizeof(ev.addr));
10532         }
10533
10534         mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10535 }
10536
10537 static struct hci_mgmt_chan chan = {
10538         .channel        = HCI_CHANNEL_CONTROL,
10539         .handler_count  = ARRAY_SIZE(mgmt_handlers),
10540         .handlers       = mgmt_handlers,
10541         .hdev_init      = mgmt_init_hdev,
10542 };
10543
10544 int mgmt_init(void)
10545 {
10546         return hci_mgmt_chan_register(&chan);
10547 }
10548
10549 void mgmt_exit(void)
10550 {
10551         hci_mgmt_chan_unregister(&chan);
10552 }
10553
10554 void mgmt_cleanup(struct sock *sk)
10555 {
10556         struct mgmt_mesh_tx *mesh_tx;
10557         struct hci_dev *hdev;
10558
10559         read_lock(&hci_dev_list_lock);
10560
10561         list_for_each_entry(hdev, &hci_dev_list, list) {
10562                 do {
10563                         mesh_tx = mgmt_mesh_next(hdev, sk);
10564
10565                         if (mesh_tx)
10566                                 mesh_send_complete(hdev, mesh_tx, true);
10567                 } while (mesh_tx);
10568         }
10569
10570         read_unlock(&hci_dev_list_lock);
10571 }