Bluetooth: Set filter policy for LE connection
[platform/kernel/linux-starfive.git] / net/bluetooth/mgmt.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 #ifdef TIZEN_BT
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
38 #endif
39
40 #include "hci_request.h"
41 #include "smp.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
44 #include "msft.h"
45 #include "eir.h"
46 #include "aosp.h"
47
48 #define MGMT_VERSION    1
49 #define MGMT_REVISION   22
50
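/*
 * Opcodes and events exposed over the HCI control channel
 * (HCI_CHANNEL_CONTROL). Trusted sockets see the full lists below;
 * untrusted sockets are limited to the read-only subsets declared
 * further down in mgmt_untrusted_commands/mgmt_untrusted_events.
 */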
51 static const u16 mgmt_commands[] = {
52         MGMT_OP_READ_INDEX_LIST,
53         MGMT_OP_READ_INFO,
54         MGMT_OP_SET_POWERED,
55         MGMT_OP_SET_DISCOVERABLE,
56         MGMT_OP_SET_CONNECTABLE,
57         MGMT_OP_SET_FAST_CONNECTABLE,
58         MGMT_OP_SET_BONDABLE,
59         MGMT_OP_SET_LINK_SECURITY,
60         MGMT_OP_SET_SSP,
61         MGMT_OP_SET_HS,
62         MGMT_OP_SET_LE,
63         MGMT_OP_SET_DEV_CLASS,
64         MGMT_OP_SET_LOCAL_NAME,
65         MGMT_OP_ADD_UUID,
66         MGMT_OP_REMOVE_UUID,
67         MGMT_OP_LOAD_LINK_KEYS,
68         MGMT_OP_LOAD_LONG_TERM_KEYS,
69         MGMT_OP_DISCONNECT,
70         MGMT_OP_GET_CONNECTIONS,
71         MGMT_OP_PIN_CODE_REPLY,
72         MGMT_OP_PIN_CODE_NEG_REPLY,
73         MGMT_OP_SET_IO_CAPABILITY,
74         MGMT_OP_PAIR_DEVICE,
75         MGMT_OP_CANCEL_PAIR_DEVICE,
76         MGMT_OP_UNPAIR_DEVICE,
77         MGMT_OP_USER_CONFIRM_REPLY,
78         MGMT_OP_USER_CONFIRM_NEG_REPLY,
79         MGMT_OP_USER_PASSKEY_REPLY,
80         MGMT_OP_USER_PASSKEY_NEG_REPLY,
81         MGMT_OP_READ_LOCAL_OOB_DATA,
82         MGMT_OP_ADD_REMOTE_OOB_DATA,
83         MGMT_OP_REMOVE_REMOTE_OOB_DATA,
84         MGMT_OP_START_DISCOVERY,
85         MGMT_OP_STOP_DISCOVERY,
86         MGMT_OP_CONFIRM_NAME,
87         MGMT_OP_BLOCK_DEVICE,
88         MGMT_OP_UNBLOCK_DEVICE,
89         MGMT_OP_SET_DEVICE_ID,
90         MGMT_OP_SET_ADVERTISING,
91         MGMT_OP_SET_BREDR,
92         MGMT_OP_SET_STATIC_ADDRESS,
93         MGMT_OP_SET_SCAN_PARAMS,
94         MGMT_OP_SET_SECURE_CONN,
95         MGMT_OP_SET_DEBUG_KEYS,
96         MGMT_OP_SET_PRIVACY,
97         MGMT_OP_LOAD_IRKS,
98         MGMT_OP_GET_CONN_INFO,
99         MGMT_OP_GET_CLOCK_INFO,
100         MGMT_OP_ADD_DEVICE,
101         MGMT_OP_REMOVE_DEVICE,
102         MGMT_OP_LOAD_CONN_PARAM,
103         MGMT_OP_READ_UNCONF_INDEX_LIST,
104         MGMT_OP_READ_CONFIG_INFO,
105         MGMT_OP_SET_EXTERNAL_CONFIG,
106         MGMT_OP_SET_PUBLIC_ADDRESS,
107         MGMT_OP_START_SERVICE_DISCOVERY,
108         MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
109         MGMT_OP_READ_EXT_INDEX_LIST,
110         MGMT_OP_READ_ADV_FEATURES,
111         MGMT_OP_ADD_ADVERTISING,
112         MGMT_OP_REMOVE_ADVERTISING,
113         MGMT_OP_GET_ADV_SIZE_INFO,
114         MGMT_OP_START_LIMITED_DISCOVERY,
115         MGMT_OP_READ_EXT_INFO,
116         MGMT_OP_SET_APPEARANCE,
117         MGMT_OP_GET_PHY_CONFIGURATION,
118         MGMT_OP_SET_PHY_CONFIGURATION,
119         MGMT_OP_SET_BLOCKED_KEYS,
120         MGMT_OP_SET_WIDEBAND_SPEECH,
121         MGMT_OP_READ_CONTROLLER_CAP,
122         MGMT_OP_READ_EXP_FEATURES_INFO,
123         MGMT_OP_SET_EXP_FEATURE,
124         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
125         MGMT_OP_SET_DEF_SYSTEM_CONFIG,
126         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
127         MGMT_OP_SET_DEF_RUNTIME_CONFIG,
128         MGMT_OP_GET_DEVICE_FLAGS,
129         MGMT_OP_SET_DEVICE_FLAGS,
130         MGMT_OP_READ_ADV_MONITOR_FEATURES,
131         MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
132         MGMT_OP_REMOVE_ADV_MONITOR,
133         MGMT_OP_ADD_EXT_ADV_PARAMS,
134         MGMT_OP_ADD_EXT_ADV_DATA,
135         MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136         MGMT_OP_SET_MESH_RECEIVER,
137         MGMT_OP_MESH_READ_FEATURES,
138         MGMT_OP_MESH_SEND,
139         MGMT_OP_MESH_SEND_CANCEL,
140 };
141
142 static const u16 mgmt_events[] = {
143         MGMT_EV_CONTROLLER_ERROR,
144         MGMT_EV_INDEX_ADDED,
145         MGMT_EV_INDEX_REMOVED,
146         MGMT_EV_NEW_SETTINGS,
147         MGMT_EV_CLASS_OF_DEV_CHANGED,
148         MGMT_EV_LOCAL_NAME_CHANGED,
149         MGMT_EV_NEW_LINK_KEY,
150         MGMT_EV_NEW_LONG_TERM_KEY,
151         MGMT_EV_DEVICE_CONNECTED,
152         MGMT_EV_DEVICE_DISCONNECTED,
153         MGMT_EV_CONNECT_FAILED,
154         MGMT_EV_PIN_CODE_REQUEST,
155         MGMT_EV_USER_CONFIRM_REQUEST,
156         MGMT_EV_USER_PASSKEY_REQUEST,
157         MGMT_EV_AUTH_FAILED,
158         MGMT_EV_DEVICE_FOUND,
159         MGMT_EV_DISCOVERING,
160         MGMT_EV_DEVICE_BLOCKED,
161         MGMT_EV_DEVICE_UNBLOCKED,
162         MGMT_EV_DEVICE_UNPAIRED,
163         MGMT_EV_PASSKEY_NOTIFY,
164         MGMT_EV_NEW_IRK,
165         MGMT_EV_NEW_CSRK,
166         MGMT_EV_DEVICE_ADDED,
167         MGMT_EV_DEVICE_REMOVED,
168         MGMT_EV_NEW_CONN_PARAM,
169         MGMT_EV_UNCONF_INDEX_ADDED,
170         MGMT_EV_UNCONF_INDEX_REMOVED,
171         MGMT_EV_NEW_CONFIG_OPTIONS,
172         MGMT_EV_EXT_INDEX_ADDED,
173         MGMT_EV_EXT_INDEX_REMOVED,
174         MGMT_EV_LOCAL_OOB_DATA_UPDATED,
175         MGMT_EV_ADVERTISING_ADDED,
176         MGMT_EV_ADVERTISING_REMOVED,
177         MGMT_EV_EXT_INFO_CHANGED,
178         MGMT_EV_PHY_CONFIGURATION_CHANGED,
179         MGMT_EV_EXP_FEATURE_CHANGED,
180         MGMT_EV_DEVICE_FLAGS_CHANGED,
181         MGMT_EV_ADV_MONITOR_ADDED,
182         MGMT_EV_ADV_MONITOR_REMOVED,
183         MGMT_EV_CONTROLLER_SUSPEND,
184         MGMT_EV_CONTROLLER_RESUME,
185         MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
186         MGMT_EV_ADV_MONITOR_DEVICE_LOST,
187 };
188
189 static const u16 mgmt_untrusted_commands[] = {
190         MGMT_OP_READ_INDEX_LIST,
191         MGMT_OP_READ_INFO,
192         MGMT_OP_READ_UNCONF_INDEX_LIST,
193         MGMT_OP_READ_CONFIG_INFO,
194         MGMT_OP_READ_EXT_INDEX_LIST,
195         MGMT_OP_READ_EXT_INFO,
196         MGMT_OP_READ_CONTROLLER_CAP,
197         MGMT_OP_READ_EXP_FEATURES_INFO,
198         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
199         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
200 };
201
202 static const u16 mgmt_untrusted_events[] = {
203         MGMT_EV_INDEX_ADDED,
204         MGMT_EV_INDEX_REMOVED,
205         MGMT_EV_NEW_SETTINGS,
206         MGMT_EV_CLASS_OF_DEV_CHANGED,
207         MGMT_EV_LOCAL_NAME_CHANGED,
208         MGMT_EV_UNCONF_INDEX_ADDED,
209         MGMT_EV_UNCONF_INDEX_REMOVED,
210         MGMT_EV_NEW_CONFIG_OPTIONS,
211         MGMT_EV_EXT_INDEX_ADDED,
212         MGMT_EV_EXT_INDEX_REMOVED,
213         MGMT_EV_EXT_INFO_CHANGED,
214         MGMT_EV_EXP_FEATURE_CHANGED,
215 };
216
217 #define CACHE_TIMEOUT   msecs_to_jiffies(2 * 1000)
218
219 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
220                  "\x00\x00\x00\x00\x00\x00\x00\x00"
221
222 /* HCI to MGMT error code conversion table */
223 static const u8 mgmt_status_table[] = {
224         MGMT_STATUS_SUCCESS,
225         MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
226         MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
227         MGMT_STATUS_FAILED,             /* Hardware Failure */
228         MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
229         MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
230         MGMT_STATUS_AUTH_FAILED,        /* PIN or Key Missing */
231         MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
232         MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
233         MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
234         MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
235         MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
236         MGMT_STATUS_BUSY,               /* Command Disallowed */
237         MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
238         MGMT_STATUS_REJECTED,           /* Rejected Security */
239         MGMT_STATUS_REJECTED,           /* Rejected Personal */
240         MGMT_STATUS_TIMEOUT,            /* Host Timeout */
241         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
242         MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
243         MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
244         MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
245         MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
246         MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
247         MGMT_STATUS_BUSY,               /* Repeated Attempts */
248         MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
249         MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
250         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
251         MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
252         MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
253         MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
254         MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
255         MGMT_STATUS_FAILED,             /* Unspecified Error */
256         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
257         MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
258         MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
259         MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
260         MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
261         MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
262         MGMT_STATUS_FAILED,             /* Unit Link Key Used */
263         MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
264         MGMT_STATUS_TIMEOUT,            /* Instant Passed */
265         MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
266         MGMT_STATUS_FAILED,             /* Transaction Collision */
267         MGMT_STATUS_FAILED,             /* Reserved for future use */
268         MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
269         MGMT_STATUS_REJECTED,           /* QoS Rejected */
270         MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
271         MGMT_STATUS_REJECTED,           /* Insufficient Security */
272         MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
273         MGMT_STATUS_FAILED,             /* Reserved for future use */
274         MGMT_STATUS_BUSY,               /* Role Switch Pending */
275         MGMT_STATUS_FAILED,             /* Reserved for future use */
276         MGMT_STATUS_FAILED,             /* Slot Violation */
277         MGMT_STATUS_FAILED,             /* Role Switch Failed */
278         MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
279         MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
280         MGMT_STATUS_BUSY,               /* Host Busy Pairing */
281         MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
282         MGMT_STATUS_BUSY,               /* Controller Busy */
283         MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
284         MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
285         MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
286         MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
287         MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
288 };
289
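/*
 * Translate a negative kernel errno into the closest MGMT status code.
 * Anything not listed here falls back to MGMT_STATUS_FAILED.
 */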
290 static u8 mgmt_errno_status(int err)
291 {
292         switch (err) {
293         case 0:
294                 return MGMT_STATUS_SUCCESS;
295         case -EPERM:
296                 return MGMT_STATUS_REJECTED;
297         case -EINVAL:
298                 return MGMT_STATUS_INVALID_PARAMS;
299         case -EOPNOTSUPP:
300                 return MGMT_STATUS_NOT_SUPPORTED;
301         case -EBUSY:
302                 return MGMT_STATUS_BUSY;
303         case -ETIMEDOUT:
304                 return MGMT_STATUS_AUTH_FAILED;
305         case -ENOMEM:
306                 return MGMT_STATUS_NO_RESOURCES;
307         case -EISCONN:
308                 return MGMT_STATUS_ALREADY_CONNECTED;
309         case -ENOTCONN:
310                 return MGMT_STATUS_DISCONNECTED;
311         }
312
313         return MGMT_STATUS_FAILED;
314 }
315
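/*
 * Convert a command result into an MGMT status: negative values are
 * treated as errnos, non-negative values as HCI status codes looked up
 * in mgmt_status_table above.
 */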
316 static u8 mgmt_status(int err)
317 {
318         if (err < 0)
319                 return mgmt_errno_status(err);
320
321         if (err < ARRAY_SIZE(mgmt_status_table))
322                 return mgmt_status_table[err];
323
324         return MGMT_STATUS_FAILED;
325 }
326
327 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
328                             u16 len, int flag)
329 {
330         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
331                                flag, NULL);
332 }
333
334 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
335                               u16 len, int flag, struct sock *skip_sk)
336 {
337         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
338                                flag, skip_sk);
339 }
340
341 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
342                       struct sock *skip_sk)
343 {
344         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
345                                HCI_SOCK_TRUSTED, skip_sk);
346 }
347
348 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
349 {
350         return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
351                                    skip_sk);
352 }
353
354 static u8 le_addr_type(u8 mgmt_addr_type)
355 {
356         if (mgmt_addr_type == BDADDR_LE_PUBLIC)
357                 return ADDR_LE_DEV_PUBLIC;
358         else
359                 return ADDR_LE_DEV_RANDOM;
360 }
361
362 void mgmt_fill_version_info(void *ver)
363 {
364         struct mgmt_rp_read_version *rp = ver;
365
366         rp->version = MGMT_VERSION;
367         rp->revision = cpu_to_le16(MGMT_REVISION);
368 }
369
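/*
 * Handler for MGMT_OP_READ_VERSION.  For illustration only (an assumed
 * user-space sketch, not part of this file): callers reach the handlers
 * in this file by binding a raw HCI socket to the control channel and
 * writing a little-endian mgmt_hdr followed by any parameters, roughly:
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	struct mgmt_hdr hdr = {
 *		.opcode = htole16(MGMT_OP_READ_VERSION),
 *		.index  = htole16(MGMT_INDEX_NONE),
 *		.len    = 0,
 *	};
 *
 *	bind(sk, (struct sockaddr *)&addr, sizeof(addr));
 *	write(sk, &hdr, sizeof(hdr));
 *
 * The reply comes back as a command-complete event carrying
 * struct mgmt_rp_read_version.
 */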
370 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
371                         u16 data_len)
372 {
373         struct mgmt_rp_read_version rp;
374
375         bt_dev_dbg(hdev, "sock %p", sk);
376
377         mgmt_fill_version_info(&rp);
378
379         return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
380                                  &rp, sizeof(rp));
381 }
382
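/*
 * MGMT_OP_READ_COMMANDS handler: return the supported opcode and event
 * lists declared at the top of this file, reduced to the untrusted
 * subsets for non-privileged sockets.
 */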
383 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
384                          u16 data_len)
385 {
386         struct mgmt_rp_read_commands *rp;
387         u16 num_commands, num_events;
388         size_t rp_size;
389         int i, err;
390
391         bt_dev_dbg(hdev, "sock %p", sk);
392
393         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
394                 num_commands = ARRAY_SIZE(mgmt_commands);
395                 num_events = ARRAY_SIZE(mgmt_events);
396         } else {
397                 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
398                 num_events = ARRAY_SIZE(mgmt_untrusted_events);
399         }
400
401         rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
402
403         rp = kmalloc(rp_size, GFP_KERNEL);
404         if (!rp)
405                 return -ENOMEM;
406
407         rp->num_commands = cpu_to_le16(num_commands);
408         rp->num_events = cpu_to_le16(num_events);
409
410         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
411                 __le16 *opcode = rp->opcodes;
412
413                 for (i = 0; i < num_commands; i++, opcode++)
414                         put_unaligned_le16(mgmt_commands[i], opcode);
415
416                 for (i = 0; i < num_events; i++, opcode++)
417                         put_unaligned_le16(mgmt_events[i], opcode);
418         } else {
419                 __le16 *opcode = rp->opcodes;
420
421                 for (i = 0; i < num_commands; i++, opcode++)
422                         put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
423
424                 for (i = 0; i < num_events; i++, opcode++)
425                         put_unaligned_le16(mgmt_untrusted_events[i], opcode);
426         }
427
428         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
429                                 rp, rp_size);
430         kfree(rp);
431
432         return err;
433 }
434
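/*
 * MGMT_OP_READ_INDEX_LIST handler: report the indexes of configured
 * primary controllers. Devices still in setup or config, bound to a
 * user channel, or marked raw-only are skipped by the loop below.
 */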
435 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
436                            u16 data_len)
437 {
438         struct mgmt_rp_read_index_list *rp;
439         struct hci_dev *d;
440         size_t rp_len;
441         u16 count;
442         int err;
443
444         bt_dev_dbg(hdev, "sock %p", sk);
445
446         read_lock(&hci_dev_list_lock);
447
448         count = 0;
449         list_for_each_entry(d, &hci_dev_list, list) {
450                 if (d->dev_type == HCI_PRIMARY &&
451                     !hci_dev_test_flag(d, HCI_UNCONFIGURED))
452                         count++;
453         }
454
455         rp_len = sizeof(*rp) + (2 * count);
456         rp = kmalloc(rp_len, GFP_ATOMIC);
457         if (!rp) {
458                 read_unlock(&hci_dev_list_lock);
459                 return -ENOMEM;
460         }
461
462         count = 0;
463         list_for_each_entry(d, &hci_dev_list, list) {
464                 if (hci_dev_test_flag(d, HCI_SETUP) ||
465                     hci_dev_test_flag(d, HCI_CONFIG) ||
466                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
467                         continue;
468
469                 /* Devices marked as raw-only are neither configured
470                  * nor unconfigured controllers.
471                  */
472                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
473                         continue;
474
475                 if (d->dev_type == HCI_PRIMARY &&
476                     !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
477                         rp->index[count++] = cpu_to_le16(d->id);
478                         bt_dev_dbg(hdev, "Added hci%u", d->id);
479                 }
480         }
481
482         rp->num_controllers = cpu_to_le16(count);
483         rp_len = sizeof(*rp) + (2 * count);
484
485         read_unlock(&hci_dev_list_lock);
486
487         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
488                                 0, rp, rp_len);
489
490         kfree(rp);
491
492         return err;
493 }
494
495 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
496                                   void *data, u16 data_len)
497 {
498         struct mgmt_rp_read_unconf_index_list *rp;
499         struct hci_dev *d;
500         size_t rp_len;
501         u16 count;
502         int err;
503
504         bt_dev_dbg(hdev, "sock %p", sk);
505
506         read_lock(&hci_dev_list_lock);
507
508         count = 0;
509         list_for_each_entry(d, &hci_dev_list, list) {
510                 if (d->dev_type == HCI_PRIMARY &&
511                     hci_dev_test_flag(d, HCI_UNCONFIGURED))
512                         count++;
513         }
514
515         rp_len = sizeof(*rp) + (2 * count);
516         rp = kmalloc(rp_len, GFP_ATOMIC);
517         if (!rp) {
518                 read_unlock(&hci_dev_list_lock);
519                 return -ENOMEM;
520         }
521
522         count = 0;
523         list_for_each_entry(d, &hci_dev_list, list) {
524                 if (hci_dev_test_flag(d, HCI_SETUP) ||
525                     hci_dev_test_flag(d, HCI_CONFIG) ||
526                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
527                         continue;
528
529                 /* Devices marked as raw-only are neither configured
530                  * nor unconfigured controllers.
531                  */
532                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
533                         continue;
534
535                 if (d->dev_type == HCI_PRIMARY &&
536                     hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
537                         rp->index[count++] = cpu_to_le16(d->id);
538                         bt_dev_dbg(hdev, "Added hci%u", d->id);
539                 }
540         }
541
542         rp->num_controllers = cpu_to_le16(count);
543         rp_len = sizeof(*rp) + (2 * count);
544
545         read_unlock(&hci_dev_list_lock);
546
547         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
548                                 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
549
550         kfree(rp);
551
552         return err;
553 }
554
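/*
 * MGMT_OP_READ_EXT_INDEX_LIST handler: one combined list of configured
 * (type 0x00), unconfigured (0x01) and AMP (0x02) controllers. Calling
 * this also switches the socket over to extended index events.
 */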
555 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
556                                void *data, u16 data_len)
557 {
558         struct mgmt_rp_read_ext_index_list *rp;
559         struct hci_dev *d;
560         u16 count;
561         int err;
562
563         bt_dev_dbg(hdev, "sock %p", sk);
564
565         read_lock(&hci_dev_list_lock);
566
567         count = 0;
568         list_for_each_entry(d, &hci_dev_list, list) {
569                 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
570                         count++;
571         }
572
573         rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
574         if (!rp) {
575                 read_unlock(&hci_dev_list_lock);
576                 return -ENOMEM;
577         }
578
579         count = 0;
580         list_for_each_entry(d, &hci_dev_list, list) {
581                 if (hci_dev_test_flag(d, HCI_SETUP) ||
582                     hci_dev_test_flag(d, HCI_CONFIG) ||
583                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
584                         continue;
585
586                 /* Devices marked as raw-only are neither configured
587                  * nor unconfigured controllers.
588                  */
589                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
590                         continue;
591
592                 if (d->dev_type == HCI_PRIMARY) {
593                         if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
594                                 rp->entry[count].type = 0x01;
595                         else
596                                 rp->entry[count].type = 0x00;
597                 } else if (d->dev_type == HCI_AMP) {
598                         rp->entry[count].type = 0x02;
599                 } else {
600                         continue;
601                 }
602
603                 rp->entry[count].bus = d->bus;
604                 rp->entry[count++].index = cpu_to_le16(d->id);
605                 bt_dev_dbg(hdev, "Added hci%u", d->id);
606         }
607
608         rp->num_controllers = cpu_to_le16(count);
609
610         read_unlock(&hci_dev_list_lock);
611
612         /* If this command is called at least once, then all the
613          * default index and unconfigured index events are disabled
614          * and from now on only extended index events are used.
615          */
616         hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
617         hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
618         hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
619
620         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
621                                 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
622                                 struct_size(rp, entry, count));
623
624         kfree(rp);
625
626         return err;
627 }
628
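/*
 * A controller counts as configured once any externally required
 * configuration has completed and, where the bdaddr quirks demand it,
 * a valid public address has been set.
 */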
629 static bool is_configured(struct hci_dev *hdev)
630 {
631         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633                 return false;
634
635         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637             !bacmp(&hdev->public_addr, BDADDR_ANY))
638                 return false;
639
640         return true;
641 }
642
643 static __le32 get_missing_options(struct hci_dev *hdev)
644 {
645         u32 options = 0;
646
647         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
648             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
649                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
650
651         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
652              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
653             !bacmp(&hdev->public_addr, BDADDR_ANY))
654                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
655
656         return cpu_to_le32(options);
657 }
658
659 static int new_options(struct hci_dev *hdev, struct sock *skip)
660 {
661         __le32 options = get_missing_options(hdev);
662
663         return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
664                                   sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
665 }
666
667 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
668 {
669         __le32 options = get_missing_options(hdev);
670
671         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
672                                  sizeof(options));
673 }
674
675 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
676                             void *data, u16 data_len)
677 {
678         struct mgmt_rp_read_config_info rp;
679         u32 options = 0;
680
681         bt_dev_dbg(hdev, "sock %p", sk);
682
683         hci_dev_lock(hdev);
684
685         memset(&rp, 0, sizeof(rp));
686         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
687
688         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
689                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
690
691         if (hdev->set_bdaddr)
692                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
693
694         rp.supported_options = cpu_to_le32(options);
695         rp.missing_options = get_missing_options(hdev);
696
697         hci_dev_unlock(hdev);
698
699         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
700                                  &rp, sizeof(rp));
701 }
702
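/*
 * Build the MGMT_PHY_* bitmask of every PHY/packet-type combination the
 * controller supports, from its BR/EDR LMP features and LE PHY feature
 * bits.
 */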
703 static u32 get_supported_phys(struct hci_dev *hdev)
704 {
705         u32 supported_phys = 0;
706
707         if (lmp_bredr_capable(hdev)) {
708                 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
709
710                 if (hdev->features[0][0] & LMP_3SLOT)
711                         supported_phys |= MGMT_PHY_BR_1M_3SLOT;
712
713                 if (hdev->features[0][0] & LMP_5SLOT)
714                         supported_phys |= MGMT_PHY_BR_1M_5SLOT;
715
716                 if (lmp_edr_2m_capable(hdev)) {
717                         supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
718
719                         if (lmp_edr_3slot_capable(hdev))
720                                 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
721
722                         if (lmp_edr_5slot_capable(hdev))
723                                 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
724
725                         if (lmp_edr_3m_capable(hdev)) {
726                                 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
727
728                                 if (lmp_edr_3slot_capable(hdev))
729                                         supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
730
731                                 if (lmp_edr_5slot_capable(hdev))
732                                         supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
733                         }
734                 }
735         }
736
737         if (lmp_le_capable(hdev)) {
738                 supported_phys |= MGMT_PHY_LE_1M_TX;
739                 supported_phys |= MGMT_PHY_LE_1M_RX;
740
741                 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
742                         supported_phys |= MGMT_PHY_LE_2M_TX;
743                         supported_phys |= MGMT_PHY_LE_2M_RX;
744                 }
745
746                 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
747                         supported_phys |= MGMT_PHY_LE_CODED_TX;
748                         supported_phys |= MGMT_PHY_LE_CODED_RX;
749                 }
750         }
751
752         return supported_phys;
753 }
754
755 static u32 get_selected_phys(struct hci_dev *hdev)
756 {
757         u32 selected_phys = 0;
758
759         if (lmp_bredr_capable(hdev)) {
760                 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
761
762                 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
763                         selected_phys |= MGMT_PHY_BR_1M_3SLOT;
764
765                 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
766                         selected_phys |= MGMT_PHY_BR_1M_5SLOT;
767
768                 if (lmp_edr_2m_capable(hdev)) {
769                         if (!(hdev->pkt_type & HCI_2DH1))
770                                 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
771
772                         if (lmp_edr_3slot_capable(hdev) &&
773                             !(hdev->pkt_type & HCI_2DH3))
774                                 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
775
776                         if (lmp_edr_5slot_capable(hdev) &&
777                             !(hdev->pkt_type & HCI_2DH5))
778                                 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
779
780                         if (lmp_edr_3m_capable(hdev)) {
781                                 if (!(hdev->pkt_type & HCI_3DH1))
782                                         selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
783
784                                 if (lmp_edr_3slot_capable(hdev) &&
785                                     !(hdev->pkt_type & HCI_3DH3))
786                                         selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
787
788                                 if (lmp_edr_5slot_capable(hdev) &&
789                                     !(hdev->pkt_type & HCI_3DH5))
790                                         selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
791                         }
792                 }
793         }
794
795         if (lmp_le_capable(hdev)) {
796                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
797                         selected_phys |= MGMT_PHY_LE_1M_TX;
798
799                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
800                         selected_phys |= MGMT_PHY_LE_1M_RX;
801
802                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
803                         selected_phys |= MGMT_PHY_LE_2M_TX;
804
805                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
806                         selected_phys |= MGMT_PHY_LE_2M_RX;
807
808                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
809                         selected_phys |= MGMT_PHY_LE_CODED_TX;
810
811                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
812                         selected_phys |= MGMT_PHY_LE_CODED_RX;
813         }
814
815         return selected_phys;
816 }
817
818 static u32 get_configurable_phys(struct hci_dev *hdev)
819 {
820         return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
821                 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
822 }
823
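/*
 * Bitmask of MGMT_SETTING_* values this controller could support,
 * derived from its feature bits and quirks.
 */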
824 static u32 get_supported_settings(struct hci_dev *hdev)
825 {
826         u32 settings = 0;
827
828         settings |= MGMT_SETTING_POWERED;
829         settings |= MGMT_SETTING_BONDABLE;
830         settings |= MGMT_SETTING_DEBUG_KEYS;
831         settings |= MGMT_SETTING_CONNECTABLE;
832         settings |= MGMT_SETTING_DISCOVERABLE;
833
834         if (lmp_bredr_capable(hdev)) {
835                 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
836                         settings |= MGMT_SETTING_FAST_CONNECTABLE;
837                 settings |= MGMT_SETTING_BREDR;
838                 settings |= MGMT_SETTING_LINK_SECURITY;
839
840                 if (lmp_ssp_capable(hdev)) {
841                         settings |= MGMT_SETTING_SSP;
842                         if (IS_ENABLED(CONFIG_BT_HS))
843                                 settings |= MGMT_SETTING_HS;
844                 }
845
846                 if (lmp_sc_capable(hdev))
847                         settings |= MGMT_SETTING_SECURE_CONN;
848
849                 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
850                              &hdev->quirks))
851                         settings |= MGMT_SETTING_WIDEBAND_SPEECH;
852         }
853
854         if (lmp_le_capable(hdev)) {
855                 settings |= MGMT_SETTING_LE;
856                 settings |= MGMT_SETTING_SECURE_CONN;
857                 settings |= MGMT_SETTING_PRIVACY;
858                 settings |= MGMT_SETTING_STATIC_ADDRESS;
859                 settings |= MGMT_SETTING_ADVERTISING;
860         }
861
862         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
863             hdev->set_bdaddr)
864                 settings |= MGMT_SETTING_CONFIGURATION;
865
866         if (cis_central_capable(hdev))
867                 settings |= MGMT_SETTING_CIS_CENTRAL;
868
869         if (cis_peripheral_capable(hdev))
870                 settings |= MGMT_SETTING_CIS_PERIPHERAL;
871
872         settings |= MGMT_SETTING_PHY_CONFIGURATION;
873
874         return settings;
875 }
876
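/* Bitmask of MGMT_SETTING_* values currently enabled on hdev. */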
877 static u32 get_current_settings(struct hci_dev *hdev)
878 {
879         u32 settings = 0;
880
881         if (hdev_is_powered(hdev))
882                 settings |= MGMT_SETTING_POWERED;
883
884         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
885                 settings |= MGMT_SETTING_CONNECTABLE;
886
887         if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
888                 settings |= MGMT_SETTING_FAST_CONNECTABLE;
889
890         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
891                 settings |= MGMT_SETTING_DISCOVERABLE;
892
893         if (hci_dev_test_flag(hdev, HCI_BONDABLE))
894                 settings |= MGMT_SETTING_BONDABLE;
895
896         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
897                 settings |= MGMT_SETTING_BREDR;
898
899         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
900                 settings |= MGMT_SETTING_LE;
901
902         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
903                 settings |= MGMT_SETTING_LINK_SECURITY;
904
905         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
906                 settings |= MGMT_SETTING_SSP;
907
908         if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
909                 settings |= MGMT_SETTING_HS;
910
911         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
912                 settings |= MGMT_SETTING_ADVERTISING;
913
914         if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
915                 settings |= MGMT_SETTING_SECURE_CONN;
916
917         if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
918                 settings |= MGMT_SETTING_DEBUG_KEYS;
919
920         if (hci_dev_test_flag(hdev, HCI_PRIVACY))
921                 settings |= MGMT_SETTING_PRIVACY;
922
923         /* The current setting for static address has two purposes. The
924          * first is to indicate if the static address will be used and
925          * the second is to indicate if it is actually set.
926          *
927          * This means if the static address is not configured, this flag
928          * will never be set. If the address is configured, then whether
929          * the address is actually used decides if the flag is set or not.
930          *
931          * For single mode LE only controllers and dual-mode controllers
932          * with BR/EDR disabled, the existence of the static address will
933          * be evaluated.
934          */
935         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
936             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
937             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
938                 if (bacmp(&hdev->static_addr, BDADDR_ANY))
939                         settings |= MGMT_SETTING_STATIC_ADDRESS;
940         }
941
942         if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
943                 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
944
945         if (cis_central_capable(hdev))
946                 settings |= MGMT_SETTING_CIS_CENTRAL;
947
948         if (cis_peripheral_capable(hdev))
949                 settings |= MGMT_SETTING_CIS_PERIPHERAL;
950
951         if (bis_capable(hdev))
952                 settings |= MGMT_SETTING_ISO_BROADCASTER;
953
954         if (sync_recv_capable(hdev))
955                 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
956
957         return settings;
958 }
959
960 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
961 {
962         return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
963 }
964
965 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
966 {
967         struct mgmt_pending_cmd *cmd;
968
969         /* If there's a pending mgmt command the flags will not yet have
970          * their final values, so check for this first.
971          */
972         cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
973         if (cmd) {
974                 struct mgmt_mode *cp = cmd->param;
975                 if (cp->val == 0x01)
976                         return LE_AD_GENERAL;
977                 else if (cp->val == 0x02)
978                         return LE_AD_LIMITED;
979         } else {
980                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
981                         return LE_AD_LIMITED;
982                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
983                         return LE_AD_GENERAL;
984         }
985
986         return 0;
987 }
988
989 bool mgmt_get_connectable(struct hci_dev *hdev)
990 {
991         struct mgmt_pending_cmd *cmd;
992
993         /* If there's a pending mgmt command the flag will not yet have
994          * its final value, so check for this first.
995          */
996         cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
997         if (cmd) {
998                 struct mgmt_mode *cp = cmd->param;
999
1000                 return cp->val;
1001         }
1002
1003         return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1004 }
1005
1006 static int service_cache_sync(struct hci_dev *hdev, void *data)
1007 {
1008         hci_update_eir_sync(hdev);
1009         hci_update_class_sync(hdev);
1010
1011         return 0;
1012 }
1013
1014 static void service_cache_off(struct work_struct *work)
1015 {
1016         struct hci_dev *hdev = container_of(work, struct hci_dev,
1017                                             service_cache.work);
1018
1019         if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1020                 return;
1021
1022         hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1023 }
1024
1025 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1026 {
1027         /* The generation of a new RPA and programming it into the
1028          * controller happens in the hci_req_enable_advertising()
1029          * function.
1030          */
1031         if (ext_adv_capable(hdev))
1032                 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1033         else
1034                 return hci_enable_advertising_sync(hdev);
1035 }
1036
1037 static void rpa_expired(struct work_struct *work)
1038 {
1039         struct hci_dev *hdev = container_of(work, struct hci_dev,
1040                                             rpa_expired.work);
1041
1042         bt_dev_dbg(hdev, "");
1043
1044         hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1045
1046         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1047                 return;
1048
1049         hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1050 }
1051
1052 static void discov_off(struct work_struct *work)
1053 {
1054         struct hci_dev *hdev = container_of(work, struct hci_dev,
1055                                             discov_off.work);
1056
1057         bt_dev_dbg(hdev, "");
1058
1059         hci_dev_lock(hdev);
1060
1061         /* When the discoverable timeout triggers, just make sure
1062          * the limited discoverable flag is cleared. Even in the case
1063          * of a timeout triggered from general discoverable, it is
1064          * safe to unconditionally clear the flag.
1065          */
1066         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1067         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1068         hdev->discov_timeout = 0;
1069
1070         hci_update_discoverable(hdev);
1071
1072         mgmt_new_settings(hdev);
1073
1074         hci_dev_unlock(hdev);
1075 }
1076
1077 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1078
1079 static void mesh_send_complete(struct hci_dev *hdev,
1080                                struct mgmt_mesh_tx *mesh_tx, bool silent)
1081 {
1082         u8 handle = mesh_tx->handle;
1083
1084         if (!silent)
1085                 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1086                            sizeof(handle), NULL);
1087
1088         mgmt_mesh_remove(mesh_tx);
1089 }
1090
1091 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1092 {
1093         struct mgmt_mesh_tx *mesh_tx;
1094
1095         hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1096         hci_disable_advertising_sync(hdev);
1097         mesh_tx = mgmt_mesh_next(hdev, NULL);
1098
1099         if (mesh_tx)
1100                 mesh_send_complete(hdev, mesh_tx, false);
1101
1102         return 0;
1103 }
1104
1105 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1106 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1107 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1108 {
1109         struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1110
1111         if (!mesh_tx)
1112                 return;
1113
1114         err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1115                                  mesh_send_start_complete);
1116
1117         if (err < 0)
1118                 mesh_send_complete(hdev, mesh_tx, false);
1119         else
1120                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1121 }
1122
1123 static void mesh_send_done(struct work_struct *work)
1124 {
1125         struct hci_dev *hdev = container_of(work, struct hci_dev,
1126                                             mesh_send_done.work);
1127
1128         if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1129                 return;
1130
1131         hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1132 }
1133
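/*
 * Set up MGMT state the first time a device is used over the control
 * channel: initialize the delayed works used by the handlers in this
 * file, clear the implicit bondable flag and mark the device as
 * MGMT-controlled.
 */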
1134 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1135 {
1136         if (hci_dev_test_flag(hdev, HCI_MGMT))
1137                 return;
1138
1139         BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1140
1141         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1142         INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1143         INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1144         INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1145
1146         /* Non-mgmt controlled devices get this bit set
1147          * implicitly so that pairing works for them; however,
1148          * for mgmt we require user-space to explicitly enable
1149          * it.
1150          */
1151         hci_dev_clear_flag(hdev, HCI_BONDABLE);
1152
1153         hci_dev_set_flag(hdev, HCI_MGMT);
1154 }
1155
1156 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1157                                 void *data, u16 data_len)
1158 {
1159         struct mgmt_rp_read_info rp;
1160
1161         bt_dev_dbg(hdev, "sock %p", sk);
1162
1163         hci_dev_lock(hdev);
1164
1165         memset(&rp, 0, sizeof(rp));
1166
1167         bacpy(&rp.bdaddr, &hdev->bdaddr);
1168
1169         rp.version = hdev->hci_ver;
1170         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1171
1172         rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1173         rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1174
1175         memcpy(rp.dev_class, hdev->dev_class, 3);
1176
1177         memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1178         memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1179
1180         hci_dev_unlock(hdev);
1181
1182         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1183                                  sizeof(rp));
1184 }
1185
1186 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1187 {
1188         u16 eir_len = 0;
1189         size_t name_len;
1190
1191         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1192                 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1193                                           hdev->dev_class, 3);
1194
1195         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1196                 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1197                                           hdev->appearance);
1198
1199         name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1200         eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1201                                   hdev->dev_name, name_len);
1202
1203         name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1204         eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1205                                   hdev->short_name, name_len);
1206
1207         return eir_len;
1208 }
1209
1210 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1211                                     void *data, u16 data_len)
1212 {
1213         char buf[512];
1214         struct mgmt_rp_read_ext_info *rp = (void *)buf;
1215         u16 eir_len;
1216
1217         bt_dev_dbg(hdev, "sock %p", sk);
1218
1219         memset(&buf, 0, sizeof(buf));
1220
1221         hci_dev_lock(hdev);
1222
1223         bacpy(&rp->bdaddr, &hdev->bdaddr);
1224
1225         rp->version = hdev->hci_ver;
1226         rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1227
1228         rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1229         rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1230
1231
1232         eir_len = append_eir_data_to_buf(hdev, rp->eir);
1233         rp->eir_len = cpu_to_le16(eir_len);
1234
1235         hci_dev_unlock(hdev);
1236
1237         /* If this command is called at least once, then the events
1238          * for class of device and local name changes are disabled
1239          * and only the new extended controller information event
1240          * is used.
1241          */
1242         hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1243         hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1244         hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1245
1246         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1247                                  sizeof(*rp) + eir_len);
1248 }
1249
1250 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1251 {
1252         char buf[512];
1253         struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1254         u16 eir_len;
1255
1256         memset(buf, 0, sizeof(buf));
1257
1258         eir_len = append_eir_data_to_buf(hdev, ev->eir);
1259         ev->eir_len = cpu_to_le16(eir_len);
1260
1261         return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1262                                   sizeof(*ev) + eir_len,
1263                                   HCI_MGMT_EXT_INFO_EVENTS, skip);
1264 }
1265
1266 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1267 {
1268         __le32 settings = cpu_to_le32(get_current_settings(hdev));
1269
1270         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1271                                  sizeof(settings));
1272 }
1273
1274 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1275 {
1276         struct mgmt_ev_advertising_added ev;
1277
1278         ev.instance = instance;
1279
1280         mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1281 }
1282
1283 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1284                               u8 instance)
1285 {
1286         struct mgmt_ev_advertising_removed ev;
1287
1288         ev.instance = instance;
1289
1290         mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1291 }
1292
1293 static void cancel_adv_timeout(struct hci_dev *hdev)
1294 {
1295         if (hdev->adv_instance_timeout) {
1296                 hdev->adv_instance_timeout = 0;
1297                 cancel_delayed_work(&hdev->adv_instance_expire);
1298         }
1299 }
1300
1301 /* This function requires the caller to hold hdev->lock */
1302 static void restart_le_actions(struct hci_dev *hdev)
1303 {
1304         struct hci_conn_params *p;
1305
1306         list_for_each_entry(p, &hdev->le_conn_params, list) {
1307                 /* Needed for the AUTO_OFF case, where the device might
1308                  * not "really" have been powered off.
1309                  */
1310                 hci_pend_le_list_del_init(p);
1311
1312                 switch (p->auto_connect) {
1313                 case HCI_AUTO_CONN_DIRECT:
1314                 case HCI_AUTO_CONN_ALWAYS:
1315                         hci_pend_le_list_add(p, &hdev->pend_le_conns);
1316                         break;
1317                 case HCI_AUTO_CONN_REPORT:
1318                         hci_pend_le_list_add(p, &hdev->pend_le_reports);
1319                         break;
1320                 default:
1321                         break;
1322                 }
1323         }
1324 }
1325
1326 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1327 {
1328         __le32 ev = cpu_to_le32(get_current_settings(hdev));
1329
1330         return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1331                                   sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1332 }
1333
1334 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1335 {
1336         struct mgmt_pending_cmd *cmd = data;
1337         struct mgmt_mode *cp;
1338
1339         /* Make sure cmd still outstanding. */
1340         if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1341                 return;
1342
1343         cp = cmd->param;
1344
1345         bt_dev_dbg(hdev, "err %d", err);
1346
1347         if (!err) {
1348                 if (cp->val) {
1349                         hci_dev_lock(hdev);
1350                         restart_le_actions(hdev);
1351                         hci_update_passive_scan(hdev);
1352                         hci_dev_unlock(hdev);
1353                 }
1354
1355                 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1356
1357                 /* Only call new_settings() for power on as power off is deferred
1358                  * to hdev->power_off work which does call hci_dev_do_close.
1359                  */
1360                 if (cp->val)
1361                         new_settings(hdev, cmd->sk);
1362         } else {
1363                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1364                                 mgmt_status(err));
1365         }
1366
1367         mgmt_pending_remove(cmd);
1368 }
1369
1370 static int set_powered_sync(struct hci_dev *hdev, void *data)
1371 {
1372         struct mgmt_pending_cmd *cmd = data;
1373         struct mgmt_mode *cp = cmd->param;
1374
1375         BT_DBG("%s", hdev->name);
1376
1377         return hci_set_powered_sync(hdev, cp->val);
1378 }
1379
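/*
 * MGMT_OP_SET_POWERED handler: validate the request, then queue the
 * power change on the cmd_sync machinery; the result is reported back
 * from mgmt_set_powered_complete() above.
 */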
1380 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1381                        u16 len)
1382 {
1383         struct mgmt_mode *cp = data;
1384         struct mgmt_pending_cmd *cmd;
1385         int err;
1386
1387         bt_dev_dbg(hdev, "sock %p", sk);
1388
1389         if (cp->val != 0x00 && cp->val != 0x01)
1390                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1391                                        MGMT_STATUS_INVALID_PARAMS);
1392
1393         hci_dev_lock(hdev);
1394
1395         if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1396                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1397                                       MGMT_STATUS_BUSY);
1398                 goto failed;
1399         }
1400
1401         if (!!cp->val == hdev_is_powered(hdev)) {
1402                 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1403                 goto failed;
1404         }
1405
1406         cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1407         if (!cmd) {
1408                 err = -ENOMEM;
1409                 goto failed;
1410         }
1411
1412         /* Cancel potentially blocking sync operation before power off */
1413         if (cp->val == 0x00) {
1414                 __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
1415                 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1416                                          mgmt_set_powered_complete);
1417         } else {
1418                 /* Use hci_cmd_sync_submit since hdev might not be running */
1419                 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1420                                           mgmt_set_powered_complete);
1421         }
1422
1423         if (err < 0)
1424                 mgmt_pending_remove(cmd);
1425
1426 failed:
1427         hci_dev_unlock(hdev);
1428         return err;
1429 }
1430
1431 int mgmt_new_settings(struct hci_dev *hdev)
1432 {
1433         return new_settings(hdev, NULL);
1434 }
1435
1436 struct cmd_lookup {
1437         struct sock *sk;
1438         struct hci_dev *hdev;
1439         u8 mgmt_status;
1440 };
1441
1442 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1443 {
1444         struct cmd_lookup *match = data;
1445
1446         send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1447
1448         list_del(&cmd->list);
1449
1450         if (match->sk == NULL) {
1451                 match->sk = cmd->sk;
1452                 sock_hold(match->sk);
1453         }
1454
1455         mgmt_pending_free(cmd);
1456 }
1457
1458 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1459 {
1460         u8 *status = data;
1461
1462         mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1463         mgmt_pending_remove(cmd);
1464 }
1465
1466 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1467 {
1468         if (cmd->cmd_complete) {
1469                 u8 *status = data;
1470
1471                 cmd->cmd_complete(cmd, *status);
1472                 mgmt_pending_remove(cmd);
1473
1474                 return;
1475         }
1476
1477         cmd_status_rsp(cmd, data);
1478 }
1479
1480 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1481 {
1482         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1483                                  cmd->param, cmd->param_len);
1484 }
1485
1486 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1487 {
1488         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1489                                  cmd->param, sizeof(struct mgmt_addr_info));
1490 }
1491
1492 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1493 {
1494         if (!lmp_bredr_capable(hdev))
1495                 return MGMT_STATUS_NOT_SUPPORTED;
1496         else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1497                 return MGMT_STATUS_REJECTED;
1498         else
1499                 return MGMT_STATUS_SUCCESS;
1500 }
1501
1502 static u8 mgmt_le_support(struct hci_dev *hdev)
1503 {
1504         if (!lmp_le_capable(hdev))
1505                 return MGMT_STATUS_NOT_SUPPORTED;
1506         else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1507                 return MGMT_STATUS_REJECTED;
1508         else
1509                 return MGMT_STATUS_SUCCESS;
1510 }
1511
1512 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1513                                            int err)
1514 {
1515         struct mgmt_pending_cmd *cmd = data;
1516
1517         bt_dev_dbg(hdev, "err %d", err);
1518
1519         /* Make sure the cmd is still outstanding. */
1520         if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1521                 return;
1522
1523         hci_dev_lock(hdev);
1524
1525         if (err) {
1526                 u8 mgmt_err = mgmt_status(err);
1527                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1528                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1529                 goto done;
1530         }
1531
1532         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1533             hdev->discov_timeout > 0) {
1534                 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1535                 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1536         }
1537
1538         send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1539         new_settings(hdev, cmd->sk);
1540
1541 done:
1542         mgmt_pending_remove(cmd);
1543         hci_dev_unlock(hdev);
1544 }
1545
1546 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1547 {
1548         BT_DBG("%s", hdev->name);
1549
1550         return hci_update_discoverable_sync(hdev);
1551 }
1552
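/* Valid parameter combinations for MGMT_OP_SET_DISCOVERABLE, as enforced
 * below: val 0x00 (not discoverable) requires timeout == 0, val 0x01
 * (general discoverable) accepts any timeout, and val 0x02 (limited
 * discoverable) requires a non-zero timeout (in seconds). For illustration
 * only, a userspace client asking for 60 seconds of limited discoverability
 * would fill the command parameters roughly as:
 *
 *	struct mgmt_cp_set_discoverable cp = {
 *		.val	 = 0x02,
 *		.timeout = cpu_to_le16(60),
 *	};
 */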
1553 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1554                             u16 len)
1555 {
1556         struct mgmt_cp_set_discoverable *cp = data;
1557         struct mgmt_pending_cmd *cmd;
1558         u16 timeout;
1559         int err;
1560
1561         bt_dev_dbg(hdev, "sock %p", sk);
1562
1563         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1564             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1565                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566                                        MGMT_STATUS_REJECTED);
1567
1568         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1569                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1570                                        MGMT_STATUS_INVALID_PARAMS);
1571
1572         timeout = __le16_to_cpu(cp->timeout);
1573
1574         /* Disabling discoverable requires that no timeout is set,
1575          * and enabling limited discoverable requires a timeout.
1576          */
1577         if ((cp->val == 0x00 && timeout > 0) ||
1578             (cp->val == 0x02 && timeout == 0))
1579                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580                                        MGMT_STATUS_INVALID_PARAMS);
1581
1582         hci_dev_lock(hdev);
1583
1584         if (!hdev_is_powered(hdev) && timeout > 0) {
1585                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1586                                       MGMT_STATUS_NOT_POWERED);
1587                 goto failed;
1588         }
1589
1590         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1593                                       MGMT_STATUS_BUSY);
1594                 goto failed;
1595         }
1596
1597         if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1598                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599                                       MGMT_STATUS_REJECTED);
1600                 goto failed;
1601         }
1602
1603         if (hdev->advertising_paused) {
1604                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1605                                       MGMT_STATUS_BUSY);
1606                 goto failed;
1607         }
1608
1609         if (!hdev_is_powered(hdev)) {
1610                 bool changed = false;
1611
1612                 /* Setting limited discoverable when powered off is
1613                  * not a valid operation since it requires a timeout,
1614                  * so there is no need to check HCI_LIMITED_DISCOVERABLE.
1615                  */
1616                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1617                         hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1618                         changed = true;
1619                 }
1620
1621                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1622                 if (err < 0)
1623                         goto failed;
1624
1625                 if (changed)
1626                         err = new_settings(hdev, sk);
1627
1628                 goto failed;
1629         }
1630
1631         /* If the current mode is the same, then just update the timeout
1632          * value with the new value. If only the timeout gets updated,
1633          * no HCI transactions are needed.
1634          */
1635         if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1636             (cp->val == 0x02) == hci_dev_test_flag(hdev,
1637                                                    HCI_LIMITED_DISCOVERABLE)) {
1638                 cancel_delayed_work(&hdev->discov_off);
1639                 hdev->discov_timeout = timeout;
1640
1641                 if (cp->val && hdev->discov_timeout > 0) {
1642                         int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1643                         queue_delayed_work(hdev->req_workqueue,
1644                                            &hdev->discov_off, to);
1645                 }
1646
1647                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1648                 goto failed;
1649         }
1650
1651         cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1652         if (!cmd) {
1653                 err = -ENOMEM;
1654                 goto failed;
1655         }
1656
1657         /* Cancel any potential discoverable timeout that might still
1658          * be active and store the new timeout value. The arming of
1659          * the timeout happens in the complete handler.
1660          */
1661         cancel_delayed_work(&hdev->discov_off);
1662         hdev->discov_timeout = timeout;
1663
1664         if (cp->val)
1665                 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1666         else
1667                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1668
1669         /* Limited discoverable mode */
1670         if (cp->val == 0x02)
1671                 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1672         else
1673                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1674
1675         err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1676                                  mgmt_set_discoverable_complete);
1677
1678         if (err < 0)
1679                 mgmt_pending_remove(cmd);
1680
1681 failed:
1682         hci_dev_unlock(hdev);
1683         return err;
1684 }
1685
1686 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1687                                           int err)
1688 {
1689         struct mgmt_pending_cmd *cmd = data;
1690
1691         bt_dev_dbg(hdev, "err %d", err);
1692
1693         /* Make sure the cmd is still outstanding. */
1694         if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1695                 return;
1696
1697         hci_dev_lock(hdev);
1698
1699         if (err) {
1700                 u8 mgmt_err = mgmt_status(err);
1701                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1702                 goto done;
1703         }
1704
1705         send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1706         new_settings(hdev, cmd->sk);
1707
1708 done:
1709         if (cmd)
1710                 mgmt_pending_remove(cmd);
1711
1712         hci_dev_unlock(hdev);
1713 }
1714
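/* Handles SET_CONNECTABLE while the controller is powered off: only the
 * HCI_CONNECTABLE flag (and, when disabling, HCI_DISCOVERABLE) is toggled,
 * the scan state is re-evaluated and the new settings are reported back.
 */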
1715 static int set_connectable_update_settings(struct hci_dev *hdev,
1716                                            struct sock *sk, u8 val)
1717 {
1718         bool changed = false;
1719         int err;
1720
1721         if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1722                 changed = true;
1723
1724         if (val) {
1725                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1726         } else {
1727                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1728                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1729         }
1730
1731         err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1732         if (err < 0)
1733                 return err;
1734
1735         if (changed) {
1736                 hci_update_scan(hdev);
1737                 hci_update_passive_scan(hdev);
1738                 return new_settings(hdev, sk);
1739         }
1740
1741         return 0;
1742 }
1743
1744 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1745 {
1746         BT_DBG("%s", hdev->name);
1747
1748         return hci_update_connectable_sync(hdev);
1749 }
1750
1751 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1752                            u16 len)
1753 {
1754         struct mgmt_mode *cp = data;
1755         struct mgmt_pending_cmd *cmd;
1756         int err;
1757
1758         bt_dev_dbg(hdev, "sock %p", sk);
1759
1760         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1761             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1762                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1763                                        MGMT_STATUS_REJECTED);
1764
1765         if (cp->val != 0x00 && cp->val != 0x01)
1766                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1767                                        MGMT_STATUS_INVALID_PARAMS);
1768
1769         hci_dev_lock(hdev);
1770
1771         if (!hdev_is_powered(hdev)) {
1772                 err = set_connectable_update_settings(hdev, sk, cp->val);
1773                 goto failed;
1774         }
1775
1776         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1777             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1778                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1779                                       MGMT_STATUS_BUSY);
1780                 goto failed;
1781         }
1782
1783         cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1784         if (!cmd) {
1785                 err = -ENOMEM;
1786                 goto failed;
1787         }
1788
1789         if (cp->val) {
1790                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1791         } else {
1792                 if (hdev->discov_timeout > 0)
1793                         cancel_delayed_work(&hdev->discov_off);
1794
1795                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1796                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1797                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1798         }
1799
1800         err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1801                                  mgmt_set_connectable_complete);
1802
1803         if (err < 0)
1804                 mgmt_pending_remove(cmd);
1805
1806 failed:
1807         hci_dev_unlock(hdev);
1808         return err;
1809 }
1810
1811 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1812                         u16 len)
1813 {
1814         struct mgmt_mode *cp = data;
1815         bool changed;
1816         int err;
1817
1818         bt_dev_dbg(hdev, "sock %p", sk);
1819
1820         if (cp->val != 0x00 && cp->val != 0x01)
1821                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1822                                        MGMT_STATUS_INVALID_PARAMS);
1823
1824         hci_dev_lock(hdev);
1825
1826         if (cp->val)
1827                 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1828         else
1829                 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1830
1831         err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1832         if (err < 0)
1833                 goto unlock;
1834
1835         if (changed) {
1836                 /* In limited privacy mode the change of bondable mode
1837                  * may affect the local advertising address.
1838                  */
1839                 hci_update_discoverable(hdev);
1840
1841                 err = new_settings(hdev, sk);
1842         }
1843
1844 unlock:
1845         hci_dev_unlock(hdev);
1846         return err;
1847 }
1848
1849 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1850                              u16 len)
1851 {
1852         struct mgmt_mode *cp = data;
1853         struct mgmt_pending_cmd *cmd;
1854         u8 val, status;
1855         int err;
1856
1857         bt_dev_dbg(hdev, "sock %p", sk);
1858
1859         status = mgmt_bredr_support(hdev);
1860         if (status)
1861                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1862                                        status);
1863
1864         if (cp->val != 0x00 && cp->val != 0x01)
1865                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1866                                        MGMT_STATUS_INVALID_PARAMS);
1867
1868         hci_dev_lock(hdev);
1869
1870         if (!hdev_is_powered(hdev)) {
1871                 bool changed = false;
1872
1873                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1874                         hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1875                         changed = true;
1876                 }
1877
1878                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1879                 if (err < 0)
1880                         goto failed;
1881
1882                 if (changed)
1883                         err = new_settings(hdev, sk);
1884
1885                 goto failed;
1886         }
1887
1888         if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1889                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1890                                       MGMT_STATUS_BUSY);
1891                 goto failed;
1892         }
1893
1894         val = !!cp->val;
1895
1896         if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1897                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1898                 goto failed;
1899         }
1900
1901         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1902         if (!cmd) {
1903                 err = -ENOMEM;
1904                 goto failed;
1905         }
1906
1907         err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1908         if (err < 0) {
1909                 mgmt_pending_remove(cmd);
1910                 goto failed;
1911         }
1912
1913 failed:
1914         hci_dev_unlock(hdev);
1915         return err;
1916 }
1917
1918 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1919 {
1920         struct cmd_lookup match = { NULL, hdev };
1921         struct mgmt_pending_cmd *cmd = data;
1922         struct mgmt_mode *cp = cmd->param;
1923         u8 enable = cp->val;
1924         bool changed;
1925
1926         /* Make sure the cmd is still outstanding. */
1927         if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1928                 return;
1929
1930         if (err) {
1931                 u8 mgmt_err = mgmt_status(err);
1932
1933                 if (enable && hci_dev_test_and_clear_flag(hdev,
1934                                                           HCI_SSP_ENABLED)) {
1935                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1936                         new_settings(hdev, NULL);
1937                 }
1938
1939                 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1940                                      &mgmt_err);
1941                 return;
1942         }
1943
1944         if (enable) {
1945                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1946         } else {
1947                 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1948
1949                 if (!changed)
1950                         changed = hci_dev_test_and_clear_flag(hdev,
1951                                                               HCI_HS_ENABLED);
1952                 else
1953                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1954         }
1955
1956         mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1957
1958         if (changed)
1959                 new_settings(hdev, match.sk);
1960
1961         if (match.sk)
1962                 sock_put(match.sk);
1963
1964         hci_update_eir_sync(hdev);
1965 }
1966
1967 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1968 {
1969         struct mgmt_pending_cmd *cmd = data;
1970         struct mgmt_mode *cp = cmd->param;
1971         bool changed = false;
1972         int err;
1973
1974         if (cp->val)
1975                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1976
1977         err = hci_write_ssp_mode_sync(hdev, cp->val);
1978
1979         if (!err && changed)
1980                 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1981
1982         return err;
1983 }
1984
1985 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1986 {
1987         struct mgmt_mode *cp = data;
1988         struct mgmt_pending_cmd *cmd;
1989         u8 status;
1990         int err;
1991
1992         bt_dev_dbg(hdev, "sock %p", sk);
1993
1994         status = mgmt_bredr_support(hdev);
1995         if (status)
1996                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1997
1998         if (!lmp_ssp_capable(hdev))
1999                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2000                                        MGMT_STATUS_NOT_SUPPORTED);
2001
2002         if (cp->val != 0x00 && cp->val != 0x01)
2003                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2004                                        MGMT_STATUS_INVALID_PARAMS);
2005
2006         hci_dev_lock(hdev);
2007
2008         if (!hdev_is_powered(hdev)) {
2009                 bool changed;
2010
2011                 if (cp->val) {
2012                         changed = !hci_dev_test_and_set_flag(hdev,
2013                                                              HCI_SSP_ENABLED);
2014                 } else {
2015                         changed = hci_dev_test_and_clear_flag(hdev,
2016                                                               HCI_SSP_ENABLED);
2017                         if (!changed)
2018                                 changed = hci_dev_test_and_clear_flag(hdev,
2019                                                                       HCI_HS_ENABLED);
2020                         else
2021                                 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2022                 }
2023
2024                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2025                 if (err < 0)
2026                         goto failed;
2027
2028                 if (changed)
2029                         err = new_settings(hdev, sk);
2030
2031                 goto failed;
2032         }
2033
2034         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2035                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2036                                       MGMT_STATUS_BUSY);
2037                 goto failed;
2038         }
2039
2040         if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2041                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2042                 goto failed;
2043         }
2044
2045         cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2046         if (!cmd)
2047                 err = -ENOMEM;
2048         else
2049                 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2050                                          set_ssp_complete);
2051
2052         if (err < 0) {
2053                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2054                                       MGMT_STATUS_FAILED);
2055
2056                 if (cmd)
2057                         mgmt_pending_remove(cmd);
2058         }
2059
2060 failed:
2061         hci_dev_unlock(hdev);
2062         return err;
2063 }
2064
2065 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2066 {
2067         struct mgmt_mode *cp = data;
2068         bool changed;
2069         u8 status;
2070         int err;
2071
2072         bt_dev_dbg(hdev, "sock %p", sk);
2073
2074         if (!IS_ENABLED(CONFIG_BT_HS))
2075                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2076                                        MGMT_STATUS_NOT_SUPPORTED);
2077
2078         status = mgmt_bredr_support(hdev);
2079         if (status)
2080                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2081
2082         if (!lmp_ssp_capable(hdev))
2083                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2084                                        MGMT_STATUS_NOT_SUPPORTED);
2085
2086         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2087                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2088                                        MGMT_STATUS_REJECTED);
2089
2090         if (cp->val != 0x00 && cp->val != 0x01)
2091                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2092                                        MGMT_STATUS_INVALID_PARAMS);
2093
2094         hci_dev_lock(hdev);
2095
2096         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2097                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2098                                       MGMT_STATUS_BUSY);
2099                 goto unlock;
2100         }
2101
2102         if (cp->val) {
2103                 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2104         } else {
2105                 if (hdev_is_powered(hdev)) {
2106                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2107                                               MGMT_STATUS_REJECTED);
2108                         goto unlock;
2109                 }
2110
2111                 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2112         }
2113
2114         err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2115         if (err < 0)
2116                 goto unlock;
2117
2118         if (changed)
2119                 err = new_settings(hdev, sk);
2120
2121 unlock:
2122         hci_dev_unlock(hdev);
2123         return err;
2124 }
2125
2126 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2127 {
2128         struct cmd_lookup match = { NULL, hdev };
2129         u8 status = mgmt_status(err);
2130
2131         bt_dev_dbg(hdev, "err %d", err);
2132
2133         if (status) {
2134                 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2135                                                         &status);
2136                 return;
2137         }
2138
2139         mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2140
2141         new_settings(hdev, match.sk);
2142
2143         if (match.sk)
2144                 sock_put(match.sk);
2145 }
2146
2147 static int set_le_sync(struct hci_dev *hdev, void *data)
2148 {
2149         struct mgmt_pending_cmd *cmd = data;
2150         struct mgmt_mode *cp = cmd->param;
2151         u8 val = !!cp->val;
2152         int err;
2153
2154         if (!val) {
2155                 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2156
2157                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2158                         hci_disable_advertising_sync(hdev);
2159
2160                 if (ext_adv_capable(hdev))
2161                         hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2162         } else {
2163                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2164         }
2165
2166         err = hci_write_le_host_supported_sync(hdev, val, 0);
2167
2168         /* Make sure the controller has a good default for
2169          * advertising data. Restrict the update to when LE
2170          * has actually been enabled. During power on, the
2171          * update in powered_update_hci will take care of it.
2172          */
2173         if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2174                 if (ext_adv_capable(hdev)) {
2175                         int status;
2176
2177                         status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2178                         if (!status)
2179                                 hci_update_scan_rsp_data_sync(hdev, 0x00);
2180                 } else {
2181                         hci_update_adv_data_sync(hdev, 0x00);
2182                         hci_update_scan_rsp_data_sync(hdev, 0x00);
2183                 }
2184
2185                 hci_update_passive_scan(hdev);
2186         }
2187
2188         return err;
2189 }
2190
2191 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2192 {
2193         struct mgmt_pending_cmd *cmd = data;
2194         u8 status = mgmt_status(err);
2195         struct sock *sk = cmd->sk;
2196
2197         if (status) {
2198                 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2199                                      cmd_status_rsp, &status);
2200                 return;
2201         }
2202
2203         mgmt_pending_remove(cmd);
2204         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2205 }
2206
2207 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2208 {
2209         struct mgmt_pending_cmd *cmd = data;
2210         struct mgmt_cp_set_mesh *cp = cmd->param;
2211         size_t len = cmd->param_len;
2212
2213         memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2214
2215         if (cp->enable)
2216                 hci_dev_set_flag(hdev, HCI_MESH);
2217         else
2218                 hci_dev_clear_flag(hdev, HCI_MESH);
2219
2220         len -= sizeof(*cp);
2221
2222         /* If the filters don't fit, leave them empty so all adv pkts are forwarded */
2223         if (len <= sizeof(hdev->mesh_ad_types))
2224                 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2225
2226         hci_update_passive_scan_sync(hdev);
2227         return 0;
2228 }
2229
2230 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2231 {
2232         struct mgmt_cp_set_mesh *cp = data;
2233         struct mgmt_pending_cmd *cmd;
2234         int err = 0;
2235
2236         bt_dev_dbg(hdev, "sock %p", sk);
2237
2238         if (!lmp_le_capable(hdev) ||
2239             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2240                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2241                                        MGMT_STATUS_NOT_SUPPORTED);
2242
2243         if (cp->enable != 0x00 && cp->enable != 0x01)
2244                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2245                                        MGMT_STATUS_INVALID_PARAMS);
2246
2247         hci_dev_lock(hdev);
2248
2249         cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2250         if (!cmd)
2251                 err = -ENOMEM;
2252         else
2253                 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2254                                          set_mesh_complete);
2255
2256         if (err < 0) {
2257                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2258                                       MGMT_STATUS_FAILED);
2259
2260                 if (cmd)
2261                         mgmt_pending_remove(cmd);
2262         }
2263
2264         hci_dev_unlock(hdev);
2265         return err;
2266 }
2267
2268 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2269 {
2270         struct mgmt_mesh_tx *mesh_tx = data;
2271         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2272         unsigned long mesh_send_interval;
2273         u8 mgmt_err = mgmt_status(err);
2274
2275         /* Report any errors here, but don't report completion */
2276
2277         if (mgmt_err) {
2278                 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2279                 /* Send Complete Error Code for handle */
2280                 mesh_send_complete(hdev, mesh_tx, false);
2281                 return;
2282         }
2283
2284         mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2285         queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2286                            mesh_send_interval);
2287 }
2288
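/* Mesh transmissions use a dedicated advertising instance one past the
 * controller's regular advertising sets (le_num_of_adv_sets + 1); if the
 * regular sets are already exhausted the transmission is reported as busy.
 */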
2289 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2290 {
2291         struct mgmt_mesh_tx *mesh_tx = data;
2292         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2293         struct adv_info *adv, *next_instance;
2294         u8 instance = hdev->le_num_of_adv_sets + 1;
2295         u16 timeout, duration;
2296         int err = 0;
2297
2298         if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2299                 return MGMT_STATUS_BUSY;
2300
2301         timeout = 1000;
2302         duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2303         adv = hci_add_adv_instance(hdev, instance, 0,
2304                                    send->adv_data_len, send->adv_data,
2305                                    0, NULL,
2306                                    timeout, duration,
2307                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
2308                                    hdev->le_adv_min_interval,
2309                                    hdev->le_adv_max_interval,
2310                                    mesh_tx->handle);
2311
2312         if (!IS_ERR(adv))
2313                 mesh_tx->instance = instance;
2314         else
2315                 err = PTR_ERR(adv);
2316
2317         if (hdev->cur_adv_instance == instance) {
2318                 /* If the currently advertised instance is being changed then
2319                  * cancel the current advertising and schedule the next
2320                  * instance. If there is only one instance then the overridden
2321                  * advertising data will be visible right away.
2322                  */
2323                 cancel_adv_timeout(hdev);
2324
2325                 next_instance = hci_get_next_instance(hdev, instance);
2326                 if (next_instance)
2327                         instance = next_instance->instance;
2328                 else
2329                         instance = 0;
2330         } else if (hdev->adv_instance_timeout) {
2331                 /* Advertise the new instance immediately if no other is
2332                  * active, or let the queue bring it up if advertising is
2333                  * already in progress. */
2334                 instance = 0;
2335         }
2336
2337         if (instance)
2338                 return hci_schedule_adv_instance_sync(hdev, instance, true);
2339
2340         return err;
2341 }
2342
2343 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2344 {
2345         struct mgmt_rp_mesh_read_features *rp = data;
2346
2347         if (rp->used_handles >= rp->max_handles)
2348                 return;
2349
2350         rp->handles[rp->used_handles++] = mesh_tx->handle;
2351 }
2352
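/* MESH_READ_FEATURES response: the reply is trimmed so that only the mesh
 * handles actually in use are returned, i.e. the fixed part of the reply
 * plus used_handles bytes of the MESH_HANDLES_MAX-sized handle array.
 */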
2353 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2354                          void *data, u16 len)
2355 {
2356         struct mgmt_rp_mesh_read_features rp;
2357
2358         if (!lmp_le_capable(hdev) ||
2359             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2360                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2361                                        MGMT_STATUS_NOT_SUPPORTED);
2362
2363         memset(&rp, 0, sizeof(rp));
2364         rp.index = cpu_to_le16(hdev->id);
2365         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2366                 rp.max_handles = MESH_HANDLES_MAX;
2367
2368         hci_dev_lock(hdev);
2369
2370         if (rp.max_handles)
2371                 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2372
2373         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2374                           rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2375
2376         hci_dev_unlock(hdev);
2377         return 0;
2378 }
2379
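/* MESH_SEND_CANCEL with handle 0 cancels every outstanding mesh
 * transmission owned by the requesting socket; a non-zero handle cancels
 * only that transmission, and only if it belongs to the same socket.
 */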
2380 static int send_cancel(struct hci_dev *hdev, void *data)
2381 {
2382         struct mgmt_pending_cmd *cmd = data;
2383         struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2384         struct mgmt_mesh_tx *mesh_tx;
2385
2386         if (!cancel->handle) {
2387                 do {
2388                         mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2389
2390                         if (mesh_tx)
2391                                 mesh_send_complete(hdev, mesh_tx, false);
2392                 } while (mesh_tx);
2393         } else {
2394                 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2395
2396                 if (mesh_tx && mesh_tx->sk == cmd->sk)
2397                         mesh_send_complete(hdev, mesh_tx, false);
2398         }
2399
2400         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2401                           0, NULL, 0);
2402         mgmt_pending_free(cmd);
2403
2404         return 0;
2405 }
2406
2407 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2408                             void *data, u16 len)
2409 {
2410         struct mgmt_pending_cmd *cmd;
2411         int err;
2412
2413         if (!lmp_le_capable(hdev) ||
2414             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2415                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2416                                        MGMT_STATUS_NOT_SUPPORTED);
2417
2418         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2419                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2420                                        MGMT_STATUS_REJECTED);
2421
2422         hci_dev_lock(hdev);
2423         cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2424         if (!cmd)
2425                 err = -ENOMEM;
2426         else
2427                 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2428
2429         if (err < 0) {
2430                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2431                                       MGMT_STATUS_FAILED);
2432
2433                 if (cmd)
2434                         mgmt_pending_free(cmd);
2435         }
2436
2437         hci_dev_unlock(hdev);
2438         return err;
2439 }
2440
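/* MESH_SEND accepts between 1 and 31 bytes of advertising payload on top of
 * the fixed mgmt_cp_mesh_send header; an empty or oversized payload is
 * rejected before a transmission handle is allocated.
 */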
2441 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2442 {
2443         struct mgmt_mesh_tx *mesh_tx;
2444         struct mgmt_cp_mesh_send *send = data;
2445         struct mgmt_rp_mesh_read_features rp;
2446         bool sending;
2447         int err = 0;
2448
2449         if (!lmp_le_capable(hdev) ||
2450             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2451                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2452                                        MGMT_STATUS_NOT_SUPPORTED);
2453         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2454             len <= MGMT_MESH_SEND_SIZE ||
2455             len > (MGMT_MESH_SEND_SIZE + 31))
2456                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2457                                        MGMT_STATUS_REJECTED);
2458
2459         hci_dev_lock(hdev);
2460
2461         memset(&rp, 0, sizeof(rp));
2462         rp.max_handles = MESH_HANDLES_MAX;
2463
2464         mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2465
2466         if (rp.max_handles <= rp.used_handles) {
2467                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2468                                       MGMT_STATUS_BUSY);
2469                 goto done;
2470         }
2471
2472         sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2473         mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2474
2475         if (!mesh_tx)
2476                 err = -ENOMEM;
2477         else if (!sending)
2478                 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2479                                          mesh_send_start_complete);
2480
2481         if (err < 0) {
2482                 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2483                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2484                                       MGMT_STATUS_FAILED);
2485
2486                 if (mesh_tx) {
2487                         if (sending)
2488                                 mgmt_mesh_remove(mesh_tx);
2489                 }
2490         } else {
2491                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2492
2493                 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2494                                   &mesh_tx->handle, 1);
2495         }
2496
2497 done:
2498         hci_dev_unlock(hdev);
2499         return err;
2500 }
2501
2502 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2503 {
2504         struct mgmt_mode *cp = data;
2505         struct mgmt_pending_cmd *cmd;
2506         int err;
2507         u8 val, enabled;
2508
2509         bt_dev_dbg(hdev, "sock %p", sk);
2510
2511         if (!lmp_le_capable(hdev))
2512                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2513                                        MGMT_STATUS_NOT_SUPPORTED);
2514
2515         if (cp->val != 0x00 && cp->val != 0x01)
2516                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2517                                        MGMT_STATUS_INVALID_PARAMS);
2518
2519         /* Bluetooth single-mode LE-only controllers, or dual-mode
2520          * controllers configured as LE-only devices, do not allow
2521          * switching LE off. These have either LE enabled explicitly
2522          * or BR/EDR has previously been switched off.
2523          *
2524          * When trying to enable LE while it is already enabled,
2525          * gracefully send a positive response. Trying to disable it,
2526          * however, will result in rejection.
2527          */
2528         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2529                 if (cp->val == 0x01)
2530                         return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2531
2532                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2533                                        MGMT_STATUS_REJECTED);
2534         }
2535
2536         hci_dev_lock(hdev);
2537
2538         val = !!cp->val;
2539         enabled = lmp_host_le_capable(hdev);
2540
2541         if (!hdev_is_powered(hdev) || val == enabled) {
2542                 bool changed = false;
2543
2544                 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2545                         hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2546                         changed = true;
2547                 }
2548
2549                 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2550                         hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2551                         changed = true;
2552                 }
2553
2554                 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2555                 if (err < 0)
2556                         goto unlock;
2557
2558                 if (changed)
2559                         err = new_settings(hdev, sk);
2560
2561                 goto unlock;
2562         }
2563
2564         if (pending_find(MGMT_OP_SET_LE, hdev) ||
2565             pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2566                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2567                                       MGMT_STATUS_BUSY);
2568                 goto unlock;
2569         }
2570
2571         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2572         if (!cmd)
2573                 err = -ENOMEM;
2574         else
2575                 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2576                                          set_le_complete);
2577
2578         if (err < 0) {
2579                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2580                                       MGMT_STATUS_FAILED);
2581
2582                 if (cmd)
2583                         mgmt_pending_remove(cmd);
2584         }
2585
2586 unlock:
2587         hci_dev_unlock(hdev);
2588         return err;
2589 }
2590
2591 /* This is a helper function to test for pending mgmt commands that can
2592  * cause CoD or EIR HCI commands. Only one such pending mgmt command is
2593  * allowed at a time since otherwise we cannot easily track what the
2594  * current and future values are, and based on that decide whether a new
2595  * HCI command needs to be sent and, if so, with what value.
2596  */
2597 static bool pending_eir_or_class(struct hci_dev *hdev)
2598 {
2599         struct mgmt_pending_cmd *cmd;
2600
2601         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2602                 switch (cmd->opcode) {
2603                 case MGMT_OP_ADD_UUID:
2604                 case MGMT_OP_REMOVE_UUID:
2605                 case MGMT_OP_SET_DEV_CLASS:
2606                 case MGMT_OP_SET_POWERED:
2607                         return true;
2608                 }
2609         }
2610
2611         return false;
2612 }
2613
2614 static const u8 bluetooth_base_uuid[] = {
2615                         0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2616                         0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2617 };
2618
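/* UUIDs are stored little-endian relative to the Bluetooth Base UUID above.
 * If the first 12 bytes differ from the base UUID the value is a full
 * 128-bit UUID; otherwise the remaining 32 bits decide between a 16-bit and
 * a 32-bit UUID.
 */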
2619 static u8 get_uuid_size(const u8 *uuid)
2620 {
2621         u32 val;
2622
2623         if (memcmp(uuid, bluetooth_base_uuid, 12))
2624                 return 128;
2625
2626         val = get_unaligned_le32(&uuid[12]);
2627         if (val > 0xffff)
2628                 return 32;
2629
2630         return 16;
2631 }
2632
2633 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2634 {
2635         struct mgmt_pending_cmd *cmd = data;
2636
2637         bt_dev_dbg(hdev, "err %d", err);
2638
2639         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2640                           mgmt_status(err), hdev->dev_class, 3);
2641
2642         mgmt_pending_free(cmd);
2643 }
2644
2645 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2646 {
2647         int err;
2648
2649         err = hci_update_class_sync(hdev);
2650         if (err)
2651                 return err;
2652
2653         return hci_update_eir_sync(hdev);
2654 }
2655
2656 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2657 {
2658         struct mgmt_cp_add_uuid *cp = data;
2659         struct mgmt_pending_cmd *cmd;
2660         struct bt_uuid *uuid;
2661         int err;
2662
2663         bt_dev_dbg(hdev, "sock %p", sk);
2664
2665         hci_dev_lock(hdev);
2666
2667         if (pending_eir_or_class(hdev)) {
2668                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2669                                       MGMT_STATUS_BUSY);
2670                 goto failed;
2671         }
2672
2673         uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2674         if (!uuid) {
2675                 err = -ENOMEM;
2676                 goto failed;
2677         }
2678
2679         memcpy(uuid->uuid, cp->uuid, 16);
2680         uuid->svc_hint = cp->svc_hint;
2681         uuid->size = get_uuid_size(cp->uuid);
2682
2683         list_add_tail(&uuid->list, &hdev->uuids);
2684
2685         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2686         if (!cmd) {
2687                 err = -ENOMEM;
2688                 goto failed;
2689         }
2690
2691         err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2692         if (err < 0) {
2693                 mgmt_pending_free(cmd);
2694                 goto failed;
2695         }
2696
2697 failed:
2698         hci_dev_unlock(hdev);
2699         return err;
2700 }
2701
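/* When the whole UUID list is cleared while the controller is powered, a
 * delayed service-cache update is armed instead of rewriting the class and
 * EIR immediately, so that a following burst of UUID changes can be folded
 * into a single update.
 */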
2702 static bool enable_service_cache(struct hci_dev *hdev)
2703 {
2704         if (!hdev_is_powered(hdev))
2705                 return false;
2706
2707         if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2708                 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2709                                    CACHE_TIMEOUT);
2710                 return true;
2711         }
2712
2713         return false;
2714 }
2715
2716 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2717 {
2718         int err;
2719
2720         err = hci_update_class_sync(hdev);
2721         if (err)
2722                 return err;
2723
2724         return hci_update_eir_sync(hdev);
2725 }
2726
2727 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2728                        u16 len)
2729 {
2730         struct mgmt_cp_remove_uuid *cp = data;
2731         struct mgmt_pending_cmd *cmd;
2732         struct bt_uuid *match, *tmp;
2733         static const u8 bt_uuid_any[] = {
2734                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2735         };
2736         int err, found;
2737
2738         bt_dev_dbg(hdev, "sock %p", sk);
2739
2740         hci_dev_lock(hdev);
2741
2742         if (pending_eir_or_class(hdev)) {
2743                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2744                                       MGMT_STATUS_BUSY);
2745                 goto unlock;
2746         }
2747
2748         if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2749                 hci_uuids_clear(hdev);
2750
2751                 if (enable_service_cache(hdev)) {
2752                         err = mgmt_cmd_complete(sk, hdev->id,
2753                                                 MGMT_OP_REMOVE_UUID,
2754                                                 0, hdev->dev_class, 3);
2755                         goto unlock;
2756                 }
2757
2758                 goto update_class;
2759         }
2760
2761         found = 0;
2762
2763         list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2764                 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2765                         continue;
2766
2767                 list_del(&match->list);
2768                 kfree(match);
2769                 found++;
2770         }
2771
2772         if (found == 0) {
2773                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2774                                       MGMT_STATUS_INVALID_PARAMS);
2775                 goto unlock;
2776         }
2777
2778 update_class:
2779         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2780         if (!cmd) {
2781                 err = -ENOMEM;
2782                 goto unlock;
2783         }
2784
2785         err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2786                                  mgmt_class_complete);
2787         if (err < 0)
2788                 mgmt_pending_free(cmd);
2789
2790 unlock:
2791         hci_dev_unlock(hdev);
2792         return err;
2793 }
2794
2795 static int set_class_sync(struct hci_dev *hdev, void *data)
2796 {
2797         int err = 0;
2798
2799         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2800                 cancel_delayed_work_sync(&hdev->service_cache);
2801                 err = hci_update_eir_sync(hdev);
2802         }
2803
2804         if (err)
2805                 return err;
2806
2807         return hci_update_class_sync(hdev);
2808 }
2809
2810 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2811                          u16 len)
2812 {
2813         struct mgmt_cp_set_dev_class *cp = data;
2814         struct mgmt_pending_cmd *cmd;
2815         int err;
2816
2817         bt_dev_dbg(hdev, "sock %p", sk);
2818
2819         if (!lmp_bredr_capable(hdev))
2820                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2821                                        MGMT_STATUS_NOT_SUPPORTED);
2822
2823         hci_dev_lock(hdev);
2824
2825         if (pending_eir_or_class(hdev)) {
2826                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2827                                       MGMT_STATUS_BUSY);
2828                 goto unlock;
2829         }
2830
2831         if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2832                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2833                                       MGMT_STATUS_INVALID_PARAMS);
2834                 goto unlock;
2835         }
2836
2837         hdev->major_class = cp->major;
2838         hdev->minor_class = cp->minor;
2839
2840         if (!hdev_is_powered(hdev)) {
2841                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2842                                         hdev->dev_class, 3);
2843                 goto unlock;
2844         }
2845
2846         cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2847         if (!cmd) {
2848                 err = -ENOMEM;
2849                 goto unlock;
2850         }
2851
2852         err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2853                                  mgmt_class_complete);
2854         if (err < 0)
2855                 mgmt_pending_free(cmd);
2856
2857 unlock:
2858         hci_dev_unlock(hdev);
2859         return err;
2860 }
2861
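/* LOAD_LINK_KEYS carries a struct mgmt_cp_load_link_keys header followed by
 * key_count struct mgmt_link_key_info entries; the total length is checked
 * against struct_size() before the existing key list is replaced.
 */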
2862 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2863                           u16 len)
2864 {
2865         struct mgmt_cp_load_link_keys *cp = data;
2866         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2867                                    sizeof(struct mgmt_link_key_info));
2868         u16 key_count, expected_len;
2869         bool changed;
2870         int i;
2871
2872         bt_dev_dbg(hdev, "sock %p", sk);
2873
2874         if (!lmp_bredr_capable(hdev))
2875                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2876                                        MGMT_STATUS_NOT_SUPPORTED);
2877
2878         key_count = __le16_to_cpu(cp->key_count);
2879         if (key_count > max_key_count) {
2880                 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2881                            key_count);
2882                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2883                                        MGMT_STATUS_INVALID_PARAMS);
2884         }
2885
2886         expected_len = struct_size(cp, keys, key_count);
2887         if (expected_len != len) {
2888                 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2889                            expected_len, len);
2890                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2891                                        MGMT_STATUS_INVALID_PARAMS);
2892         }
2893
2894         if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2895                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2896                                        MGMT_STATUS_INVALID_PARAMS);
2897
2898         bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2899                    key_count);
2900
2901         for (i = 0; i < key_count; i++) {
2902                 struct mgmt_link_key_info *key = &cp->keys[i];
2903
2904                 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2905                 if (key->type > 0x08)
2906                         return mgmt_cmd_status(sk, hdev->id,
2907                                                MGMT_OP_LOAD_LINK_KEYS,
2908                                                MGMT_STATUS_INVALID_PARAMS);
2909         }
2910
2911         hci_dev_lock(hdev);
2912
2913         hci_link_keys_clear(hdev);
2914
2915         if (cp->debug_keys)
2916                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2917         else
2918                 changed = hci_dev_test_and_clear_flag(hdev,
2919                                                       HCI_KEEP_DEBUG_KEYS);
2920
2921         if (changed)
2922                 new_settings(hdev, NULL);
2923
2924         for (i = 0; i < key_count; i++) {
2925                 struct mgmt_link_key_info *key = &cp->keys[i];
2926
2927                 if (hci_is_blocked_key(hdev,
2928                                        HCI_BLOCKED_KEY_TYPE_LINKKEY,
2929                                        key->val)) {
2930                         bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2931                                     &key->addr.bdaddr);
2932                         continue;
2933                 }
2934
2935                 /* Always ignore debug keys and require a new pairing if
2936                  * the user wants to use them.
2937                  */
2938                 if (key->type == HCI_LK_DEBUG_COMBINATION)
2939                         continue;
2940
2941                 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2942                                  key->type, key->pin_len, NULL);
2943         }
2944
2945         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2946
2947         hci_dev_unlock(hdev);
2948
2949         return 0;
2950 }
2951
2952 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2953                            u8 addr_type, struct sock *skip_sk)
2954 {
2955         struct mgmt_ev_device_unpaired ev;
2956
2957         bacpy(&ev.addr.bdaddr, bdaddr);
2958         ev.addr.type = addr_type;
2959
2960         return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2961                           skip_sk);
2962 }
2963
2964 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2965 {
2966         struct mgmt_pending_cmd *cmd = data;
2967         struct mgmt_cp_unpair_device *cp = cmd->param;
2968
2969         if (!err)
2970                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2971
2972         cmd->cmd_complete(cmd, err);
2973         mgmt_pending_free(cmd);
2974 }
2975
2976 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2977 {
2978         struct mgmt_pending_cmd *cmd = data;
2979         struct mgmt_cp_unpair_device *cp = cmd->param;
2980         struct hci_conn *conn;
2981
2982         if (cp->addr.type == BDADDR_BREDR)
2983                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2984                                                &cp->addr.bdaddr);
2985         else
2986                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2987                                                le_addr_type(cp->addr.type));
2988
2989         if (!conn)
2990                 return 0;
2991
2992         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2993 }
2994
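/* UNPAIR_DEVICE removes the stored keys for a device and optionally
 * disconnects it: for BR/EDR the link key is dropped, for LE any ongoing
 * SMP pairing is aborted and the LTK/IRK removed before the connection
 * parameters are scheduled for removal.
 */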
2995 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2996                          u16 len)
2997 {
2998         struct mgmt_cp_unpair_device *cp = data;
2999         struct mgmt_rp_unpair_device rp;
3000         struct hci_conn_params *params;
3001         struct mgmt_pending_cmd *cmd;
3002         struct hci_conn *conn;
3003         u8 addr_type;
3004         int err;
3005
3006         memset(&rp, 0, sizeof(rp));
3007         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3008         rp.addr.type = cp->addr.type;
3009
3010         if (!bdaddr_type_is_valid(cp->addr.type))
3011                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3012                                          MGMT_STATUS_INVALID_PARAMS,
3013                                          &rp, sizeof(rp));
3014
3015         if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3016                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3017                                          MGMT_STATUS_INVALID_PARAMS,
3018                                          &rp, sizeof(rp));
3019
3020         hci_dev_lock(hdev);
3021
3022         if (!hdev_is_powered(hdev)) {
3023                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3024                                         MGMT_STATUS_NOT_POWERED, &rp,
3025                                         sizeof(rp));
3026                 goto unlock;
3027         }
3028
3029         if (cp->addr.type == BDADDR_BREDR) {
3030                 /* If disconnection is requested, then look up the
3031                  * connection. If the remote device is connected, it
3032                  * will later be used to terminate the link.
3033                  *
3034                  * Leaving it set to NULL means that the link will
3035                  * not be terminated.
3036                  */
3037                 if (cp->disconnect)
3038                         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3039                                                        &cp->addr.bdaddr);
3040                 else
3041                         conn = NULL;
3042
3043                 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3044                 if (err < 0) {
3045                         err = mgmt_cmd_complete(sk, hdev->id,
3046                                                 MGMT_OP_UNPAIR_DEVICE,
3047                                                 MGMT_STATUS_NOT_PAIRED, &rp,
3048                                                 sizeof(rp));
3049                         goto unlock;
3050                 }
3051
3052                 goto done;
3053         }
3054
3055         /* LE address type */
3056         addr_type = le_addr_type(cp->addr.type);
3057
3058         /* Abort any ongoing SMP pairing. Removes the LTK and IRK if they exist. */
3059         err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3060         if (err < 0) {
3061                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3062                                         MGMT_STATUS_NOT_PAIRED, &rp,
3063                                         sizeof(rp));
3064                 goto unlock;
3065         }
3066
3067         conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3068         if (!conn) {
3069                 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3070                 goto done;
3071         }
3072
3073
3074         /* Defer clearing up the connection parameters until closing to
3075          * give a chance of keeping them if a repairing happens.
3076          * give a chance of keeping them if a re-pairing happens.
3077         set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3078
3079         /* Disable auto-connection parameters if present */
3080         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3081         if (params) {
3082                 if (params->explicit_connect)
3083                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3084                 else
3085                         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3086         }
3087
3088         /* If disconnection is not requested, then clear the connection
3089          * variable so that the link is not terminated.
3090          */
3091         if (!cp->disconnect)
3092                 conn = NULL;
3093
3094 done:
3095         /* If the connection variable is set, then termination of the
3096          * link is requested.
3097          */
3098         if (!conn) {
3099                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3100                                         &rp, sizeof(rp));
3101                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3102                 goto unlock;
3103         }
3104
3105         cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3106                                sizeof(*cp));
3107         if (!cmd) {
3108                 err = -ENOMEM;
3109                 goto unlock;
3110         }
3111
3112         cmd->cmd_complete = addr_cmd_complete;
3113
3114         err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3115                                  unpair_device_complete);
3116         if (err < 0)
3117                 mgmt_pending_free(cmd);
3118
3119 unlock:
3120         hci_dev_unlock(hdev);
3121         return err;
3122 }
3123
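/* Illustrative sketch, not kernel code: a userspace manager would issue the
 * Unpair Device command above over the HCI control channel roughly like
 * this. Struct and constant names follow the mgmt/HCI headers and BlueZ's
 * library helpers; peer_bdaddr and the use of index 0 (hci0) are
 * assumptions, and error handling is omitted.
 *
 *	struct {
 *		struct mgmt_hdr hdr;
 *		struct mgmt_cp_unpair_device cp;
 *	} __packed req;
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci sa = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 *	req.hdr.opcode = htobs(MGMT_OP_UNPAIR_DEVICE);
 *	req.hdr.index  = htobs(0);			// hci0
 *	req.hdr.len    = htobs(sizeof(req.cp));
 *	bacpy(&req.cp.addr.bdaddr, &peer_bdaddr);	// assumed peer address
 *	req.cp.addr.type = BDADDR_LE_PUBLIC;
 *	req.cp.disconnect = 0x01;			// also terminate the link
 *	write(fd, &req, sizeof(req));
 *
 * The command completes immediately when no link exists; otherwise the
 * reply is sent from unpair_device_complete() after the disconnection.
 */
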
3124 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3125                       u16 len)
3126 {
3127         struct mgmt_cp_disconnect *cp = data;
3128         struct mgmt_rp_disconnect rp;
3129         struct mgmt_pending_cmd *cmd;
3130         struct hci_conn *conn;
3131         int err;
3132
3133         bt_dev_dbg(hdev, "sock %p", sk);
3134
3135         memset(&rp, 0, sizeof(rp));
3136         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3137         rp.addr.type = cp->addr.type;
3138
3139         if (!bdaddr_type_is_valid(cp->addr.type))
3140                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3141                                          MGMT_STATUS_INVALID_PARAMS,
3142                                          &rp, sizeof(rp));
3143
3144         hci_dev_lock(hdev);
3145
3146         if (!test_bit(HCI_UP, &hdev->flags)) {
3147                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3148                                         MGMT_STATUS_NOT_POWERED, &rp,
3149                                         sizeof(rp));
3150                 goto failed;
3151         }
3152
3153         if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3154                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3155                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3156                 goto failed;
3157         }
3158
3159         if (cp->addr.type == BDADDR_BREDR)
3160                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3161                                                &cp->addr.bdaddr);
3162         else
3163                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3164                                                le_addr_type(cp->addr.type));
3165
3166         if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3167                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3168                                         MGMT_STATUS_NOT_CONNECTED, &rp,
3169                                         sizeof(rp));
3170                 goto failed;
3171         }
3172
3173         cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3174         if (!cmd) {
3175                 err = -ENOMEM;
3176                 goto failed;
3177         }
3178
3179         cmd->cmd_complete = generic_cmd_complete;
3180
3181         err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3182         if (err < 0)
3183                 mgmt_pending_remove(cmd);
3184
3185 failed:
3186         hci_dev_unlock(hdev);
3187         return err;
3188 }
3189
3190 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3191 {
3192         switch (link_type) {
3193         case LE_LINK:
3194                 switch (addr_type) {
3195                 case ADDR_LE_DEV_PUBLIC:
3196                         return BDADDR_LE_PUBLIC;
3197
3198                 default:
3199                         /* Fall back to the LE Random address type */
3200                         return BDADDR_LE_RANDOM;
3201                 }
3202
3203         default:
3204                 /* Fall back to the BR/EDR type */
3205                 return BDADDR_BREDR;
3206         }
3207 }
3208
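/* Quick reference for the mapping above, i.e. HCI link/address type to the
 * address type used in struct mgmt_addr_info:
 *
 *	LE_LINK + ADDR_LE_DEV_PUBLIC	-> BDADDR_LE_PUBLIC (0x01)
 *	LE_LINK + any other value	-> BDADDR_LE_RANDOM (0x02)
 *	any other link type		-> BDADDR_BREDR     (0x00)
 */
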
3209 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3210                            u16 data_len)
3211 {
3212         struct mgmt_rp_get_connections *rp;
3213         struct hci_conn *c;
3214         int err;
3215         u16 i;
3216
3217         bt_dev_dbg(hdev, "sock %p", sk);
3218
3219         hci_dev_lock(hdev);
3220
3221         if (!hdev_is_powered(hdev)) {
3222                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3223                                       MGMT_STATUS_NOT_POWERED);
3224                 goto unlock;
3225         }
3226
3227         i = 0;
3228         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3229                 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3230                         i++;
3231         }
3232
3233         rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3234         if (!rp) {
3235                 err = -ENOMEM;
3236                 goto unlock;
3237         }
3238
3239         i = 0;
3240         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3241                 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3242                         continue;
3243                 bacpy(&rp->addr[i].bdaddr, &c->dst);
3244                 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3245                 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3246                         continue;
3247                 i++;
3248         }
3249
3250         rp->conn_count = cpu_to_le16(i);
3251
3252         /* Recalculate the length in case SCO/eSCO connections were filtered out */
3253         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3254                                 struct_size(rp, addr, i));
3255
3256         kfree(rp);
3257
3258 unlock:
3259         hci_dev_unlock(hdev);
3260         return err;
3261 }
3262
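/* For reference, the Get Connections reply built above is a count followed
 * by a flexible array of addresses:
 *
 *	struct mgmt_rp_get_connections {
 *		__le16			conn_count;
 *		struct mgmt_addr_info	addr[];	// 6-byte bdaddr + 1-byte type
 *	};
 *
 * The buffer is allocated for every MGMT-visible connection, but SCO and
 * eSCO links are skipped while filling it, which is why the final reply
 * length is recomputed with struct_size(rp, addr, i) rather than reusing
 * the allocation size.
 */
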
3263 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3264                                    struct mgmt_cp_pin_code_neg_reply *cp)
3265 {
3266         struct mgmt_pending_cmd *cmd;
3267         int err;
3268
3269         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3270                                sizeof(*cp));
3271         if (!cmd)
3272                 return -ENOMEM;
3273
3274         cmd->cmd_complete = addr_cmd_complete;
3275
3276         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3277                            sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3278         if (err < 0)
3279                 mgmt_pending_remove(cmd);
3280
3281         return err;
3282 }
3283
3284 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3285                           u16 len)
3286 {
3287         struct hci_conn *conn;
3288         struct mgmt_cp_pin_code_reply *cp = data;
3289         struct hci_cp_pin_code_reply reply;
3290         struct mgmt_pending_cmd *cmd;
3291         int err;
3292
3293         bt_dev_dbg(hdev, "sock %p", sk);
3294
3295         hci_dev_lock(hdev);
3296
3297         if (!hdev_is_powered(hdev)) {
3298                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3299                                       MGMT_STATUS_NOT_POWERED);
3300                 goto failed;
3301         }
3302
3303         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3304         if (!conn) {
3305                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3306                                       MGMT_STATUS_NOT_CONNECTED);
3307                 goto failed;
3308         }
3309
3310         if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3311                 struct mgmt_cp_pin_code_neg_reply ncp;
3312
3313                 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3314
3315                 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3316
3317                 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3318                 if (err >= 0)
3319                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3320                                               MGMT_STATUS_INVALID_PARAMS);
3321
3322                 goto failed;
3323         }
3324
3325         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3326         if (!cmd) {
3327                 err = -ENOMEM;
3328                 goto failed;
3329         }
3330
3331         cmd->cmd_complete = addr_cmd_complete;
3332
3333         bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3334         reply.pin_len = cp->pin_len;
3335         memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3336
3337         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3338         if (err < 0)
3339                 mgmt_pending_remove(cmd);
3340
3341 failed:
3342         hci_dev_unlock(hdev);
3343         return err;
3344 }
3345
3346 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3347                              u16 len)
3348 {
3349         struct mgmt_cp_set_io_capability *cp = data;
3350
3351         bt_dev_dbg(hdev, "sock %p", sk);
3352
3353         if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3354                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3355                                        MGMT_STATUS_INVALID_PARAMS);
3356
3357         hci_dev_lock(hdev);
3358
3359         hdev->io_capability = cp->io_capability;
3360
3361         bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3362
3363         hci_dev_unlock(hdev);
3364
3365         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3366                                  NULL, 0);
3367 }
3368
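/* The io_capability values accepted above mirror the SMP/SSP definitions,
 * which is why anything above SMP_IO_KEYBOARD_DISPLAY is rejected:
 *
 *	0x00	DisplayOnly
 *	0x01	DisplayYesNo
 *	0x02	KeyboardOnly
 *	0x03	NoInputNoOutput
 *	0x04	KeyboardDisplay
 */
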
3369 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3370 {
3371         struct hci_dev *hdev = conn->hdev;
3372         struct mgmt_pending_cmd *cmd;
3373
3374         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3375                 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3376                         continue;
3377
3378                 if (cmd->user_data != conn)
3379                         continue;
3380
3381                 return cmd;
3382         }
3383
3384         return NULL;
3385 }
3386
3387 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3388 {
3389         struct mgmt_rp_pair_device rp;
3390         struct hci_conn *conn = cmd->user_data;
3391         int err;
3392
3393         bacpy(&rp.addr.bdaddr, &conn->dst);
3394         rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3395
3396         err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3397                                 status, &rp, sizeof(rp));
3398
3399         /* So we don't get further callbacks for this connection */
3400         conn->connect_cfm_cb = NULL;
3401         conn->security_cfm_cb = NULL;
3402         conn->disconn_cfm_cb = NULL;
3403
3404         hci_conn_drop(conn);
3405
3406         /* The device is paired so there is no need to remove
3407          * its connection parameters anymore.
3408          */
3409         clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3410
3411         hci_conn_put(conn);
3412
3413         return err;
3414 }
3415
3416 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3417 {
3418         u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3419         struct mgmt_pending_cmd *cmd;
3420
3421         cmd = find_pairing(conn);
3422         if (cmd) {
3423                 cmd->cmd_complete(cmd, status);
3424                 mgmt_pending_remove(cmd);
3425         }
3426 }
3427
3428 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3429 {
3430         struct mgmt_pending_cmd *cmd;
3431
3432         BT_DBG("status %u", status);
3433
3434         cmd = find_pairing(conn);
3435         if (!cmd) {
3436                 BT_DBG("Unable to find a pending command");
3437                 return;
3438         }
3439
3440         cmd->cmd_complete(cmd, mgmt_status(status));
3441         mgmt_pending_remove(cmd);
3442 }
3443
3444 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3445 {
3446         struct mgmt_pending_cmd *cmd;
3447
3448         BT_DBG("status %u", status);
3449
3450         if (!status)
3451                 return;
3452
3453         cmd = find_pairing(conn);
3454         if (!cmd) {
3455                 BT_DBG("Unable to find a pending command");
3456                 return;
3457         }
3458
3459         cmd->cmd_complete(cmd, mgmt_status(status));
3460         mgmt_pending_remove(cmd);
3461 }
3462
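/* Note on the two completion callbacks above: for BR/EDR the same handler
 * serves the connect, security and disconnect callbacks, since any of them
 * ends the pairing attempt one way or the other. For LE,
 * le_pairing_complete_cb() deliberately returns early on success because a
 * completed connection (or re-encryption) is not yet a completed pairing;
 * the final result is delivered through mgmt_smp_complete() once SMP itself
 * finishes.
 */
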
3463 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3464                        u16 len)
3465 {
3466         struct mgmt_cp_pair_device *cp = data;
3467         struct mgmt_rp_pair_device rp;
3468         struct mgmt_pending_cmd *cmd;
3469         u8 sec_level, auth_type;
3470         struct hci_conn *conn;
3471         int err;
3472
3473         bt_dev_dbg(hdev, "sock %p", sk);
3474
3475         memset(&rp, 0, sizeof(rp));
3476         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3477         rp.addr.type = cp->addr.type;
3478
3479         if (!bdaddr_type_is_valid(cp->addr.type))
3480                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3481                                          MGMT_STATUS_INVALID_PARAMS,
3482                                          &rp, sizeof(rp));
3483
3484         if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3485                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3486                                          MGMT_STATUS_INVALID_PARAMS,
3487                                          &rp, sizeof(rp));
3488
3489         hci_dev_lock(hdev);
3490
3491         if (!hdev_is_powered(hdev)) {
3492                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3493                                         MGMT_STATUS_NOT_POWERED, &rp,
3494                                         sizeof(rp));
3495                 goto unlock;
3496         }
3497
3498         if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3499                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3500                                         MGMT_STATUS_ALREADY_PAIRED, &rp,
3501                                         sizeof(rp));
3502                 goto unlock;
3503         }
3504
3505         sec_level = BT_SECURITY_MEDIUM;
3506         auth_type = HCI_AT_DEDICATED_BONDING;
3507
3508         if (cp->addr.type == BDADDR_BREDR) {
3509                 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3510                                        auth_type, CONN_REASON_PAIR_DEVICE);
3511         } else {
3512                 u8 addr_type = le_addr_type(cp->addr.type);
3513                 struct hci_conn_params *p;
3514
3515                 /* When pairing a new device, it is expected to remember
3516                  * this device for future connections. Adding the connection
3517                  * parameter information ahead of time allows tracking
3518                  * of the peripheral preferred values and will speed up any
3519                  * further connection establishment.
3520                  *
3521                  * If connection parameters already exist, then they
3522                  * will be kept and this function does nothing.
3523                  */
3524                 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3525
3526                 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3527                         p->auto_connect = HCI_AUTO_CONN_DISABLED;
3528
3529                 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3530                                            sec_level, HCI_LE_CONN_TIMEOUT,
3531                                            CONN_REASON_PAIR_DEVICE);
3532         }
3533
3534         if (IS_ERR(conn)) {
3535                 int status;
3536
3537                 if (PTR_ERR(conn) == -EBUSY)
3538                         status = MGMT_STATUS_BUSY;
3539                 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3540                         status = MGMT_STATUS_NOT_SUPPORTED;
3541                 else if (PTR_ERR(conn) == -ECONNREFUSED)
3542                         status = MGMT_STATUS_REJECTED;
3543                 else
3544                         status = MGMT_STATUS_CONNECT_FAILED;
3545
3546                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3547                                         status, &rp, sizeof(rp));
3548                 goto unlock;
3549         }
3550
3551         if (conn->connect_cfm_cb) {
3552                 hci_conn_drop(conn);
3553                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3554                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3555                 goto unlock;
3556         }
3557
3558         cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3559         if (!cmd) {
3560                 err = -ENOMEM;
3561                 hci_conn_drop(conn);
3562                 goto unlock;
3563         }
3564
3565         cmd->cmd_complete = pairing_complete;
3566
3567         /* For LE, just connecting isn't proof that the pairing has finished */
3568         if (cp->addr.type == BDADDR_BREDR) {
3569                 conn->connect_cfm_cb = pairing_complete_cb;
3570                 conn->security_cfm_cb = pairing_complete_cb;
3571                 conn->disconn_cfm_cb = pairing_complete_cb;
3572         } else {
3573                 conn->connect_cfm_cb = le_pairing_complete_cb;
3574                 conn->security_cfm_cb = le_pairing_complete_cb;
3575                 conn->disconn_cfm_cb = le_pairing_complete_cb;
3576         }
3577
3578         conn->io_capability = cp->io_cap;
3579         cmd->user_data = hci_conn_get(conn);
3580
3581         if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3582             hci_conn_security(conn, sec_level, auth_type, true)) {
3583                 cmd->cmd_complete(cmd, 0);
3584                 mgmt_pending_remove(cmd);
3585         }
3586
3587         err = 0;
3588
3589 unlock:
3590         hci_dev_unlock(hdev);
3591         return err;
3592 }
3593
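/* Illustrative sketch, not kernel code: the Pair Device command handled
 * above carries the peer address plus an IO capability that applies to this
 * pairing only. peer_bdaddr is an assumed variable:
 *
 *	struct mgmt_cp_pair_device cp = {
 *		.addr.type = BDADDR_LE_RANDOM,
 *		.io_cap    = 0x03,	// NoInputNoOutput, "just works"
 *	};
 *	bacpy(&cp.addr.bdaddr, &peer_bdaddr);
 *
 * The reply is deferred: it is only generated by pairing_complete() once
 * the link and the SMP/SSP procedure have both finished (or failed).
 */
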
3594 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3595                               u16 len)
3596 {
3597         struct mgmt_addr_info *addr = data;
3598         struct mgmt_pending_cmd *cmd;
3599         struct hci_conn *conn;
3600         int err;
3601
3602         bt_dev_dbg(hdev, "sock %p", sk);
3603
3604         hci_dev_lock(hdev);
3605
3606         if (!hdev_is_powered(hdev)) {
3607                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3608                                       MGMT_STATUS_NOT_POWERED);
3609                 goto unlock;
3610         }
3611
3612         cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3613         if (!cmd) {
3614                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3615                                       MGMT_STATUS_INVALID_PARAMS);
3616                 goto unlock;
3617         }
3618
3619         conn = cmd->user_data;
3620
3621         if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3622                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3623                                       MGMT_STATUS_INVALID_PARAMS);
3624                 goto unlock;
3625         }
3626
3627         cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3628         mgmt_pending_remove(cmd);
3629
3630         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3631                                 addr, sizeof(*addr));
3632
3633         /* Since the user doesn't want to proceed with the connection, abort any
3634          * ongoing pairing and then terminate the link if it was created
3635          * because of the pair device action.
3636          */
3637         if (addr->type == BDADDR_BREDR)
3638                 hci_remove_link_key(hdev, &addr->bdaddr);
3639         else
3640                 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3641                                               le_addr_type(addr->type));
3642
3643         if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3644                 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3645
3646 unlock:
3647         hci_dev_unlock(hdev);
3648         return err;
3649 }
3650
3651 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3652                              struct mgmt_addr_info *addr, u16 mgmt_op,
3653                              u16 hci_op, __le32 passkey)
3654 {
3655         struct mgmt_pending_cmd *cmd;
3656         struct hci_conn *conn;
3657         int err;
3658
3659         hci_dev_lock(hdev);
3660
3661         if (!hdev_is_powered(hdev)) {
3662                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3663                                         MGMT_STATUS_NOT_POWERED, addr,
3664                                         sizeof(*addr));
3665                 goto done;
3666         }
3667
3668         if (addr->type == BDADDR_BREDR)
3669                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3670         else
3671                 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3672                                                le_addr_type(addr->type));
3673
3674         if (!conn) {
3675                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3676                                         MGMT_STATUS_NOT_CONNECTED, addr,
3677                                         sizeof(*addr));
3678                 goto done;
3679         }
3680
3681         if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3682                 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3683                 if (!err)
3684                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3685                                                 MGMT_STATUS_SUCCESS, addr,
3686                                                 sizeof(*addr));
3687                 else
3688                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3689                                                 MGMT_STATUS_FAILED, addr,
3690                                                 sizeof(*addr));
3691
3692                 goto done;
3693         }
3694
3695         cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3696         if (!cmd) {
3697                 err = -ENOMEM;
3698                 goto done;
3699         }
3700
3701         cmd->cmd_complete = addr_cmd_complete;
3702
3703         /* Continue with pairing via HCI */
3704         if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3705                 struct hci_cp_user_passkey_reply cp;
3706
3707                 bacpy(&cp.bdaddr, &addr->bdaddr);
3708                 cp.passkey = passkey;
3709                 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3710         } else
3711                 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3712                                    &addr->bdaddr);
3713
3714         if (err < 0)
3715                 mgmt_pending_remove(cmd);
3716
3717 done:
3718         hci_dev_unlock(hdev);
3719         return err;
3720 }
3721
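/* The reply helpers below all funnel into user_pairing_resp(). For BR/EDR
 * links the mgmt opcode maps directly onto an HCI command:
 *
 *	MGMT_OP_PIN_CODE_NEG_REPLY	-> HCI_OP_PIN_CODE_NEG_REPLY
 *	MGMT_OP_USER_CONFIRM_REPLY	-> HCI_OP_USER_CONFIRM_REPLY
 *	MGMT_OP_USER_CONFIRM_NEG_REPLY	-> HCI_OP_USER_CONFIRM_NEG_REPLY
 *	MGMT_OP_USER_PASSKEY_REPLY	-> HCI_OP_USER_PASSKEY_REPLY
 *	MGMT_OP_USER_PASSKEY_NEG_REPLY	-> HCI_OP_USER_PASSKEY_NEG_REPLY
 *
 * For LE addresses the HCI opcode is never sent; the response is handed to
 * SMP via smp_user_confirm_reply() instead.
 */
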
3722 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3723                               void *data, u16 len)
3724 {
3725         struct mgmt_cp_pin_code_neg_reply *cp = data;
3726
3727         bt_dev_dbg(hdev, "sock %p", sk);
3728
3729         return user_pairing_resp(sk, hdev, &cp->addr,
3730                                 MGMT_OP_PIN_CODE_NEG_REPLY,
3731                                 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3732 }
3733
3734 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3735                               u16 len)
3736 {
3737         struct mgmt_cp_user_confirm_reply *cp = data;
3738
3739         bt_dev_dbg(hdev, "sock %p", sk);
3740
3741         if (len != sizeof(*cp))
3742                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3743                                        MGMT_STATUS_INVALID_PARAMS);
3744
3745         return user_pairing_resp(sk, hdev, &cp->addr,
3746                                  MGMT_OP_USER_CONFIRM_REPLY,
3747                                  HCI_OP_USER_CONFIRM_REPLY, 0);
3748 }
3749
3750 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3751                                   void *data, u16 len)
3752 {
3753         struct mgmt_cp_user_confirm_neg_reply *cp = data;
3754
3755         bt_dev_dbg(hdev, "sock %p", sk);
3756
3757         return user_pairing_resp(sk, hdev, &cp->addr,
3758                                  MGMT_OP_USER_CONFIRM_NEG_REPLY,
3759                                  HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3760 }
3761
3762 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3763                               u16 len)
3764 {
3765         struct mgmt_cp_user_passkey_reply *cp = data;
3766
3767         bt_dev_dbg(hdev, "sock %p", sk);
3768
3769         return user_pairing_resp(sk, hdev, &cp->addr,
3770                                  MGMT_OP_USER_PASSKEY_REPLY,
3771                                  HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3772 }
3773
3774 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3775                                   void *data, u16 len)
3776 {
3777         struct mgmt_cp_user_passkey_neg_reply *cp = data;
3778
3779         bt_dev_dbg(hdev, "sock %p", sk);
3780
3781         return user_pairing_resp(sk, hdev, &cp->addr,
3782                                  MGMT_OP_USER_PASSKEY_NEG_REPLY,
3783                                  HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3784 }
3785
3786 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3787 {
3788         struct adv_info *adv_instance;
3789
3790         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3791         if (!adv_instance)
3792                 return 0;
3793
3794         /* stop if current instance doesn't need to be changed */
3795         if (!(adv_instance->flags & flags))
3796                 return 0;
3797
3798         cancel_adv_timeout(hdev);
3799
3800         adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3801         if (!adv_instance)
3802                 return 0;
3803
3804         hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3805
3806         return 0;
3807 }
3808
3809 static int name_changed_sync(struct hci_dev *hdev, void *data)
3810 {
3811         return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3812 }
3813
3814 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3815 {
3816         struct mgmt_pending_cmd *cmd = data;
3817         struct mgmt_cp_set_local_name *cp = cmd->param;
3818         u8 status = mgmt_status(err);
3819
3820         bt_dev_dbg(hdev, "err %d", err);
3821
3822         if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3823                 return;
3824
3825         if (status) {
3826                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3827                                 status);
3828         } else {
3829                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3830                                   cp, sizeof(*cp));
3831
3832                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3833                         hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3834         }
3835
3836         mgmt_pending_remove(cmd);
3837 }
3838
3839 static int set_name_sync(struct hci_dev *hdev, void *data)
3840 {
3841         if (lmp_bredr_capable(hdev)) {
3842                 hci_update_name_sync(hdev);
3843                 hci_update_eir_sync(hdev);
3844         }
3845
3846         /* The name is stored in the scan response data, so there is
3847          * no need to update the advertising data here.
3848          */
3849         if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3850                 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3851
3852         return 0;
3853 }
3854
3855 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3856                           u16 len)
3857 {
3858         struct mgmt_cp_set_local_name *cp = data;
3859         struct mgmt_pending_cmd *cmd;
3860         int err;
3861
3862         bt_dev_dbg(hdev, "sock %p", sk);
3863
3864         hci_dev_lock(hdev);
3865
3866         /* If the old values are the same as the new ones just return a
3867          * direct command complete event.
3868          */
3869         if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3870             !memcmp(hdev->short_name, cp->short_name,
3871                     sizeof(hdev->short_name))) {
3872                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3873                                         data, len);
3874                 goto failed;
3875         }
3876
3877         memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3878
3879         if (!hdev_is_powered(hdev)) {
3880                 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3881
3882                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3883                                         data, len);
3884                 if (err < 0)
3885                         goto failed;
3886
3887                 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3888                                          len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3889                 ext_info_changed(hdev, sk);
3890
3891                 goto failed;
3892         }
3893
3894         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3895         if (!cmd)
3896                 err = -ENOMEM;
3897         else
3898                 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3899                                          set_name_complete);
3900
3901         if (err < 0) {
3902                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3903                                       MGMT_STATUS_FAILED);
3904
3905                 if (cmd)
3906                         mgmt_pending_remove(cmd);
3907
3908                 goto failed;
3909         }
3910
3911         memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3912
3913 failed:
3914         hci_dev_unlock(hdev);
3915         return err;
3916 }
3917
3918 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3919 {
3920         return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3921 }
3922
3923 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3924                           u16 len)
3925 {
3926         struct mgmt_cp_set_appearance *cp = data;
3927         u16 appearance;
3928         int err;
3929
3930         bt_dev_dbg(hdev, "sock %p", sk);
3931
3932         if (!lmp_le_capable(hdev))
3933                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3934                                        MGMT_STATUS_NOT_SUPPORTED);
3935
3936         appearance = le16_to_cpu(cp->appearance);
3937
3938         hci_dev_lock(hdev);
3939
3940         if (hdev->appearance != appearance) {
3941                 hdev->appearance = appearance;
3942
3943                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3944                         hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3945                                            NULL);
3946
3947                 ext_info_changed(hdev, sk);
3948         }
3949
3950         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3951                                 0);
3952
3953         hci_dev_unlock(hdev);
3954
3955         return err;
3956 }
3957
3958 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3959                                  void *data, u16 len)
3960 {
3961         struct mgmt_rp_get_phy_configuration rp;
3962
3963         bt_dev_dbg(hdev, "sock %p", sk);
3964
3965         hci_dev_lock(hdev);
3966
3967         memset(&rp, 0, sizeof(rp));
3968
3969         rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3970         rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3971         rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3972
3973         hci_dev_unlock(hdev);
3974
3975         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3976                                  &rp, sizeof(rp));
3977 }
3978
3979 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3980 {
3981         struct mgmt_ev_phy_configuration_changed ev;
3982
3983         memset(&ev, 0, sizeof(ev));
3984
3985         ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3986
3987         return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3988                           sizeof(ev), skip);
3989 }
3990
3991 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3992 {
3993         struct mgmt_pending_cmd *cmd = data;
3994         struct sk_buff *skb = cmd->skb;
3995         u8 status = mgmt_status(err);
3996
3997         if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3998                 return;
3999
4000         if (!status) {
4001                 if (!skb)
4002                         status = MGMT_STATUS_FAILED;
4003                 else if (IS_ERR(skb))
4004                         status = mgmt_status(PTR_ERR(skb));
4005                 else
4006                         status = mgmt_status(skb->data[0]);
4007         }
4008
4009         bt_dev_dbg(hdev, "status %d", status);
4010
4011         if (status) {
4012                 mgmt_cmd_status(cmd->sk, hdev->id,
4013                                 MGMT_OP_SET_PHY_CONFIGURATION, status);
4014         } else {
4015                 mgmt_cmd_complete(cmd->sk, hdev->id,
4016                                   MGMT_OP_SET_PHY_CONFIGURATION, 0,
4017                                   NULL, 0);
4018
4019                 mgmt_phy_configuration_changed(hdev, cmd->sk);
4020         }
4021
4022         if (skb && !IS_ERR(skb))
4023                 kfree_skb(skb);
4024
4025         mgmt_pending_remove(cmd);
4026 }
4027
4028 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4029 {
4030         struct mgmt_pending_cmd *cmd = data;
4031         struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4032         struct hci_cp_le_set_default_phy cp_phy;
4033         u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4034
4035         memset(&cp_phy, 0, sizeof(cp_phy));
4036
4037         if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4038                 cp_phy.all_phys |= 0x01;
4039
4040         if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4041                 cp_phy.all_phys |= 0x02;
4042
4043         if (selected_phys & MGMT_PHY_LE_1M_TX)
4044                 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4045
4046         if (selected_phys & MGMT_PHY_LE_2M_TX)
4047                 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4048
4049         if (selected_phys & MGMT_PHY_LE_CODED_TX)
4050                 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4051
4052         if (selected_phys & MGMT_PHY_LE_1M_RX)
4053                 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4054
4055         if (selected_phys & MGMT_PHY_LE_2M_RX)
4056                 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4057
4058         if (selected_phys & MGMT_PHY_LE_CODED_RX)
4059                 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4060
4061         cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4062                                    sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4063
4064         return 0;
4065 }
4066
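/* Summary of the translation above from mgmt PHY bits to the HCI LE Set
 * Default PHY parameters: all_phys bit 0 means "no TX preference" and bit 1
 * means "no RX preference"; otherwise the individual preference bits are
 *
 *	MGMT_PHY_LE_1M_TX/RX	-> HCI_LE_SET_PHY_1M
 *	MGMT_PHY_LE_2M_TX/RX	-> HCI_LE_SET_PHY_2M
 *	MGMT_PHY_LE_CODED_TX/RX	-> HCI_LE_SET_PHY_CODED
 */
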
4067 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4068                                  void *data, u16 len)
4069 {
4070         struct mgmt_cp_set_phy_configuration *cp = data;
4071         struct mgmt_pending_cmd *cmd;
4072         u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4073         u16 pkt_type = (HCI_DH1 | HCI_DM1);
4074         bool changed = false;
4075         int err;
4076
4077         bt_dev_dbg(hdev, "sock %p", sk);
4078
4079         configurable_phys = get_configurable_phys(hdev);
4080         supported_phys = get_supported_phys(hdev);
4081         selected_phys = __le32_to_cpu(cp->selected_phys);
4082
4083         if (selected_phys & ~supported_phys)
4084                 return mgmt_cmd_status(sk, hdev->id,
4085                                        MGMT_OP_SET_PHY_CONFIGURATION,
4086                                        MGMT_STATUS_INVALID_PARAMS);
4087
4088         unconfigure_phys = supported_phys & ~configurable_phys;
4089
4090         if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4091                 return mgmt_cmd_status(sk, hdev->id,
4092                                        MGMT_OP_SET_PHY_CONFIGURATION,
4093                                        MGMT_STATUS_INVALID_PARAMS);
4094
4095         if (selected_phys == get_selected_phys(hdev))
4096                 return mgmt_cmd_complete(sk, hdev->id,
4097                                          MGMT_OP_SET_PHY_CONFIGURATION,
4098                                          0, NULL, 0);
4099
4100         hci_dev_lock(hdev);
4101
4102         if (!hdev_is_powered(hdev)) {
4103                 err = mgmt_cmd_status(sk, hdev->id,
4104                                       MGMT_OP_SET_PHY_CONFIGURATION,
4105                                       MGMT_STATUS_REJECTED);
4106                 goto unlock;
4107         }
4108
4109         if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4110                 err = mgmt_cmd_status(sk, hdev->id,
4111                                       MGMT_OP_SET_PHY_CONFIGURATION,
4112                                       MGMT_STATUS_BUSY);
4113                 goto unlock;
4114         }
4115
4116         if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4117                 pkt_type |= (HCI_DH3 | HCI_DM3);
4118         else
4119                 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4120
4121         if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4122                 pkt_type |= (HCI_DH5 | HCI_DM5);
4123         else
4124                 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4125
4126         if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4127                 pkt_type &= ~HCI_2DH1;
4128         else
4129                 pkt_type |= HCI_2DH1;
4130
4131         if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4132                 pkt_type &= ~HCI_2DH3;
4133         else
4134                 pkt_type |= HCI_2DH3;
4135
4136         if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4137                 pkt_type &= ~HCI_2DH5;
4138         else
4139                 pkt_type |= HCI_2DH5;
4140
4141         if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4142                 pkt_type &= ~HCI_3DH1;
4143         else
4144                 pkt_type |= HCI_3DH1;
4145
4146         if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4147                 pkt_type &= ~HCI_3DH3;
4148         else
4149                 pkt_type |= HCI_3DH3;
4150
4151         if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4152                 pkt_type &= ~HCI_3DH5;
4153         else
4154                 pkt_type |= HCI_3DH5;
4155
4156         if (pkt_type != hdev->pkt_type) {
4157                 hdev->pkt_type = pkt_type;
4158                 changed = true;
4159         }
4160
4161         if ((selected_phys & MGMT_PHY_LE_MASK) ==
4162             (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4163                 if (changed)
4164                         mgmt_phy_configuration_changed(hdev, sk);
4165
4166                 err = mgmt_cmd_complete(sk, hdev->id,
4167                                         MGMT_OP_SET_PHY_CONFIGURATION,
4168                                         0, NULL, 0);
4169
4170                 goto unlock;
4171         }
4172
4173         cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4174                                len);
4175         if (!cmd)
4176                 err = -ENOMEM;
4177         else
4178                 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4179                                          set_default_phy_complete);
4180
4181         if (err < 0) {
4182                 err = mgmt_cmd_status(sk, hdev->id,
4183                                       MGMT_OP_SET_PHY_CONFIGURATION,
4184                                       MGMT_STATUS_FAILED);
4185
4186                 if (cmd)
4187                         mgmt_pending_remove(cmd);
4188         }
4189
4190 unlock:
4191         hci_dev_unlock(hdev);
4192
4193         return err;
4194 }
4195
4196 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4197                             u16 len)
4198 {
4199         int err = MGMT_STATUS_SUCCESS;
4200         struct mgmt_cp_set_blocked_keys *keys = data;
4201         const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4202                                    sizeof(struct mgmt_blocked_key_info));
4203         u16 key_count, expected_len;
4204         int i;
4205
4206         bt_dev_dbg(hdev, "sock %p", sk);
4207
4208         key_count = __le16_to_cpu(keys->key_count);
4209         if (key_count > max_key_count) {
4210                 bt_dev_err(hdev, "too big key_count value %u", key_count);
4211                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4212                                        MGMT_STATUS_INVALID_PARAMS);
4213         }
4214
4215         expected_len = struct_size(keys, keys, key_count);
4216         if (expected_len != len) {
4217                 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4218                            expected_len, len);
4219                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4220                                        MGMT_STATUS_INVALID_PARAMS);
4221         }
4222
4223         hci_dev_lock(hdev);
4224
4225         hci_blocked_keys_clear(hdev);
4226
4227         for (i = 0; i < key_count; ++i) {
4228                 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4229
4230                 if (!b) {
4231                         err = MGMT_STATUS_NO_RESOURCES;
4232                         break;
4233                 }
4234
4235                 b->type = keys->keys[i].type;
4236                 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4237                 list_add_rcu(&b->list, &hdev->blocked_keys);
4238         }
4239         hci_dev_unlock(hdev);
4240
4241         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4242                                 err, NULL, 0);
4243 }
4244
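/* For reference, each entry parsed above has the fixed layout
 *
 *	struct mgmt_blocked_key_info {
 *		__u8 type;	// HCI_BLOCKED_KEY_TYPE_LINKKEY / _LTK / _IRK
 *		__u8 val[16];
 *	};
 *
 * preceded by a 16-bit key count. The resulting list is consulted when link
 * keys and long term keys are loaded (see the blocked-key check in
 * load_link_keys() earlier in this file), so blocked values are skipped
 * rather than stored.
 */
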
4245 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4246                                void *data, u16 len)
4247 {
4248         struct mgmt_mode *cp = data;
4249         int err;
4250         bool changed = false;
4251
4252         bt_dev_dbg(hdev, "sock %p", sk);
4253
4254         if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4255                 return mgmt_cmd_status(sk, hdev->id,
4256                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4257                                        MGMT_STATUS_NOT_SUPPORTED);
4258
4259         if (cp->val != 0x00 && cp->val != 0x01)
4260                 return mgmt_cmd_status(sk, hdev->id,
4261                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4262                                        MGMT_STATUS_INVALID_PARAMS);
4263
4264         hci_dev_lock(hdev);
4265
4266         if (hdev_is_powered(hdev) &&
4267             !!cp->val != hci_dev_test_flag(hdev,
4268                                            HCI_WIDEBAND_SPEECH_ENABLED)) {
4269                 err = mgmt_cmd_status(sk, hdev->id,
4270                                       MGMT_OP_SET_WIDEBAND_SPEECH,
4271                                       MGMT_STATUS_REJECTED);
4272                 goto unlock;
4273         }
4274
4275         if (cp->val)
4276                 changed = !hci_dev_test_and_set_flag(hdev,
4277                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4278         else
4279                 changed = hci_dev_test_and_clear_flag(hdev,
4280                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4281
4282         err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4283         if (err < 0)
4284                 goto unlock;
4285
4286         if (changed)
4287                 err = new_settings(hdev, sk);
4288
4289 unlock:
4290         hci_dev_unlock(hdev);
4291         return err;
4292 }
4293
4294 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4295                                void *data, u16 data_len)
4296 {
4297         char buf[20];
4298         struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4299         u16 cap_len = 0;
4300         u8 flags = 0;
4301         u8 tx_power_range[2];
4302
4303         bt_dev_dbg(hdev, "sock %p", sk);
4304
4305         memset(&buf, 0, sizeof(buf));
4306
4307         hci_dev_lock(hdev);
4308
4309         /* When the Read Simple Pairing Options command is supported, then
4310          * the remote public key validation is supported.
4311          *
4312          * Alternatively, when Microsoft extensions are available, they can
4313          * indicate support for public key validation as well.
4314          */
4315         if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4316                 flags |= 0x01;  /* Remote public key validation (BR/EDR) */
4317
4318         flags |= 0x02;          /* Remote public key validation (LE) */
4319
4320         /* When the Read Encryption Key Size command is supported, then the
4321          * encryption key size is enforced.
4322          */
4323         if (hdev->commands[20] & 0x10)
4324                 flags |= 0x04;  /* Encryption key size enforcement (BR/EDR) */
4325
4326         flags |= 0x08;          /* Encryption key size enforcement (LE) */
4327
4328         cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4329                                   &flags, 1);
4330
4331         /* When the Read Simple Pairing Options command is supported, then
4332          * also max encryption key size information is provided.
4333          */
4334         if (hdev->commands[41] & 0x08)
4335                 cap_len = eir_append_le16(rp->cap, cap_len,
4336                                           MGMT_CAP_MAX_ENC_KEY_SIZE,
4337                                           hdev->max_enc_key_size);
4338
4339         cap_len = eir_append_le16(rp->cap, cap_len,
4340                                   MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4341                                   SMP_MAX_ENC_KEY_SIZE);
4342
4343         /* Append the min/max LE tx power parameters if we were able to fetch
4344          * them from the controller
4345          */
4346         if (hdev->commands[38] & 0x80) {
4347                 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4348                 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4349                 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4350                                           tx_power_range, 2);
4351         }
4352
4353         rp->cap_len = cpu_to_le16(cap_len);
4354
4355         hci_dev_unlock(hdev);
4356
4357         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4358                                  rp, sizeof(*rp) + cap_len);
4359 }
4360
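/* The capability blob assembled above uses the same length/type/value
 * encoding as EIR data: one length byte covering type plus value, the type
 * byte, then the value. Schematically:
 *
 *	[0x02][MGMT_CAP_SEC_FLAGS]            [flags bitmask]
 *	[0x03][MGMT_CAP_MAX_ENC_KEY_SIZE]     [__le16 key size]
 *	[0x03][MGMT_CAP_SMP_MAX_ENC_KEY_SIZE] [__le16 key size]
 *	[0x03][MGMT_CAP_LE_TX_PWR]            [min dBm][max dBm]
 *
 * Entries the controller cannot report (e.g. no Read Simple Pairing Options
 * support) are simply omitted and rp->cap_len reflects the final size.
 */
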
4361 #ifdef CONFIG_BT_FEATURE_DEBUG
4362 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4363 static const u8 debug_uuid[16] = {
4364         0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4365         0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4366 };
4367 #endif
4368
4369 /* 330859bc-7506-492d-9370-9a6f0614037f */
4370 static const u8 quality_report_uuid[16] = {
4371         0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4372         0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4373 };
4374
4375 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4376 static const u8 offload_codecs_uuid[16] = {
4377         0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4378         0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4379 };
4380
4381 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4382 static const u8 le_simultaneous_roles_uuid[16] = {
4383         0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4384         0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4385 };
4386
4387 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4388 static const u8 rpa_resolution_uuid[16] = {
4389         0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4390         0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4391 };
4392
4393 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4394 static const u8 iso_socket_uuid[16] = {
4395         0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4396         0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4397 };
4398
4399 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4400 static const u8 mgmt_mesh_uuid[16] = {
4401         0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4402         0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4403 };
4404
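/* Note that the experimental-feature UUIDs above are stored in the byte
 * order used on the mgmt wire, i.e. the textual UUID reversed: for example
 * d4992530-b9ec-469f-ab01-6c481c47da1c becomes { 0x1c, 0xda, 0x47, ...,
 * 0x99, 0xd4 }. This is the form userspace must use in the Set Experimental
 * Feature command and will see in Experimental Feature Changed events.
 */
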
4405 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4406                                   void *data, u16 data_len)
4407 {
4408         struct mgmt_rp_read_exp_features_info *rp;
4409         size_t len;
4410         u16 idx = 0;
4411         u32 flags;
4412         int status;
4413
4414         bt_dev_dbg(hdev, "sock %p", sk);
4415
4416         /* Enough space for 7 features */
4417         len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4418         rp = kzalloc(len, GFP_KERNEL);
4419         if (!rp)
4420                 return -ENOMEM;
4421
4422 #ifdef CONFIG_BT_FEATURE_DEBUG
4423         if (!hdev) {
4424                 flags = bt_dbg_get() ? BIT(0) : 0;
4425
4426                 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4427                 rp->features[idx].flags = cpu_to_le32(flags);
4428                 idx++;
4429         }
4430 #endif
4431
4432         if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4433                 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4434                         flags = BIT(0);
4435                 else
4436                         flags = 0;
4437
4438                 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4439                 rp->features[idx].flags = cpu_to_le32(flags);
4440                 idx++;
4441         }
4442
4443         if (hdev && ll_privacy_capable(hdev)) {
4444                 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4445                         flags = BIT(0) | BIT(1);
4446                 else
4447                         flags = BIT(1);
4448
4449                 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4450                 rp->features[idx].flags = cpu_to_le32(flags);
4451                 idx++;
4452         }
4453
4454         if (hdev && (aosp_has_quality_report(hdev) ||
4455                      hdev->set_quality_report)) {
4456                 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4457                         flags = BIT(0);
4458                 else
4459                         flags = 0;
4460
4461                 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4462                 rp->features[idx].flags = cpu_to_le32(flags);
4463                 idx++;
4464         }
4465
4466         if (hdev && hdev->get_data_path_id) {
4467                 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4468                         flags = BIT(0);
4469                 else
4470                         flags = 0;
4471
4472                 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4473                 rp->features[idx].flags = cpu_to_le32(flags);
4474                 idx++;
4475         }
4476
4477         if (IS_ENABLED(CONFIG_BT_LE)) {
4478                 flags = iso_enabled() ? BIT(0) : 0;
4479                 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4480                 rp->features[idx].flags = cpu_to_le32(flags);
4481                 idx++;
4482         }
4483
4484         if (hdev && lmp_le_capable(hdev)) {
4485                 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4486                         flags = BIT(0);
4487                 else
4488                         flags = 0;
4489
4490                 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4491                 rp->features[idx].flags = cpu_to_le32(flags);
4492                 idx++;
4493         }
4494
4495         rp->feature_count = cpu_to_le16(idx);
4496
4497         /* After reading the experimental features information, enable
4498          * the events to update the client on any future change.
4499          */
4500         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4501
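        /* Each feature entry in the reply is a 16 byte UUID followed by a
         * 4 byte flags field, hence 20 bytes per reported feature.
         */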
4502         status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4503                                    MGMT_OP_READ_EXP_FEATURES_INFO,
4504                                    0, rp, sizeof(*rp) + (20 * idx));
4505
4506         kfree(rp);
4507         return status;
4508 }
4509
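/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the RPA resolution (LL privacy)
 * experimental feature and keep HCI_CONN_FLAG_DEVICE_PRIVACY in
 * hdev->conn_flags in sync with the new state.
 */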
4510 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4511                                           struct sock *skip)
4512 {
4513         struct mgmt_ev_exp_feature_changed ev;
4514
4515         memset(&ev, 0, sizeof(ev));
4516         memcpy(ev.uuid, rpa_resolution_uuid, 16);
4517         ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4518
4519         /* Do we need to be atomic with the conn_flags? */
4520         if (enabled && privacy_mode_capable(hdev))
4521                 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4522         else
4523                 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4524
4525         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4526                                   &ev, sizeof(ev),
4527                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4528
4529 }
4530
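/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the given feature UUID to all
 * sockets that enabled HCI_MGMT_EXP_FEATURE_EVENTS, except @skip.
 */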
4531 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4532                                bool enabled, struct sock *skip)
4533 {
4534         struct mgmt_ev_exp_feature_changed ev;
4535
4536         memset(&ev, 0, sizeof(ev));
4537         memcpy(ev.uuid, uuid, 16);
4538         ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4539
4540         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4541                                   &ev, sizeof(ev),
4542                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4543 }
4544
4545 #define EXP_FEAT(_uuid, _set_func)      \
4546 {                                       \
4547         .uuid = _uuid,                  \
4548         .set_func = _set_func,          \
4549 }
4550
4551 /* The zero key uuid is special. Multiple exp features are set through it. */
4552 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4553                              struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4554 {
4555         struct mgmt_rp_set_exp_feature rp;
4556
4557         memset(rp.uuid, 0, 16);
4558         rp.flags = cpu_to_le32(0);
4559
4560 #ifdef CONFIG_BT_FEATURE_DEBUG
4561         if (!hdev) {
4562                 bool changed = bt_dbg_get();
4563
4564                 bt_dbg_set(false);
4565
4566                 if (changed)
4567                         exp_feature_changed(NULL, ZERO_KEY, false, sk);
4568         }
4569 #endif
4570
4571         if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4572                 bool changed;
4573
4574                 changed = hci_dev_test_and_clear_flag(hdev,
4575                                                       HCI_ENABLE_LL_PRIVACY);
4576                 if (changed)
4577                         exp_feature_changed(hdev, rpa_resolution_uuid, false,
4578                                             sk);
4579         }
4580
4581         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4582
4583         return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4584                                  MGMT_OP_SET_EXP_FEATURE, 0,
4585                                  &rp, sizeof(rp));
4586 }
4587
4588 #ifdef CONFIG_BT_FEATURE_DEBUG
4589 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4590                           struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4591 {
4592         struct mgmt_rp_set_exp_feature rp;
4593
4594         bool val, changed;
4595         int err;
4596
4597         /* Command requires the non-controller index */
4598         if (hdev)
4599                 return mgmt_cmd_status(sk, hdev->id,
4600                                        MGMT_OP_SET_EXP_FEATURE,
4601                                        MGMT_STATUS_INVALID_INDEX);
4602
4603         /* Parameters are limited to a single octet */
4604         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4605                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4606                                        MGMT_OP_SET_EXP_FEATURE,
4607                                        MGMT_STATUS_INVALID_PARAMS);
4608
4609         /* Only boolean on/off is supported */
4610         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4611                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4612                                        MGMT_OP_SET_EXP_FEATURE,
4613                                        MGMT_STATUS_INVALID_PARAMS);
4614
4615         val = !!cp->param[0];
4616         changed = val ? !bt_dbg_get() : bt_dbg_get();
4617         bt_dbg_set(val);
4618
4619         memcpy(rp.uuid, debug_uuid, 16);
4620         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4621
4622         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4623
4624         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4625                                 MGMT_OP_SET_EXP_FEATURE, 0,
4626                                 &rp, sizeof(rp));
4627
4628         if (changed)
4629                 exp_feature_changed(hdev, debug_uuid, val, sk);
4630
4631         return err;
4632 }
4633 #endif
4634
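/* Set Experimental Feature handler for the mesh UUID: toggles
 * HCI_MESH_EXPERIMENTAL (disabling also clears HCI_MESH) and notifies
 * other mgmt sockets of the change.
 */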
4635 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4636                               struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4637 {
4638         struct mgmt_rp_set_exp_feature rp;
4639         bool val, changed;
4640         int err;
4641
4642         /* Command requires the controller index */
4643         if (!hdev)
4644                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4645                                        MGMT_OP_SET_EXP_FEATURE,
4646                                        MGMT_STATUS_INVALID_INDEX);
4647
4648         /* Parameters are limited to a single octet */
4649         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4650                 return mgmt_cmd_status(sk, hdev->id,
4651                                        MGMT_OP_SET_EXP_FEATURE,
4652                                        MGMT_STATUS_INVALID_PARAMS);
4653
4654         /* Only boolean on/off is supported */
4655         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4656                 return mgmt_cmd_status(sk, hdev->id,
4657                                        MGMT_OP_SET_EXP_FEATURE,
4658                                        MGMT_STATUS_INVALID_PARAMS);
4659
4660         val = !!cp->param[0];
4661
4662         if (val) {
4663                 changed = !hci_dev_test_and_set_flag(hdev,
4664                                                      HCI_MESH_EXPERIMENTAL);
4665         } else {
4666                 hci_dev_clear_flag(hdev, HCI_MESH);
4667                 changed = hci_dev_test_and_clear_flag(hdev,
4668                                                       HCI_MESH_EXPERIMENTAL);
4669         }
4670
4671         memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4672         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4673
4674         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4675
4676         err = mgmt_cmd_complete(sk, hdev->id,
4677                                 MGMT_OP_SET_EXP_FEATURE, 0,
4678                                 &rp, sizeof(rp));
4679
4680         if (changed)
4681                 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4682
4683         return err;
4684 }
4685
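/* Set Experimental Feature handler for the RPA resolution UUID: toggles
 * HCI_ENABLE_LL_PRIVACY while the controller is powered off (enabling
 * also clears HCI_ADVERTISING) and notifies other mgmt sockets.
 */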
4686 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4687                                    struct mgmt_cp_set_exp_feature *cp,
4688                                    u16 data_len)
4689 {
4690         struct mgmt_rp_set_exp_feature rp;
4691         bool val, changed;
4692         int err;
4693         u32 flags;
4694
4695         /* Command requires the controller index */
4696         if (!hdev)
4697                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4698                                        MGMT_OP_SET_EXP_FEATURE,
4699                                        MGMT_STATUS_INVALID_INDEX);
4700
4701         /* Changes can only be made when the controller is powered down */
4702         if (hdev_is_powered(hdev))
4703                 return mgmt_cmd_status(sk, hdev->id,
4704                                        MGMT_OP_SET_EXP_FEATURE,
4705                                        MGMT_STATUS_REJECTED);
4706
4707         /* Parameters are limited to a single octet */
4708         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4709                 return mgmt_cmd_status(sk, hdev->id,
4710                                        MGMT_OP_SET_EXP_FEATURE,
4711                                        MGMT_STATUS_INVALID_PARAMS);
4712
4713         /* Only boolean on/off is supported */
4714         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4715                 return mgmt_cmd_status(sk, hdev->id,
4716                                        MGMT_OP_SET_EXP_FEATURE,
4717                                        MGMT_STATUS_INVALID_PARAMS);
4718
4719         val = !!cp->param[0];
4720
4721         if (val) {
4722                 changed = !hci_dev_test_and_set_flag(hdev,
4723                                                      HCI_ENABLE_LL_PRIVACY);
4724                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4725
4726                 /* Enable LL privacy + supported settings changed */
4727                 flags = BIT(0) | BIT(1);
4728         } else {
4729                 changed = hci_dev_test_and_clear_flag(hdev,
4730                                                       HCI_ENABLE_LL_PRIVACY);
4731
4732                 /* Disable LL privacy + supported settings changed */
4733                 flags = BIT(1);
4734         }
4735
4736         memcpy(rp.uuid, rpa_resolution_uuid, 16);
4737         rp.flags = cpu_to_le32(flags);
4738
4739         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4740
4741         err = mgmt_cmd_complete(sk, hdev->id,
4742                                 MGMT_OP_SET_EXP_FEATURE, 0,
4743                                 &rp, sizeof(rp));
4744
4745         if (changed)
4746                 exp_ll_privacy_feature_changed(val, hdev, sk);
4747
4748         return err;
4749 }
4750
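/* Set Experimental Feature handler for the quality report UUID: enables or
 * disables quality reports via the driver hook or the AOSP extension and
 * updates HCI_QUALITY_REPORT accordingly.
 */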
4751 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4752                                    struct mgmt_cp_set_exp_feature *cp,
4753                                    u16 data_len)
4754 {
4755         struct mgmt_rp_set_exp_feature rp;
4756         bool val, changed;
4757         int err;
4758
4759         /* Command requires a valid controller index */
4760         if (!hdev)
4761                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4762                                        MGMT_OP_SET_EXP_FEATURE,
4763                                        MGMT_STATUS_INVALID_INDEX);
4764
4765         /* Parameters are limited to a single octet */
4766         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4767                 return mgmt_cmd_status(sk, hdev->id,
4768                                        MGMT_OP_SET_EXP_FEATURE,
4769                                        MGMT_STATUS_INVALID_PARAMS);
4770
4771         /* Only boolean on/off is supported */
4772         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4773                 return mgmt_cmd_status(sk, hdev->id,
4774                                        MGMT_OP_SET_EXP_FEATURE,
4775                                        MGMT_STATUS_INVALID_PARAMS);
4776
4777         hci_req_sync_lock(hdev);
4778
4779         val = !!cp->param[0];
4780         changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4781
4782         if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4783                 err = mgmt_cmd_status(sk, hdev->id,
4784                                       MGMT_OP_SET_EXP_FEATURE,
4785                                       MGMT_STATUS_NOT_SUPPORTED);
4786                 goto unlock_quality_report;
4787         }
4788
4789         if (changed) {
4790                 if (hdev->set_quality_report)
4791                         err = hdev->set_quality_report(hdev, val);
4792                 else
4793                         err = aosp_set_quality_report(hdev, val);
4794
4795                 if (err) {
4796                         err = mgmt_cmd_status(sk, hdev->id,
4797                                               MGMT_OP_SET_EXP_FEATURE,
4798                                               MGMT_STATUS_FAILED);
4799                         goto unlock_quality_report;
4800                 }
4801
4802                 if (val)
4803                         hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4804                 else
4805                         hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4806         }
4807
4808         bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4809
4810         memcpy(rp.uuid, quality_report_uuid, 16);
4811         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4812         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4813
4814         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4815                                 &rp, sizeof(rp));
4816
4817         if (changed)
4818                 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4819
4820 unlock_quality_report:
4821         hci_req_sync_unlock(hdev);
4822         return err;
4823 }
4824
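/* Set Experimental Feature handler for the offload codecs UUID: toggles
 * HCI_OFFLOAD_CODECS_ENABLED, provided the driver exposes get_data_path_id.
 */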
4825 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4826                                   struct mgmt_cp_set_exp_feature *cp,
4827                                   u16 data_len)
4828 {
4829         bool val, changed;
4830         int err;
4831         struct mgmt_rp_set_exp_feature rp;
4832
4833         /* Command requires a valid controller index */
4834         if (!hdev)
4835                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4836                                        MGMT_OP_SET_EXP_FEATURE,
4837                                        MGMT_STATUS_INVALID_INDEX);
4838
4839         /* Parameters are limited to a single octet */
4840         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4841                 return mgmt_cmd_status(sk, hdev->id,
4842                                        MGMT_OP_SET_EXP_FEATURE,
4843                                        MGMT_STATUS_INVALID_PARAMS);
4844
4845         /* Only boolean on/off is supported */
4846         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4847                 return mgmt_cmd_status(sk, hdev->id,
4848                                        MGMT_OP_SET_EXP_FEATURE,
4849                                        MGMT_STATUS_INVALID_PARAMS);
4850
4851         val = !!cp->param[0];
4852         changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4853
4854         if (!hdev->get_data_path_id) {
4855                 return mgmt_cmd_status(sk, hdev->id,
4856                                        MGMT_OP_SET_EXP_FEATURE,
4857                                        MGMT_STATUS_NOT_SUPPORTED);
4858         }
4859
4860         if (changed) {
4861                 if (val)
4862                         hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4863                 else
4864                         hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4865         }
4866
4867         bt_dev_info(hdev, "offload codecs enable %d changed %d",
4868                     val, changed);
4869
4870         memcpy(rp.uuid, offload_codecs_uuid, 16);
4871         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4872         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4873         err = mgmt_cmd_complete(sk, hdev->id,
4874                                 MGMT_OP_SET_EXP_FEATURE, 0,
4875                                 &rp, sizeof(rp));
4876
4877         if (changed)
4878                 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4879
4880         return err;
4881 }
4882
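/* Set Experimental Feature handler for the LE simultaneous roles UUID:
 * toggles HCI_LE_SIMULTANEOUS_ROLES when the controller supports acting as
 * LE central and peripheral at the same time.
 */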
4883 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4884                                           struct mgmt_cp_set_exp_feature *cp,
4885                                           u16 data_len)
4886 {
4887         bool val, changed;
4888         int err;
4889         struct mgmt_rp_set_exp_feature rp;
4890
4891         /* Command requires a valid controller index */
4892         if (!hdev)
4893                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4894                                        MGMT_OP_SET_EXP_FEATURE,
4895                                        MGMT_STATUS_INVALID_INDEX);
4896
4897         /* Parameters are limited to a single octet */
4898         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4899                 return mgmt_cmd_status(sk, hdev->id,
4900                                        MGMT_OP_SET_EXP_FEATURE,
4901                                        MGMT_STATUS_INVALID_PARAMS);
4902
4903         /* Only boolean on/off is supported */
4904         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4905                 return mgmt_cmd_status(sk, hdev->id,
4906                                        MGMT_OP_SET_EXP_FEATURE,
4907                                        MGMT_STATUS_INVALID_PARAMS);
4908
4909         val = !!cp->param[0];
4910         changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4911
4912         if (!hci_dev_le_state_simultaneous(hdev)) {
4913                 return mgmt_cmd_status(sk, hdev->id,
4914                                        MGMT_OP_SET_EXP_FEATURE,
4915                                        MGMT_STATUS_NOT_SUPPORTED);
4916         }
4917
4918         if (changed) {
4919                 if (val)
4920                         hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4921                 else
4922                         hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4923         }
4924
4925         bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4926                     val, changed);
4927
4928         memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4929         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4930         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4931         err = mgmt_cmd_complete(sk, hdev->id,
4932                                 MGMT_OP_SET_EXP_FEATURE, 0,
4933                                 &rp, sizeof(rp));
4934
4935         if (changed)
4936                 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4937
4938         return err;
4939 }
4940
4941 #ifdef CONFIG_BT_LE
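/* Set Experimental Feature handler for the ISO socket UUID: registers or
 * unregisters the ISO socket protocol via iso_init()/iso_exit(). Uses the
 * non-controller index.
 */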
4942 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4943                                struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4944 {
4945         struct mgmt_rp_set_exp_feature rp;
4946         bool val, changed = false;
4947         int err;
4948
4949         /* Command requires the non-controller index */
4950         if (hdev)
4951                 return mgmt_cmd_status(sk, hdev->id,
4952                                        MGMT_OP_SET_EXP_FEATURE,
4953                                        MGMT_STATUS_INVALID_INDEX);
4954
4955         /* Parameters are limited to a single octet */
4956         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4957                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4958                                        MGMT_OP_SET_EXP_FEATURE,
4959                                        MGMT_STATUS_INVALID_PARAMS);
4960
4961         /* Only boolean on/off is supported */
4962         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4963                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4964                                        MGMT_OP_SET_EXP_FEATURE,
4965                                        MGMT_STATUS_INVALID_PARAMS);
4966
4967         val = !!cp->param[0];
4968         if (val)
4969                 err = iso_init();
4970         else
4971                 err = iso_exit();
4972
4973         if (!err)
4974                 changed = true;
4975
4976         memcpy(rp.uuid, iso_socket_uuid, 16);
4977         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4978
4979         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4980
4981         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4982                                 MGMT_OP_SET_EXP_FEATURE, 0,
4983                                 &rp, sizeof(rp));
4984
4985         if (changed)
4986                 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4987
4988         return err;
4989 }
4990 #endif
4991
4992 static const struct mgmt_exp_feature {
4993         const u8 *uuid;
4994         int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4995                         struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4996 } exp_features[] = {
4997         EXP_FEAT(ZERO_KEY, set_zero_key_func),
4998 #ifdef CONFIG_BT_FEATURE_DEBUG
4999         EXP_FEAT(debug_uuid, set_debug_func),
5000 #endif
5001         EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5002         EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5003         EXP_FEAT(quality_report_uuid, set_quality_report_func),
5004         EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5005         EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5006 #ifdef CONFIG_BT_LE
5007         EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5008 #endif
5009
5010         /* end with a null feature */
5011         EXP_FEAT(NULL, NULL)
5012 };
5013
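/* Dispatch MGMT_OP_SET_EXP_FEATURE to the handler whose UUID matches the
 * one supplied in the command parameters.
 */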
5014 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5015                            void *data, u16 data_len)
5016 {
5017         struct mgmt_cp_set_exp_feature *cp = data;
5018         size_t i = 0;
5019
5020         bt_dev_dbg(hdev, "sock %p", sk);
5021
5022         for (i = 0; exp_features[i].uuid; i++) {
5023                 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5024                         return exp_features[i].set_func(sk, hdev, cp, data_len);
5025         }
5026
5027         return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5028                                MGMT_OP_SET_EXP_FEATURE,
5029                                MGMT_STATUS_NOT_SUPPORTED);
5030 }
5031
5032 static u32 get_params_flags(struct hci_dev *hdev,
5033                             struct hci_conn_params *params)
5034 {
5035         u32 flags = hdev->conn_flags;
5036
5037         /* Devices using RPAs can only be programmed in the acceptlist if
5038          * LL Privacy has been enabled, otherwise they cannot mark
5039          * HCI_CONN_FLAG_REMOTE_WAKEUP.
5040          */
5041         if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5042             hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5043                 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5044
5045         return flags;
5046 }
5047
5048 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5049                             u16 data_len)
5050 {
5051         struct mgmt_cp_get_device_flags *cp = data;
5052         struct mgmt_rp_get_device_flags rp;
5053         struct bdaddr_list_with_flags *br_params;
5054         struct hci_conn_params *params;
5055         u32 supported_flags;
5056         u32 current_flags = 0;
5057         u8 status = MGMT_STATUS_INVALID_PARAMS;
5058
5059         bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5060                    &cp->addr.bdaddr, cp->addr.type);
5061
5062         hci_dev_lock(hdev);
5063
5064         supported_flags = hdev->conn_flags;
5065
5066         memset(&rp, 0, sizeof(rp));
5067
5068         if (cp->addr.type == BDADDR_BREDR) {
5069                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5070                                                               &cp->addr.bdaddr,
5071                                                               cp->addr.type);
5072                 if (!br_params)
5073                         goto done;
5074
5075                 current_flags = br_params->flags;
5076         } else {
5077                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5078                                                 le_addr_type(cp->addr.type));
5079                 if (!params)
5080                         goto done;
5081
5082                 supported_flags = get_params_flags(hdev, params);
5083                 current_flags = params->flags;
5084         }
5085
5086         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5087         rp.addr.type = cp->addr.type;
5088         rp.supported_flags = cpu_to_le32(supported_flags);
5089         rp.current_flags = cpu_to_le32(current_flags);
5090
5091         status = MGMT_STATUS_SUCCESS;
5092
5093 done:
5094         hci_dev_unlock(hdev);
5095
5096         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5097                                 &rp, sizeof(rp));
5098 }
5099
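/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED for the given device to all mgmt
 * sockets except the one that issued the change.
 */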
5100 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5101                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5102                                  u32 supported_flags, u32 current_flags)
5103 {
5104         struct mgmt_ev_device_flags_changed ev;
5105
5106         bacpy(&ev.addr.bdaddr, bdaddr);
5107         ev.addr.type = bdaddr_type;
5108         ev.supported_flags = cpu_to_le32(supported_flags);
5109         ev.current_flags = cpu_to_le32(current_flags);
5110
5111         mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5112 }
5113
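/* Store the requested device flags for a BR/EDR accept list entry or LE
 * connection parameters, after checking that they are a subset of the
 * supported flags; a passive scan update is triggered when device privacy
 * is requested.
 */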
5114 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5115                             u16 len)
5116 {
5117         struct mgmt_cp_set_device_flags *cp = data;
5118         struct bdaddr_list_with_flags *br_params;
5119         struct hci_conn_params *params;
5120         u8 status = MGMT_STATUS_INVALID_PARAMS;
5121         u32 supported_flags;
5122         u32 current_flags = __le32_to_cpu(cp->current_flags);
5123
5124         bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5125                    &cp->addr.bdaddr, cp->addr.type, current_flags);
5126
5127         /* We should take hci_dev_lock() earlier; conn_flags can change under us. */
5128         supported_flags = hdev->conn_flags;
5129
5130         if ((supported_flags | current_flags) != supported_flags) {
5131                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5132                             current_flags, supported_flags);
5133                 goto done;
5134         }
5135
5136         hci_dev_lock(hdev);
5137
5138         if (cp->addr.type == BDADDR_BREDR) {
5139                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5140                                                               &cp->addr.bdaddr,
5141                                                               cp->addr.type);
5142
5143                 if (br_params) {
5144                         br_params->flags = current_flags;
5145                         status = MGMT_STATUS_SUCCESS;
5146                 } else {
5147                         bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5148                                     &cp->addr.bdaddr, cp->addr.type);
5149                 }
5150
5151                 goto unlock;
5152         }
5153
5154         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5155                                         le_addr_type(cp->addr.type));
5156         if (!params) {
5157                 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5158                             &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5159                 goto unlock;
5160         }
5161
5162         supported_flags = get_params_flags(hdev, params);
5163
5164         if ((supported_flags | current_flags) != supported_flags) {
5165                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5166                             current_flags, supported_flags);
5167                 goto unlock;
5168         }
5169
5170         WRITE_ONCE(params->flags, current_flags);
5171         status = MGMT_STATUS_SUCCESS;
5172
5173         /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5174          * has been set.
5175          */
5176         if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5177                 hci_update_passive_scan(hdev);
5178
5179 unlock:
5180         hci_dev_unlock(hdev);
5181
5182 done:
5183         if (status == MGMT_STATUS_SUCCESS)
5184                 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5185                                      supported_flags, current_flags);
5186
5187         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5188                                  &cp->addr, sizeof(cp->addr));
5189 }
5190
5191 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5192                                    u16 handle)
5193 {
5194         struct mgmt_ev_adv_monitor_added ev;
5195
5196         ev.monitor_handle = cpu_to_le16(handle);
5197
5198         mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5199 }
5200
5201 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5202 {
5203         struct mgmt_ev_adv_monitor_removed ev;
5204         struct mgmt_pending_cmd *cmd;
5205         struct sock *sk_skip = NULL;
5206         struct mgmt_cp_remove_adv_monitor *cp;
5207
5208         cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5209         if (cmd) {
5210                 cp = cmd->param;
5211
5212                 if (cp->monitor_handle)
5213                         sk_skip = cmd->sk;
5214         }
5215
5216         ev.monitor_handle = cpu_to_le16(handle);
5217
5218         mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5219 }
5220
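/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES: reports the supported and
 * enabled monitor features together with the handles of all registered
 * advertisement monitors.
 */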
5221 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5222                                  void *data, u16 len)
5223 {
5224         struct adv_monitor *monitor = NULL;
5225         struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5226         int handle, err;
5227         size_t rp_size = 0;
5228         __u32 supported = 0;
5229         __u32 enabled = 0;
5230         __u16 num_handles = 0;
5231         __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5232
5233         BT_DBG("request for %s", hdev->name);
5234
5235         hci_dev_lock(hdev);
5236
5237         if (msft_monitor_supported(hdev))
5238                 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5239
5240         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5241                 handles[num_handles++] = monitor->handle;
5242
5243         hci_dev_unlock(hdev);
5244
5245         rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5246         rp = kmalloc(rp_size, GFP_KERNEL);
5247         if (!rp)
5248                 return -ENOMEM;
5249
5250         /* All supported features are currently enabled */
5251         enabled = supported;
5252
5253         rp->supported_features = cpu_to_le32(supported);
5254         rp->enabled_features = cpu_to_le32(enabled);
5255         rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5256         rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5257         rp->num_handles = cpu_to_le16(num_handles);
5258         if (num_handles)
5259                 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5260
5261         err = mgmt_cmd_complete(sk, hdev->id,
5262                                 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5263                                 MGMT_STATUS_SUCCESS, rp, rp_size);
5264
5265         kfree(rp);
5266
5267         return err;
5268 }
5269
5270 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5271                                                    void *data, int status)
5272 {
5273         struct mgmt_rp_add_adv_patterns_monitor rp;
5274         struct mgmt_pending_cmd *cmd = data;
5275         struct adv_monitor *monitor = cmd->user_data;
5276
5277         hci_dev_lock(hdev);
5278
5279         rp.monitor_handle = cpu_to_le16(monitor->handle);
5280
5281         if (!status) {
5282                 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5283                 hdev->adv_monitors_cnt++;
5284                 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5285                         monitor->state = ADV_MONITOR_STATE_REGISTERED;
5286                 hci_update_passive_scan(hdev);
5287         }
5288
5289         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5290                           mgmt_status(status), &rp, sizeof(rp));
5291         mgmt_pending_remove(cmd);
5292
5293         hci_dev_unlock(hdev);
5294         bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5295                    rp.monitor_handle, status);
5296 }
5297
5298 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5299 {
5300         struct mgmt_pending_cmd *cmd = data;
5301         struct adv_monitor *monitor = cmd->user_data;
5302
5303         return hci_add_adv_monitor(hdev, monitor);
5304 }
5305
5306 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5307                                       struct adv_monitor *m, u8 status,
5308                                       void *data, u16 len, u16 op)
5309 {
5310         struct mgmt_pending_cmd *cmd;
5311         int err;
5312
5313         hci_dev_lock(hdev);
5314
5315         if (status)
5316                 goto unlock;
5317
5318         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5319             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5320             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5321             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5322                 status = MGMT_STATUS_BUSY;
5323                 goto unlock;
5324         }
5325
5326         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5327         if (!cmd) {
5328                 status = MGMT_STATUS_NO_RESOURCES;
5329                 goto unlock;
5330         }
5331
5332         cmd->user_data = m;
5333         err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5334                                  mgmt_add_adv_patterns_monitor_complete);
5335         if (err) {
5336                 if (err == -ENOMEM)
5337                         status = MGMT_STATUS_NO_RESOURCES;
5338                 else
5339                         status = MGMT_STATUS_FAILED;
5340
5341                 goto unlock;
5342         }
5343
5344         hci_dev_unlock(hdev);
5345
5346         return 0;
5347
5348 unlock:
5349         hci_free_adv_monitor(hdev, m);
5350         hci_dev_unlock(hdev);
5351         return mgmt_cmd_status(sk, hdev->id, op, status);
5352 }
5353
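/* Copy the RSSI thresholds from the command into the monitor, or apply the
 * defaults below when the command did not carry any.
 */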
5354 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5355                                    struct mgmt_adv_rssi_thresholds *rssi)
5356 {
5357         if (rssi) {
5358                 m->rssi.low_threshold = rssi->low_threshold;
5359                 m->rssi.low_threshold_timeout =
5360                     __le16_to_cpu(rssi->low_threshold_timeout);
5361                 m->rssi.high_threshold = rssi->high_threshold;
5362                 m->rssi.high_threshold_timeout =
5363                     __le16_to_cpu(rssi->high_threshold_timeout);
5364                 m->rssi.sampling_period = rssi->sampling_period;
5365         } else {
5366                 /* Default values. These numbers are the least constraining
5367                  * parameters for the MSFT API to work, so it behaves as if
5368                  * there were no RSSI parameters to consider. May need to be
5369                  * changed if other APIs are to be supported.
5370                  */
5371                 m->rssi.low_threshold = -127;
5372                 m->rssi.low_threshold_timeout = 60;
5373                 m->rssi.high_threshold = -127;
5374                 m->rssi.high_threshold_timeout = 0;
5375                 m->rssi.sampling_period = 0;
5376         }
5377 }
5378
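/* Validate each advertisement pattern against HCI_MAX_EXT_AD_LENGTH and
 * append a copy of it to the monitor's pattern list.
 */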
5379 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5380                                     struct mgmt_adv_pattern *patterns)
5381 {
5382         u8 offset = 0, length = 0;
5383         struct adv_pattern *p = NULL;
5384         int i;
5385
5386         for (i = 0; i < pattern_count; i++) {
5387                 offset = patterns[i].offset;
5388                 length = patterns[i].length;
5389                 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5390                     length > HCI_MAX_EXT_AD_LENGTH ||
5391                     (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5392                         return MGMT_STATUS_INVALID_PARAMS;
5393
5394                 p = kmalloc(sizeof(*p), GFP_KERNEL);
5395                 if (!p)
5396                         return MGMT_STATUS_NO_RESOURCES;
5397
5398                 p->ad_type = patterns[i].ad_type;
5399                 p->offset = patterns[i].offset;
5400                 p->length = patterns[i].length;
5401                 memcpy(p->value, patterns[i].value, p->length);
5402
5403                 INIT_LIST_HEAD(&p->list);
5404                 list_add(&p->list, &m->patterns);
5405         }
5406
5407         return MGMT_STATUS_SUCCESS;
5408 }
5409
5410 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5411                                     void *data, u16 len)
5412 {
5413         struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5414         struct adv_monitor *m = NULL;
5415         u8 status = MGMT_STATUS_SUCCESS;
5416         size_t expected_size = sizeof(*cp);
5417
5418         BT_DBG("request for %s", hdev->name);
5419
5420         if (len <= sizeof(*cp)) {
5421                 status = MGMT_STATUS_INVALID_PARAMS;
5422                 goto done;
5423         }
5424
5425         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5426         if (len != expected_size) {
5427                 status = MGMT_STATUS_INVALID_PARAMS;
5428                 goto done;
5429         }
5430
5431         m = kzalloc(sizeof(*m), GFP_KERNEL);
5432         if (!m) {
5433                 status = MGMT_STATUS_NO_RESOURCES;
5434                 goto done;
5435         }
5436
5437         INIT_LIST_HEAD(&m->patterns);
5438
5439         parse_adv_monitor_rssi(m, NULL);
5440         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5441
5442 done:
5443         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5444                                           MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5445 }
5446
5447 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5448                                          void *data, u16 len)
5449 {
5450         struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5451         struct adv_monitor *m = NULL;
5452         u8 status = MGMT_STATUS_SUCCESS;
5453         size_t expected_size = sizeof(*cp);
5454
5455         BT_DBG("request for %s", hdev->name);
5456
5457         if (len <= sizeof(*cp)) {
5458                 status = MGMT_STATUS_INVALID_PARAMS;
5459                 goto done;
5460         }
5461
5462         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5463         if (len != expected_size) {
5464                 status = MGMT_STATUS_INVALID_PARAMS;
5465                 goto done;
5466         }
5467
5468         m = kzalloc(sizeof(*m), GFP_KERNEL);
5469         if (!m) {
5470                 status = MGMT_STATUS_NO_RESOURCES;
5471                 goto done;
5472         }
5473
5474         INIT_LIST_HEAD(&m->patterns);
5475
5476         parse_adv_monitor_rssi(m, &cp->rssi);
5477         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5478
5479 done:
5480         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5481                                          MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5482 }
5483
5484 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5485                                              void *data, int status)
5486 {
5487         struct mgmt_rp_remove_adv_monitor rp;
5488         struct mgmt_pending_cmd *cmd = data;
5489         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5490
5491         hci_dev_lock(hdev);
5492
5493         rp.monitor_handle = cp->monitor_handle;
5494
5495         if (!status)
5496                 hci_update_passive_scan(hdev);
5497
5498         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5499                           mgmt_status(status), &rp, sizeof(rp));
5500         mgmt_pending_remove(cmd);
5501
5502         hci_dev_unlock(hdev);
5503         bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5504                    rp.monitor_handle, status);
5505 }
5506
5507 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5508 {
5509         struct mgmt_pending_cmd *cmd = data;
5510         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5511         u16 handle = __le16_to_cpu(cp->monitor_handle);
5512
5513         if (!handle)
5514                 return hci_remove_all_adv_monitor(hdev);
5515
5516         return hci_remove_single_adv_monitor(hdev, handle);
5517 }
5518
5519 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5520                               void *data, u16 len)
5521 {
5522         struct mgmt_pending_cmd *cmd;
5523         int err, status;
5524
5525         hci_dev_lock(hdev);
5526
5527         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5528             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5529             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5530             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5531                 status = MGMT_STATUS_BUSY;
5532                 goto unlock;
5533         }
5534
5535         cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5536         if (!cmd) {
5537                 status = MGMT_STATUS_NO_RESOURCES;
5538                 goto unlock;
5539         }
5540
5541         err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5542                                  mgmt_remove_adv_monitor_complete);
5543
5544         if (err) {
5545                 mgmt_pending_remove(cmd);
5546
5547                 if (err == -ENOMEM)
5548                         status = MGMT_STATUS_NO_RESOURCES;
5549                 else
5550                         status = MGMT_STATUS_FAILED;
5551
5552                 goto unlock;
5553         }
5554
5555         hci_dev_unlock(hdev);
5556
5557         return 0;
5558
5559 unlock:
5560         hci_dev_unlock(hdev);
5561         return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5562                                status);
5563 }
5564
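/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translates the
 * controller reply into the mgmt response, using the P-192 only format when
 * BR/EDR Secure Connections is not enabled and the extended P-192 + P-256
 * format otherwise.
 */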
5565 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5566 {
5567         struct mgmt_rp_read_local_oob_data mgmt_rp;
5568         size_t rp_size = sizeof(mgmt_rp);
5569         struct mgmt_pending_cmd *cmd = data;
5570         struct sk_buff *skb = cmd->skb;
5571         u8 status = mgmt_status(err);
5572
5573         if (!status) {
5574                 if (!skb)
5575                         status = MGMT_STATUS_FAILED;
5576                 else if (IS_ERR(skb))
5577                         status = mgmt_status(PTR_ERR(skb));
5578                 else
5579                         status = mgmt_status(skb->data[0]);
5580         }
5581
5582         bt_dev_dbg(hdev, "status %d", status);
5583
5584         if (status) {
5585                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5586                 goto remove;
5587         }
5588
5589         memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5590
5591         if (!bredr_sc_enabled(hdev)) {
5592                 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5593
5594                 if (skb->len < sizeof(*rp)) {
5595                         mgmt_cmd_status(cmd->sk, hdev->id,
5596                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5597                                         MGMT_STATUS_FAILED);
5598                         goto remove;
5599                 }
5600
5601                 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5602                 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5603
5604                 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5605         } else {
5606                 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5607
5608                 if (skb->len < sizeof(*rp)) {
5609                         mgmt_cmd_status(cmd->sk, hdev->id,
5610                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5611                                         MGMT_STATUS_FAILED);
5612                         goto remove;
5613                 }
5614
5615                 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5616                 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5617
5618                 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5619                 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5620         }
5621
5622         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5623                           MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5624
5625 remove:
5626         if (skb && !IS_ERR(skb))
5627                 kfree_skb(skb);
5628
5629         mgmt_pending_free(cmd);
5630 }
5631
5632 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5633 {
5634         struct mgmt_pending_cmd *cmd = data;
5635
5636         if (bredr_sc_enabled(hdev))
5637                 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5638         else
5639                 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5640
5641         if (IS_ERR(cmd->skb))
5642                 return PTR_ERR(cmd->skb);
5643         else
5644                 return 0;
5645 }
5646
5647 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5648                                void *data, u16 data_len)
5649 {
5650         struct mgmt_pending_cmd *cmd;
5651         int err;
5652
5653         bt_dev_dbg(hdev, "sock %p", sk);
5654
5655         hci_dev_lock(hdev);
5656
5657         if (!hdev_is_powered(hdev)) {
5658                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5659                                       MGMT_STATUS_NOT_POWERED);
5660                 goto unlock;
5661         }
5662
5663         if (!lmp_ssp_capable(hdev)) {
5664                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5665                                       MGMT_STATUS_NOT_SUPPORTED);
5666                 goto unlock;
5667         }
5668
5669         cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5670         if (!cmd)
5671                 err = -ENOMEM;
5672         else
5673                 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5674                                          read_local_oob_data_complete);
5675
5676         if (err < 0) {
5677                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5678                                       MGMT_STATUS_FAILED);
5679
5680                 if (cmd)
5681                         mgmt_pending_free(cmd);
5682         }
5683
5684 unlock:
5685         hci_dev_unlock(hdev);
5686         return err;
5687 }
5688
5689 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5690                                void *data, u16 len)
5691 {
5692         struct mgmt_addr_info *addr = data;
5693         int err;
5694
5695         bt_dev_dbg(hdev, "sock %p", sk);
5696
5697         if (!bdaddr_type_is_valid(addr->type))
5698                 return mgmt_cmd_complete(sk, hdev->id,
5699                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
5700                                          MGMT_STATUS_INVALID_PARAMS,
5701                                          addr, sizeof(*addr));
5702
5703         hci_dev_lock(hdev);
5704
5705         if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5706                 struct mgmt_cp_add_remote_oob_data *cp = data;
5707                 u8 status;
5708
5709                 if (cp->addr.type != BDADDR_BREDR) {
5710                         err = mgmt_cmd_complete(sk, hdev->id,
5711                                                 MGMT_OP_ADD_REMOTE_OOB_DATA,
5712                                                 MGMT_STATUS_INVALID_PARAMS,
5713                                                 &cp->addr, sizeof(cp->addr));
5714                         goto unlock;
5715                 }
5716
5717                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5718                                               cp->addr.type, cp->hash,
5719                                               cp->rand, NULL, NULL);
5720                 if (err < 0)
5721                         status = MGMT_STATUS_FAILED;
5722                 else
5723                         status = MGMT_STATUS_SUCCESS;
5724
5725                 err = mgmt_cmd_complete(sk, hdev->id,
5726                                         MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5727                                         &cp->addr, sizeof(cp->addr));
5728         } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5729                 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5730                 u8 *rand192, *hash192, *rand256, *hash256;
5731                 u8 status;
5732
5733                 if (bdaddr_type_is_le(cp->addr.type)) {
5734                         /* Enforce zero-valued 192-bit parameters as
5735                          * long as legacy SMP OOB isn't implemented.
5736                          */
5737                         if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5738                             memcmp(cp->hash192, ZERO_KEY, 16)) {
5739                                 err = mgmt_cmd_complete(sk, hdev->id,
5740                                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5741                                                         MGMT_STATUS_INVALID_PARAMS,
5742                                                         addr, sizeof(*addr));
5743                                 goto unlock;
5744                         }
5745
5746                         rand192 = NULL;
5747                         hash192 = NULL;
5748                 } else {
5749                         /* In case one of the P-192 values is set to zero,
5750                          * then just disable OOB data for P-192.
5751                          */
5752                         if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5753                             !memcmp(cp->hash192, ZERO_KEY, 16)) {
5754                                 rand192 = NULL;
5755                                 hash192 = NULL;
5756                         } else {
5757                                 rand192 = cp->rand192;
5758                                 hash192 = cp->hash192;
5759                         }
5760                 }
5761
5762                 /* In case one of the P-256 values is set to zero, then just
5763                  * disable OOB data for P-256.
5764                  */
5765                 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5766                     !memcmp(cp->hash256, ZERO_KEY, 16)) {
5767                         rand256 = NULL;
5768                         hash256 = NULL;
5769                 } else {
5770                         rand256 = cp->rand256;
5771                         hash256 = cp->hash256;
5772                 }
5773
5774                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5775                                               cp->addr.type, hash192, rand192,
5776                                               hash256, rand256);
5777                 if (err < 0)
5778                         status = MGMT_STATUS_FAILED;
5779                 else
5780                         status = MGMT_STATUS_SUCCESS;
5781
5782                 err = mgmt_cmd_complete(sk, hdev->id,
5783                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5784                                         status, &cp->addr, sizeof(cp->addr));
5785         } else {
5786                 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5787                            len);
5788                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5789                                       MGMT_STATUS_INVALID_PARAMS);
5790         }
5791
5792 unlock:
5793         hci_dev_unlock(hdev);
5794         return err;
5795 }
5796
5797 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5798                                   void *data, u16 len)
5799 {
5800         struct mgmt_cp_remove_remote_oob_data *cp = data;
5801         u8 status;
5802         int err;
5803
5804         bt_dev_dbg(hdev, "sock %p", sk);
5805
5806         if (cp->addr.type != BDADDR_BREDR)
5807                 return mgmt_cmd_complete(sk, hdev->id,
5808                                          MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5809                                          MGMT_STATUS_INVALID_PARAMS,
5810                                          &cp->addr, sizeof(cp->addr));
5811
5812         hci_dev_lock(hdev);
5813
5814         if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5815                 hci_remote_oob_data_clear(hdev);
5816                 status = MGMT_STATUS_SUCCESS;
5817                 goto done;
5818         }
5819
5820         err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5821         if (err < 0)
5822                 status = MGMT_STATUS_INVALID_PARAMS;
5823         else
5824                 status = MGMT_STATUS_SUCCESS;
5825
5826 done:
5827         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5828                                 status, &cp->addr, sizeof(cp->addr));
5829
5830         hci_dev_unlock(hdev);
5831         return err;
5832 }
5833
5834 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5835 {
5836         struct mgmt_pending_cmd *cmd;
5837
5838         bt_dev_dbg(hdev, "status %u", status);
5839
5840         hci_dev_lock(hdev);
5841
5842         cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5843         if (!cmd)
5844                 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5845
5846         if (!cmd)
5847                 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5848
5849         if (cmd) {
5850                 cmd->cmd_complete(cmd, mgmt_status(status));
5851                 mgmt_pending_remove(cmd);
5852         }
5853
5854         hci_dev_unlock(hdev);
5855 }
5856
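/* Check whether the requested discovery type is supported by the controller
 * and, if not, store the mgmt status code to report in *mgmt_status.
 */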
5857 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5858                                     uint8_t *mgmt_status)
5859 {
5860         switch (type) {
5861         case DISCOV_TYPE_LE:
5862                 *mgmt_status = mgmt_le_support(hdev);
5863                 if (*mgmt_status)
5864                         return false;
5865                 break;
5866         case DISCOV_TYPE_INTERLEAVED:
5867                 *mgmt_status = mgmt_le_support(hdev);
5868                 if (*mgmt_status)
5869                         return false;
5870                 fallthrough;
5871         case DISCOV_TYPE_BREDR:
5872                 *mgmt_status = mgmt_bredr_support(hdev);
5873                 if (*mgmt_status)
5874                         return false;
5875                 break;
5876         default:
5877                 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5878                 return false;
5879         }
5880
5881         return true;
5882 }
5883
5884 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5885 {
5886         struct mgmt_pending_cmd *cmd = data;
5887
5888         if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5889             cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5890             cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5891                 return;
5892
5893         bt_dev_dbg(hdev, "err %d", err);
5894
5895         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5896                           cmd->param, 1);
5897         mgmt_pending_remove(cmd);
5898
5899         hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5900                                 DISCOVERY_FINDING);
5901 }
5902
5903 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5904 {
5905         return hci_start_discovery_sync(hdev);
5906 }
5907
5908 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5909                                     u16 op, void *data, u16 len)
5910 {
5911         struct mgmt_cp_start_discovery *cp = data;
5912         struct mgmt_pending_cmd *cmd;
5913         u8 status;
5914         int err;
5915
5916         bt_dev_dbg(hdev, "sock %p", sk);
5917
5918         hci_dev_lock(hdev);
5919
5920         if (!hdev_is_powered(hdev)) {
5921                 err = mgmt_cmd_complete(sk, hdev->id, op,
5922                                         MGMT_STATUS_NOT_POWERED,
5923                                         &cp->type, sizeof(cp->type));
5924                 goto failed;
5925         }
5926
5927         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5928             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5929                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5930                                         &cp->type, sizeof(cp->type));
5931                 goto failed;
5932         }
5933
5934         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5935                 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5936                                         &cp->type, sizeof(cp->type));
5937                 goto failed;
5938         }
5939
5940         /* Can't start discovery when it is paused */
5941         if (hdev->discovery_paused) {
5942                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5943                                         &cp->type, sizeof(cp->type));
5944                 goto failed;
5945         }
5946
5947         /* Clear the discovery filter first to free any previously
5948          * allocated memory for the UUID list.
5949          */
5950         hci_discovery_filter_clear(hdev);
5951
5952         hdev->discovery.type = cp->type;
5953         hdev->discovery.report_invalid_rssi = false;
5954         if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5955                 hdev->discovery.limited = true;
5956         else
5957                 hdev->discovery.limited = false;
5958
5959         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5960         if (!cmd) {
5961                 err = -ENOMEM;
5962                 goto failed;
5963         }
5964
5965         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5966                                  start_discovery_complete);
5967         if (err < 0) {
5968                 mgmt_pending_remove(cmd);
5969                 goto failed;
5970         }
5971
5972         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5973
5974 failed:
5975         hci_dev_unlock(hdev);
5976         return err;
5977 }
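
/* Both Start Discovery and Start Limited Discovery (wrappers below) are
 * funnelled through start_discovery_internal(); the work itself is done
 * by the queued start_discovery_sync() call and the command is completed
 * asynchronously from start_discovery_complete().  The single "type"
 * byte of the command is validated against the DISCOV_TYPE_* values in
 * discovery_type_is_valid() and echoed back in every reply.
 */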
5978
5979 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5980                            void *data, u16 len)
5981 {
5982         return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5983                                         data, len);
5984 }
5985
5986 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5987                                    void *data, u16 len)
5988 {
5989         return start_discovery_internal(sk, hdev,
5990                                         MGMT_OP_START_LIMITED_DISCOVERY,
5991                                         data, len);
5992 }
5993
5994 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5995                                    void *data, u16 len)
5996 {
5997         struct mgmt_cp_start_service_discovery *cp = data;
5998         struct mgmt_pending_cmd *cmd;
5999         const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6000         u16 uuid_count, expected_len;
6001         u8 status;
6002         int err;
6003
6004         bt_dev_dbg(hdev, "sock %p", sk);
6005
6006         hci_dev_lock(hdev);
6007
6008         if (!hdev_is_powered(hdev)) {
6009                 err = mgmt_cmd_complete(sk, hdev->id,
6010                                         MGMT_OP_START_SERVICE_DISCOVERY,
6011                                         MGMT_STATUS_NOT_POWERED,
6012                                         &cp->type, sizeof(cp->type));
6013                 goto failed;
6014         }
6015
6016         if (hdev->discovery.state != DISCOVERY_STOPPED ||
6017             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6018                 err = mgmt_cmd_complete(sk, hdev->id,
6019                                         MGMT_OP_START_SERVICE_DISCOVERY,
6020                                         MGMT_STATUS_BUSY, &cp->type,
6021                                         sizeof(cp->type));
6022                 goto failed;
6023         }
6024
6025         if (hdev->discovery_paused) {
6026                 err = mgmt_cmd_complete(sk, hdev->id,
6027                                         MGMT_OP_START_SERVICE_DISCOVERY,
6028                                         MGMT_STATUS_BUSY, &cp->type,
6029                                         sizeof(cp->type));
6030                 goto failed;
6031         }
6032
6033         uuid_count = __le16_to_cpu(cp->uuid_count);
6034         if (uuid_count > max_uuid_count) {
6035                 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6036                            uuid_count);
6037                 err = mgmt_cmd_complete(sk, hdev->id,
6038                                         MGMT_OP_START_SERVICE_DISCOVERY,
6039                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6040                                         sizeof(cp->type));
6041                 goto failed;
6042         }
6043
6044         expected_len = sizeof(*cp) + uuid_count * 16;
6045         if (expected_len != len) {
6046                 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6047                            expected_len, len);
6048                 err = mgmt_cmd_complete(sk, hdev->id,
6049                                         MGMT_OP_START_SERVICE_DISCOVERY,
6050                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6051                                         sizeof(cp->type));
6052                 goto failed;
6053         }
6054
6055         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6056                 err = mgmt_cmd_complete(sk, hdev->id,
6057                                         MGMT_OP_START_SERVICE_DISCOVERY,
6058                                         status, &cp->type, sizeof(cp->type));
6059                 goto failed;
6060         }
6061
6062         cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6063                                hdev, data, len);
6064         if (!cmd) {
6065                 err = -ENOMEM;
6066                 goto failed;
6067         }
6068
6069         /* Clear the discovery filter first to free any previously
6070          * allocated memory for the UUID list.
6071          */
6072         hci_discovery_filter_clear(hdev);
6073
6074         hdev->discovery.result_filtering = true;
6075         hdev->discovery.type = cp->type;
6076         hdev->discovery.rssi = cp->rssi;
6077         hdev->discovery.uuid_count = uuid_count;
6078
6079         if (uuid_count > 0) {
6080                 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6081                                                 GFP_KERNEL);
6082                 if (!hdev->discovery.uuids) {
6083                         err = mgmt_cmd_complete(sk, hdev->id,
6084                                                 MGMT_OP_START_SERVICE_DISCOVERY,
6085                                                 MGMT_STATUS_FAILED,
6086                                                 &cp->type, sizeof(cp->type));
6087                         mgmt_pending_remove(cmd);
6088                         goto failed;
6089                 }
6090         }
6091
6092         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6093                                  start_discovery_complete);
6094         if (err < 0) {
6095                 mgmt_pending_remove(cmd);
6096                 goto failed;
6097         }
6098
6099         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6100
6101 failed:
6102         hci_dev_unlock(hdev);
6103         return err;
6104 }
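
/* The UUID filter is a variable-length trailer of 16-byte (128-bit)
 * UUIDs after the fixed mgmt_cp_start_service_discovery header, hence
 * expected_len = sizeof(*cp) + uuid_count * 16.  For example, a request
 * filtering on two UUIDs must be exactly 32 bytes longer than the bare
 * header; any other length is rejected with MGMT_STATUS_INVALID_PARAMS
 * before any discovery state is touched.
 */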
6105
6106 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6107 {
6108         struct mgmt_pending_cmd *cmd;
6109
6110         bt_dev_dbg(hdev, "status %u", status);
6111
6112         hci_dev_lock(hdev);
6113
6114         cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6115         if (cmd) {
6116                 cmd->cmd_complete(cmd, mgmt_status(status));
6117                 mgmt_pending_remove(cmd);
6118         }
6119
6120         hci_dev_unlock(hdev);
6121 }
6122
6123 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6124 {
6125         struct mgmt_pending_cmd *cmd = data;
6126
6127         if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6128                 return;
6129
6130         bt_dev_dbg(hdev, "err %d", err);
6131
6132         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6133                           cmd->param, 1);
6134         mgmt_pending_remove(cmd);
6135
6136         if (!err)
6137                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6138 }
6139
6140 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6141 {
6142         return hci_stop_discovery_sync(hdev);
6143 }
6144
6145 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6146                           u16 len)
6147 {
6148         struct mgmt_cp_stop_discovery *mgmt_cp = data;
6149         struct mgmt_pending_cmd *cmd;
6150         int err;
6151
6152         bt_dev_dbg(hdev, "sock %p", sk);
6153
6154         hci_dev_lock(hdev);
6155
6156         if (!hci_discovery_active(hdev)) {
6157                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6158                                         MGMT_STATUS_REJECTED, &mgmt_cp->type,
6159                                         sizeof(mgmt_cp->type));
6160                 goto unlock;
6161         }
6162
6163         if (hdev->discovery.type != mgmt_cp->type) {
6164                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6165                                         MGMT_STATUS_INVALID_PARAMS,
6166                                         &mgmt_cp->type, sizeof(mgmt_cp->type));
6167                 goto unlock;
6168         }
6169
6170         cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6171         if (!cmd) {
6172                 err = -ENOMEM;
6173                 goto unlock;
6174         }
6175
6176         err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6177                                  stop_discovery_complete);
6178         if (err < 0) {
6179                 mgmt_pending_remove(cmd);
6180                 goto unlock;
6181         }
6182
6183         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6184
6185 unlock:
6186         hci_dev_unlock(hdev);
6187         return err;
6188 }
6189
6190 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6191                         u16 len)
6192 {
6193         struct mgmt_cp_confirm_name *cp = data;
6194         struct inquiry_entry *e;
6195         int err;
6196
6197         bt_dev_dbg(hdev, "sock %p", sk);
6198
6199         hci_dev_lock(hdev);
6200
6201         if (!hci_discovery_active(hdev)) {
6202                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6203                                         MGMT_STATUS_FAILED, &cp->addr,
6204                                         sizeof(cp->addr));
6205                 goto failed;
6206         }
6207
6208         e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6209         if (!e) {
6210                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6211                                         MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6212                                         sizeof(cp->addr));
6213                 goto failed;
6214         }
6215
6216         if (cp->name_known) {
6217                 e->name_state = NAME_KNOWN;
6218                 list_del(&e->list);
6219         } else {
6220                 e->name_state = NAME_NEEDED;
6221                 hci_inquiry_cache_update_resolve(hdev, e);
6222         }
6223
6224         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6225                                 &cp->addr, sizeof(cp->addr));
6226
6227 failed:
6228         hci_dev_unlock(hdev);
6229         return err;
6230 }
6231
6232 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6233                         u16 len)
6234 {
6235         struct mgmt_cp_block_device *cp = data;
6236         u8 status;
6237         int err;
6238
6239         bt_dev_dbg(hdev, "sock %p", sk);
6240
6241         if (!bdaddr_type_is_valid(cp->addr.type))
6242                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6243                                          MGMT_STATUS_INVALID_PARAMS,
6244                                          &cp->addr, sizeof(cp->addr));
6245
6246         hci_dev_lock(hdev);
6247
6248         err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6249                                   cp->addr.type);
6250         if (err < 0) {
6251                 status = MGMT_STATUS_FAILED;
6252                 goto done;
6253         }
6254
6255         mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6256                    sk);
6257         status = MGMT_STATUS_SUCCESS;
6258
6259 done:
6260         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6261                                 &cp->addr, sizeof(cp->addr));
6262
6263         hci_dev_unlock(hdev);
6264
6265         return err;
6266 }
6267
6268 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6269                           u16 len)
6270 {
6271         struct mgmt_cp_unblock_device *cp = data;
6272         u8 status;
6273         int err;
6274
6275         bt_dev_dbg(hdev, "sock %p", sk);
6276
6277         if (!bdaddr_type_is_valid(cp->addr.type))
6278                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6279                                          MGMT_STATUS_INVALID_PARAMS,
6280                                          &cp->addr, sizeof(cp->addr));
6281
6282         hci_dev_lock(hdev);
6283
6284         err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6285                                   cp->addr.type);
6286         if (err < 0) {
6287                 status = MGMT_STATUS_INVALID_PARAMS;
6288                 goto done;
6289         }
6290
6291         mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6292                    sk);
6293         status = MGMT_STATUS_SUCCESS;
6294
6295 done:
6296         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6297                                 &cp->addr, sizeof(cp->addr));
6298
6299         hci_dev_unlock(hdev);
6300
6301         return err;
6302 }
6303
6304 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6305 {
6306         return hci_update_eir_sync(hdev);
6307 }
6308
6309 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6310                          u16 len)
6311 {
6312         struct mgmt_cp_set_device_id *cp = data;
6313         int err;
6314         __u16 source;
6315
6316         bt_dev_dbg(hdev, "sock %p", sk);
6317
6318         source = __le16_to_cpu(cp->source);
6319
6320         if (source > 0x0002)
6321                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6322                                        MGMT_STATUS_INVALID_PARAMS);
6323
6324         hci_dev_lock(hdev);
6325
6326         hdev->devid_source = source;
6327         hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6328         hdev->devid_product = __le16_to_cpu(cp->product);
6329         hdev->devid_version = __le16_to_cpu(cp->version);
6330
6331         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6332                                 NULL, 0);
6333
6334         hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6335
6336         hci_dev_unlock(hdev);
6337
6338         return err;
6339 }
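
/* Only source values 0x0000-0x0002 are accepted; per the MGMT API these
 * stand for Device ID disabled, a Bluetooth SIG assigned vendor ID and a
 * USB Implementer's Forum assigned vendor ID.  The values are only
 * reflected in the EIR data, which is why the queued
 * hci_update_eir_sync() call is the only follow-up work needed.
 */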
6340
6341 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6342 {
6343         if (err)
6344                 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6345         else
6346                 bt_dev_dbg(hdev, "status %d", err);
6347 }
6348
6349 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6350 {
6351         struct cmd_lookup match = { NULL, hdev };
6352         u8 instance;
6353         struct adv_info *adv_instance;
6354         u8 status = mgmt_status(err);
6355
6356         if (status) {
6357                 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6358                                      cmd_status_rsp, &status);
6359                 return;
6360         }
6361
6362         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6363                 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6364         else
6365                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6366
6367         mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6368                              &match);
6369
6370         new_settings(hdev, match.sk);
6371
6372         if (match.sk)
6373                 sock_put(match.sk);
6374
6375         /* If "Set Advertising" was just disabled and instance advertising was
6376          * set up earlier, then re-enable multi-instance advertising.
6377          */
6378         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6379             list_empty(&hdev->adv_instances))
6380                 return;
6381
6382         instance = hdev->cur_adv_instance;
6383         if (!instance) {
6384                 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6385                                                         struct adv_info, list);
6386                 if (!adv_instance)
6387                         return;
6388
6389                 instance = adv_instance->instance;
6390         }
6391
6392         err = hci_schedule_adv_instance_sync(hdev, instance, true);
6393
6394         enable_advertising_instance(hdev, err);
6395 }
6396
6397 static int set_adv_sync(struct hci_dev *hdev, void *data)
6398 {
6399         struct mgmt_pending_cmd *cmd = data;
6400         struct mgmt_mode *cp = cmd->param;
6401         u8 val = !!cp->val;
6402
6403         if (cp->val == 0x02)
6404                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6405         else
6406                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6407
6408         cancel_adv_timeout(hdev);
6409
6410         if (val) {
6411                 /* Switch to instance "0" for the Set Advertising setting.
6412                  * We cannot use update_[adv|scan_rsp]_data() here as the
6413                  * HCI_ADVERTISING flag is not yet set.
6414                  */
6415                 hdev->cur_adv_instance = 0x00;
6416
6417                 if (ext_adv_capable(hdev)) {
6418                         hci_start_ext_adv_sync(hdev, 0x00);
6419                 } else {
6420                         hci_update_adv_data_sync(hdev, 0x00);
6421                         hci_update_scan_rsp_data_sync(hdev, 0x00);
6422                         hci_enable_advertising_sync(hdev);
6423                 }
6424         } else {
6425                 hci_disable_advertising_sync(hdev);
6426         }
6427
6428         return 0;
6429 }
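
/* Set Advertising takes three values: 0x00 disables advertising, 0x01
 * enables it and 0x02 enables it in connectable mode (tracked via the
 * HCI_ADVERTISING_CONNECTABLE flag above).  When enabling, instance
 * 0x00 is selected so the data configured through Set Advertising
 * itself, rather than a multi-advertising instance, goes on air, using
 * either the extended or the legacy advertising path depending on
 * controller support.
 */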
6430
6431 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6432                            u16 len)
6433 {
6434         struct mgmt_mode *cp = data;
6435         struct mgmt_pending_cmd *cmd;
6436         u8 val, status;
6437         int err;
6438
6439         bt_dev_dbg(hdev, "sock %p", sk);
6440
6441         status = mgmt_le_support(hdev);
6442         if (status)
6443                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6444                                        status);
6445
6446         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6447                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6448                                        MGMT_STATUS_INVALID_PARAMS);
6449
6450         if (hdev->advertising_paused)
6451                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6452                                        MGMT_STATUS_BUSY);
6453
6454         hci_dev_lock(hdev);
6455
6456         val = !!cp->val;
6457
6458         /* The following conditions mean that we should not do any
6459          * HCI communication but instead send a mgmt response directly
6460          * to user space (after toggling the flag if
6461          * necessary).
6462          */
6463         if (!hdev_is_powered(hdev) ||
6464             (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6465              (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6466             hci_dev_test_flag(hdev, HCI_MESH) ||
6467             hci_conn_num(hdev, LE_LINK) > 0 ||
6468             (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6469              hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6470                 bool changed;
6471
6472                 if (cp->val) {
6473                         hdev->cur_adv_instance = 0x00;
6474                         changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6475                         if (cp->val == 0x02)
6476                                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6477                         else
6478                                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6479                 } else {
6480                         changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6481                         hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6482                 }
6483
6484                 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6485                 if (err < 0)
6486                         goto unlock;
6487
6488                 if (changed)
6489                         err = new_settings(hdev, sk);
6490
6491                 goto unlock;
6492         }
6493
6494         if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6495             pending_find(MGMT_OP_SET_LE, hdev)) {
6496                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6497                                       MGMT_STATUS_BUSY);
6498                 goto unlock;
6499         }
6500
6501         cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6502         if (!cmd)
6503                 err = -ENOMEM;
6504         else
6505                 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6506                                          set_advertising_complete);
6507
6508         if (err < 0 && cmd)
6509                 mgmt_pending_remove(cmd);
6510
6511 unlock:
6512         hci_dev_unlock(hdev);
6513         return err;
6514 }
6515
6516 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6517                               void *data, u16 len)
6518 {
6519         struct mgmt_cp_set_static_address *cp = data;
6520         int err;
6521
6522         bt_dev_dbg(hdev, "sock %p", sk);
6523
6524         if (!lmp_le_capable(hdev))
6525                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6526                                        MGMT_STATUS_NOT_SUPPORTED);
6527
6528         if (hdev_is_powered(hdev))
6529                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6530                                        MGMT_STATUS_REJECTED);
6531
6532         if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6533                 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6534                         return mgmt_cmd_status(sk, hdev->id,
6535                                                MGMT_OP_SET_STATIC_ADDRESS,
6536                                                MGMT_STATUS_INVALID_PARAMS);
6537
6538                 /* Two most significant bits shall be set */
6539                 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6540                         return mgmt_cmd_status(sk, hdev->id,
6541                                                MGMT_OP_SET_STATIC_ADDRESS,
6542                                                MGMT_STATUS_INVALID_PARAMS);
6543         }
6544
6545         hci_dev_lock(hdev);
6546
6547         bacpy(&hdev->static_addr, &cp->bdaddr);
6548
6549         err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6550         if (err < 0)
6551                 goto unlock;
6552
6553         err = new_settings(hdev, sk);
6554
6555 unlock:
6556         hci_dev_unlock(hdev);
6557         return err;
6558 }
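
/* A static random address is identified by the two most significant
 * bits of its top byte being set, i.e. (bdaddr.b[5] & 0xc0) == 0xc0, so
 * for instance C0:xx:xx:xx:xx:xx passes the check while 00:... does not
 * and BDADDR_NONE is rejected explicitly.  BDADDR_ANY remains valid as
 * a way to clear the configured static address again.
 */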
6559
6560 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6561                            void *data, u16 len)
6562 {
6563         struct mgmt_cp_set_scan_params *cp = data;
6564         __u16 interval, window;
6565         int err;
6566
6567         bt_dev_dbg(hdev, "sock %p", sk);
6568
6569         if (!lmp_le_capable(hdev))
6570                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6571                                        MGMT_STATUS_NOT_SUPPORTED);
6572
6573         interval = __le16_to_cpu(cp->interval);
6574
6575         if (interval < 0x0004 || interval > 0x4000)
6576                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6577                                        MGMT_STATUS_INVALID_PARAMS);
6578
6579         window = __le16_to_cpu(cp->window);
6580
6581         if (window < 0x0004 || window > 0x4000)
6582                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6583                                        MGMT_STATUS_INVALID_PARAMS);
6584
6585         if (window > interval)
6586                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6587                                        MGMT_STATUS_INVALID_PARAMS);
6588
6589         hci_dev_lock(hdev);
6590
6591         hdev->le_scan_interval = interval;
6592         hdev->le_scan_window = window;
6593
6594         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6595                                 NULL, 0);
6596
6597         /* If background scan is running, restart it so new parameters are
6598          * loaded.
6599          */
6600         if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6601             hdev->discovery.state == DISCOVERY_STOPPED)
6602                 hci_update_passive_scan(hdev);
6603
6604         hci_dev_unlock(hdev);
6605
6606         return err;
6607 }
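
/* Scan interval and window use the 0.625 ms units of the HCI LE Set
 * Scan Parameters command, so the accepted 0x0004-0x4000 range spans
 * 2.5 ms to 10.24 s; e.g. interval = 0x0010 means scanning every 10 ms.
 * The window may never exceed the interval, and a running background
 * scan is restarted above so the new values take effect immediately.
 */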
6608
6609 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6610 {
6611         struct mgmt_pending_cmd *cmd = data;
6612
6613         bt_dev_dbg(hdev, "err %d", err);
6614
6615         if (err) {
6616                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6617                                 mgmt_status(err));
6618         } else {
6619                 struct mgmt_mode *cp = cmd->param;
6620
6621                 if (cp->val)
6622                         hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6623                 else
6624                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6625
6626                 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6627                 new_settings(hdev, cmd->sk);
6628         }
6629
6630         mgmt_pending_free(cmd);
6631 }
6632
6633 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6634 {
6635         struct mgmt_pending_cmd *cmd = data;
6636         struct mgmt_mode *cp = cmd->param;
6637
6638         return hci_write_fast_connectable_sync(hdev, cp->val);
6639 }
6640
6641 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6642                                 void *data, u16 len)
6643 {
6644         struct mgmt_mode *cp = data;
6645         struct mgmt_pending_cmd *cmd;
6646         int err;
6647
6648         bt_dev_dbg(hdev, "sock %p", sk);
6649
6650         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6651             hdev->hci_ver < BLUETOOTH_VER_1_2)
6652                 return mgmt_cmd_status(sk, hdev->id,
6653                                        MGMT_OP_SET_FAST_CONNECTABLE,
6654                                        MGMT_STATUS_NOT_SUPPORTED);
6655
6656         if (cp->val != 0x00 && cp->val != 0x01)
6657                 return mgmt_cmd_status(sk, hdev->id,
6658                                        MGMT_OP_SET_FAST_CONNECTABLE,
6659                                        MGMT_STATUS_INVALID_PARAMS);
6660
6661         hci_dev_lock(hdev);
6662
6663         if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6664                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6665                 goto unlock;
6666         }
6667
6668         if (!hdev_is_powered(hdev)) {
6669                 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6670                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6671                 new_settings(hdev, sk);
6672                 goto unlock;
6673         }
6674
6675         cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6676                                len);
6677         if (!cmd)
6678                 err = -ENOMEM;
6679         else
6680                 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6681                                          fast_connectable_complete);
6682
6683         if (err < 0) {
6684                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6685                                 MGMT_STATUS_FAILED);
6686
6687                 if (cmd)
6688                         mgmt_pending_free(cmd);
6689         }
6690
6691 unlock:
6692         hci_dev_unlock(hdev);
6693
6694         return err;
6695 }
6696
6697 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6698 {
6699         struct mgmt_pending_cmd *cmd = data;
6700
6701         bt_dev_dbg(hdev, "err %d", err);
6702
6703         if (err) {
6704                 u8 mgmt_err = mgmt_status(err);
6705
6706                 /* We need to restore the flag if related HCI commands
6707                  * failed.
6708                  */
6709                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6710
6711                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6712         } else {
6713                 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6714                 new_settings(hdev, cmd->sk);
6715         }
6716
6717         mgmt_pending_free(cmd);
6718 }
6719
6720 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6721 {
6722         int status;
6723
6724         status = hci_write_fast_connectable_sync(hdev, false);
6725
6726         if (!status)
6727                 status = hci_update_scan_sync(hdev);
6728
6729         /* Since only the advertising data flags will change, there
6730          * is no need to update the scan response data.
6731          */
6732         if (!status)
6733                 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6734
6735         return status;
6736 }
6737
6738 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6739 {
6740         struct mgmt_mode *cp = data;
6741         struct mgmt_pending_cmd *cmd;
6742         int err;
6743
6744         bt_dev_dbg(hdev, "sock %p", sk);
6745
6746         if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6747                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6748                                        MGMT_STATUS_NOT_SUPPORTED);
6749
6750         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6751                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6752                                        MGMT_STATUS_REJECTED);
6753
6754         if (cp->val != 0x00 && cp->val != 0x01)
6755                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6756                                        MGMT_STATUS_INVALID_PARAMS);
6757
6758         hci_dev_lock(hdev);
6759
6760         if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6761                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6762                 goto unlock;
6763         }
6764
6765         if (!hdev_is_powered(hdev)) {
6766                 if (!cp->val) {
6767                         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6768                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6769                         hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6770                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6771                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6772                 }
6773
6774                 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6775
6776                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6777                 if (err < 0)
6778                         goto unlock;
6779
6780                 err = new_settings(hdev, sk);
6781                 goto unlock;
6782         }
6783
6784         /* Reject disabling when powered on */
6785         if (!cp->val) {
6786                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6787                                       MGMT_STATUS_REJECTED);
6788                 goto unlock;
6789         } else {
6790                 /* When a dual-mode controller has been configured to operate
6791                  * with LE only and to use a static address, switching
6792                  * BR/EDR back on is not allowed.
6793                  *
6794                  * Dual-mode controllers shall operate with the public
6795                  * address as their identity address for BR/EDR and LE. So
6796                  * reject the attempt to create an invalid configuration.
6797                  *
6798                  * The same restriction applies when Secure Connections
6799                  * has been enabled. For BR/EDR this is a controller feature
6800                  * while for LE it is a host stack feature. This means that
6801                  * switching BR/EDR back on when Secure Connections has been
6802                  * enabled is not a supported transaction.
6803                  */
6804                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6805                     (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6806                      hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6807                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6808                                               MGMT_STATUS_REJECTED);
6809                         goto unlock;
6810                 }
6811         }
6812
6813         cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6814         if (!cmd)
6815                 err = -ENOMEM;
6816         else
6817                 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6818                                          set_bredr_complete);
6819
6820         if (err < 0) {
6821                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6822                                 MGMT_STATUS_FAILED);
6823                 if (cmd)
6824                         mgmt_pending_free(cmd);
6825
6826                 goto unlock;
6827         }
6828
6829         /* We need to flip the bit already here so that
6830          * hci_update_adv_data_sync generates the correct flags.
6831          */
6832         hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6833
6834 unlock:
6835         hci_dev_unlock(hdev);
6836         return err;
6837 }
6838
6839 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6840 {
6841         struct mgmt_pending_cmd *cmd = data;
6842         struct mgmt_mode *cp;
6843
6844         bt_dev_dbg(hdev, "err %d", err);
6845
6846         if (err) {
6847                 u8 mgmt_err = mgmt_status(err);
6848
6849                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6850                 goto done;
6851         }
6852
6853         cp = cmd->param;
6854
6855         switch (cp->val) {
6856         case 0x00:
6857                 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6858                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6859                 break;
6860         case 0x01:
6861                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6862                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6863                 break;
6864         case 0x02:
6865                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6866                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6867                 break;
6868         }
6869
6870         send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6871         new_settings(hdev, cmd->sk);
6872
6873 done:
6874         mgmt_pending_free(cmd);
6875 }
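
/* The Secure Connections modes handled above are: 0x00 disables SC,
 * 0x01 enables SC alongside legacy pairing and 0x02 enables "SC only"
 * mode, where HCI_SC_ONLY is set in addition to HCI_SC_ENABLED.  The
 * flags are only committed once the controller has acknowledged the
 * change (err == 0).
 */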
6876
6877 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6878 {
6879         struct mgmt_pending_cmd *cmd = data;
6880         struct mgmt_mode *cp = cmd->param;
6881         u8 val = !!cp->val;
6882
6883         /* Force write of val */
6884         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6885
6886         return hci_write_sc_support_sync(hdev, val);
6887 }
6888
6889 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6890                            void *data, u16 len)
6891 {
6892         struct mgmt_mode *cp = data;
6893         struct mgmt_pending_cmd *cmd;
6894         u8 val;
6895         int err;
6896
6897         bt_dev_dbg(hdev, "sock %p", sk);
6898
6899         if (!lmp_sc_capable(hdev) &&
6900             !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6901                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6902                                        MGMT_STATUS_NOT_SUPPORTED);
6903
6904         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6905             lmp_sc_capable(hdev) &&
6906             !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6907                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6908                                        MGMT_STATUS_REJECTED);
6909
6910         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6911                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6912                                        MGMT_STATUS_INVALID_PARAMS);
6913
6914         hci_dev_lock(hdev);
6915
6916         if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6917             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6918                 bool changed;
6919
6920                 if (cp->val) {
6921                         changed = !hci_dev_test_and_set_flag(hdev,
6922                                                              HCI_SC_ENABLED);
6923                         if (cp->val == 0x02)
6924                                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6925                         else
6926                                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6927                 } else {
6928                         changed = hci_dev_test_and_clear_flag(hdev,
6929                                                               HCI_SC_ENABLED);
6930                         hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6931                 }
6932
6933                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6934                 if (err < 0)
6935                         goto failed;
6936
6937                 if (changed)
6938                         err = new_settings(hdev, sk);
6939
6940                 goto failed;
6941         }
6942
6943         val = !!cp->val;
6944
6945         if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6946             (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6947                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6948                 goto failed;
6949         }
6950
6951         cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6952         if (!cmd)
6953                 err = -ENOMEM;
6954         else
6955                 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6956                                          set_secure_conn_complete);
6957
6958         if (err < 0) {
6959                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6960                                 MGMT_STATUS_FAILED);
6961                 if (cmd)
6962                         mgmt_pending_free(cmd);
6963         }
6964
6965 failed:
6966         hci_dev_unlock(hdev);
6967         return err;
6968 }
6969
6970 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6971                           void *data, u16 len)
6972 {
6973         struct mgmt_mode *cp = data;
6974         bool changed, use_changed;
6975         int err;
6976
6977         bt_dev_dbg(hdev, "sock %p", sk);
6978
6979         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6980                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6981                                        MGMT_STATUS_INVALID_PARAMS);
6982
6983         hci_dev_lock(hdev);
6984
6985         if (cp->val)
6986                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6987         else
6988                 changed = hci_dev_test_and_clear_flag(hdev,
6989                                                       HCI_KEEP_DEBUG_KEYS);
6990
6991         if (cp->val == 0x02)
6992                 use_changed = !hci_dev_test_and_set_flag(hdev,
6993                                                          HCI_USE_DEBUG_KEYS);
6994         else
6995                 use_changed = hci_dev_test_and_clear_flag(hdev,
6996                                                           HCI_USE_DEBUG_KEYS);
6997
6998         if (hdev_is_powered(hdev) && use_changed &&
6999             hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7000                 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7001                 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7002                              sizeof(mode), &mode);
7003         }
7004
7005         err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7006         if (err < 0)
7007                 goto unlock;
7008
7009         if (changed)
7010                 err = new_settings(hdev, sk);
7011
7012 unlock:
7013         hci_dev_unlock(hdev);
7014         return err;
7015 }
7016
7017 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7018                        u16 len)
7019 {
7020         struct mgmt_cp_set_privacy *cp = cp_data;
7021         bool changed;
7022         int err;
7023
7024         bt_dev_dbg(hdev, "sock %p", sk);
7025
7026         if (!lmp_le_capable(hdev))
7027                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7028                                        MGMT_STATUS_NOT_SUPPORTED);
7029
7030         if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7031                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7032                                        MGMT_STATUS_INVALID_PARAMS);
7033
7034         if (hdev_is_powered(hdev))
7035                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7036                                        MGMT_STATUS_REJECTED);
7037
7038         hci_dev_lock(hdev);
7039
7040         /* If user space supports this command it is also expected to
7041          * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7042          */
7043         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7044
7045         if (cp->privacy) {
7046                 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7047                 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7048                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7049                 hci_adv_instances_set_rpa_expired(hdev, true);
7050                 if (cp->privacy == 0x02)
7051                         hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7052                 else
7053                         hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7054         } else {
7055                 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7056                 memset(hdev->irk, 0, sizeof(hdev->irk));
7057                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7058                 hci_adv_instances_set_rpa_expired(hdev, false);
7059                 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7060         }
7061
7062         err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7063         if (err < 0)
7064                 goto unlock;
7065
7066         if (changed)
7067                 err = new_settings(hdev, sk);
7068
7069 unlock:
7070         hci_dev_unlock(hdev);
7071         return err;
7072 }
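
/* Privacy value 0x01 enables use of resolvable private addresses
 * generated from the supplied 16-byte IRK, while 0x02 additionally sets
 * HCI_LIMITED_PRIVACY, a relaxed mode in which the identity address may
 * still be used in some states (see the MGMT API description of
 * "limited privacy").  Since the controller must be powered off for
 * this command, the new IRK simply takes effect on the next power on.
 */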
7073
7074 static bool irk_is_valid(struct mgmt_irk_info *irk)
7075 {
7076         switch (irk->addr.type) {
7077         case BDADDR_LE_PUBLIC:
7078                 return true;
7079
7080         case BDADDR_LE_RANDOM:
7081                 /* Two most significant bits shall be set */
7082                 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7083                         return false;
7084                 return true;
7085         }
7086
7087         return false;
7088 }
7089
7090 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7091                      u16 len)
7092 {
7093         struct mgmt_cp_load_irks *cp = cp_data;
7094         const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7095                                    sizeof(struct mgmt_irk_info));
7096         u16 irk_count, expected_len;
7097         int i, err;
7098
7099         bt_dev_dbg(hdev, "sock %p", sk);
7100
7101         if (!lmp_le_capable(hdev))
7102                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7103                                        MGMT_STATUS_NOT_SUPPORTED);
7104
7105         irk_count = __le16_to_cpu(cp->irk_count);
7106         if (irk_count > max_irk_count) {
7107                 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7108                            irk_count);
7109                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7110                                        MGMT_STATUS_INVALID_PARAMS);
7111         }
7112
7113         expected_len = struct_size(cp, irks, irk_count);
7114         if (expected_len != len) {
7115                 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7116                            expected_len, len);
7117                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7118                                        MGMT_STATUS_INVALID_PARAMS);
7119         }
7120
7121         bt_dev_dbg(hdev, "irk_count %u", irk_count);
7122
7123         for (i = 0; i < irk_count; i++) {
7124                 struct mgmt_irk_info *key = &cp->irks[i];
7125
7126                 if (!irk_is_valid(key))
7127                         return mgmt_cmd_status(sk, hdev->id,
7128                                                MGMT_OP_LOAD_IRKS,
7129                                                MGMT_STATUS_INVALID_PARAMS);
7130         }
7131
7132         hci_dev_lock(hdev);
7133
7134         hci_smp_irks_clear(hdev);
7135
7136         for (i = 0; i < irk_count; i++) {
7137                 struct mgmt_irk_info *irk = &cp->irks[i];
7138                 u8 addr_type = le_addr_type(irk->addr.type);
7139
7140                 if (hci_is_blocked_key(hdev,
7141                                        HCI_BLOCKED_KEY_TYPE_IRK,
7142                                        irk->val)) {
7143                         bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7144                                     &irk->addr.bdaddr);
7145                         continue;
7146                 }
7147
7148                 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7149                 if (irk->addr.type == BDADDR_BREDR)
7150                         addr_type = BDADDR_BREDR;
7151
7152                 hci_add_irk(hdev, &irk->addr.bdaddr,
7153                             addr_type, irk->val,
7154                             BDADDR_ANY);
7155         }
7156
7157         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7158
7159         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7160
7161         hci_dev_unlock(hdev);
7162
7163         return err;
7164 }
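
/* Each mgmt_irk_info entry is an address-info header followed by the
 * 16-byte IRK value, and struct_size(cp, irks, irk_count) evaluates to
 * sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info), which the
 * command length must match exactly.  Blocked IRKs are skipped rather
 * than failing the whole load, and HCI_RPA_RESOLVING is set afterwards
 * since a user space that loads IRKs is expected to handle resolvable
 * private addresses.
 */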
7165
7166 #ifdef TIZEN_BT
7167 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7168                         void *data, u16 len)
7169 {
7170         struct mgmt_cp_set_advertising_params *cp = data;
7171         __u16 min_interval;
7172         __u16 max_interval;
7173         int err;
7174
7175         BT_DBG("%s", hdev->name);
7176
7177         if (!lmp_le_capable(hdev))
7178                 return mgmt_cmd_status(sk, hdev->id,
7179                                 MGMT_OP_SET_ADVERTISING_PARAMS,
7180                                 MGMT_STATUS_NOT_SUPPORTED);
7181
7182         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7183                 return mgmt_cmd_status(sk, hdev->id,
7184                                 MGMT_OP_SET_ADVERTISING_PARAMS,
7185                                 MGMT_STATUS_BUSY);
7186
7187         min_interval = __le16_to_cpu(cp->interval_min);
7188         max_interval = __le16_to_cpu(cp->interval_max);
7189
7190         if (min_interval > max_interval ||
7191             min_interval < 0x0020 || max_interval > 0x4000)
7192                 return mgmt_cmd_status(sk, hdev->id,
7193                                 MGMT_OP_SET_ADVERTISING_PARAMS,
7194                                 MGMT_STATUS_INVALID_PARAMS);
7195
7196         hci_dev_lock(hdev);
7197
7198         hdev->le_adv_min_interval = min_interval;
7199         hdev->le_adv_max_interval = max_interval;
7200         hdev->adv_filter_policy = cp->filter_policy;
7201         hdev->adv_type = cp->type;
7202
7203         err = mgmt_cmd_complete(sk, hdev->id,
7204                         MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7205
7206         hci_dev_unlock(hdev);
7207
7208         return err;
7209 }
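
/* The advertising interval limits mirror the HCI LE Set Advertising
 * Parameters command: values are in 0.625 ms units, so the accepted
 * 0x0020-0x4000 range corresponds to 20 ms - 10.24 s, and the minimum
 * may not exceed the maximum.  The parameters are only cached in hdev
 * here; they are applied the next time advertising is enabled.
 */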
7210
7211 static void set_advertising_data_complete(struct hci_dev *hdev,
7212                         u8 status, u16 opcode)
7213 {
7214         struct mgmt_cp_set_advertising_data *cp;
7215         struct mgmt_pending_cmd *cmd;
7216
7217         BT_DBG("status 0x%02x", status);
7218
7219         hci_dev_lock(hdev);
7220
7221         cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7222         if (!cmd)
7223                 goto unlock;
7224
7225         cp = cmd->param;
7226
7227         if (status)
7228                 mgmt_cmd_status(cmd->sk, hdev->id,
7229                                 MGMT_OP_SET_ADVERTISING_DATA,
7230                                 mgmt_status(status));
7231         else
7232                 mgmt_cmd_complete(cmd->sk, hdev->id,
7233                                 MGMT_OP_SET_ADVERTISING_DATA, 0,
7234                                 cp, sizeof(*cp));
7235
7236         mgmt_pending_remove(cmd);
7237
7238 unlock:
7239         hci_dev_unlock(hdev);
7240 }
7241
7242 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7243                         void *data, u16 len)
7244 {
7245         struct mgmt_pending_cmd *cmd;
7246         struct hci_request req;
7247         struct mgmt_cp_set_advertising_data *cp = data;
7248         struct hci_cp_le_set_adv_data adv;
7249         int err;
7250
7251         BT_DBG("%s", hdev->name);
7252
7253         if (!lmp_le_capable(hdev)) {
7254                 return mgmt_cmd_status(sk, hdev->id,
7255                                 MGMT_OP_SET_ADVERTISING_DATA,
7256                                 MGMT_STATUS_NOT_SUPPORTED);
7257         }
7258
7259         hci_dev_lock(hdev);
7260
7261         if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7262                 err = mgmt_cmd_status(sk, hdev->id,
7263                                 MGMT_OP_SET_ADVERTISING_DATA,
7264                                 MGMT_STATUS_BUSY);
7265                 goto unlocked;
7266         }
7267
7268         if (len > HCI_MAX_AD_LENGTH) {
7269                 err = mgmt_cmd_status(sk, hdev->id,
7270                                 MGMT_OP_SET_ADVERTISING_DATA,
7271                                 MGMT_STATUS_INVALID_PARAMS);
7272                 goto unlocked;
7273         }
7274
7275         cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7276                                hdev, data, len);
7277         if (!cmd) {
7278                 err = -ENOMEM;
7279                 goto unlocked;
7280         }
7281
7282         hci_req_init(&req, hdev);
7283
7284         memset(&adv, 0, sizeof(adv));
7285         memcpy(adv.data, cp->data, len);
7286         adv.length = len;
7287
7288         hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7289
7290         err = hci_req_run(&req, set_advertising_data_complete);
7291         if (err < 0)
7292                 mgmt_pending_remove(cmd);
7293
7294 unlocked:
7295         hci_dev_unlock(hdev);
7296
7297         return err;
7298 }
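
/* The raw advertising data supplied by user space is copied verbatim
 * into a legacy HCI_OP_LE_SET_ADV_DATA command, so it is limited to
 * HCI_MAX_AD_LENGTH (31 bytes); longer payloads are rejected with
 * MGMT_STATUS_INVALID_PARAMS before any HCI traffic is generated.
 */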
7299
7300 /* Adv White List feature */
7301 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7302 {
7303         struct mgmt_cp_add_dev_white_list *cp;
7304         struct mgmt_pending_cmd *cmd;
7305
7306         BT_DBG("status 0x%02x", status);
7307
7308         hci_dev_lock(hdev);
7309
7310         cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7311         if (!cmd)
7312                 goto unlock;
7313
7314         cp = cmd->param;
7315
7316         if (status)
7317                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7318                            mgmt_status(status));
7319         else
7320                 mgmt_cmd_complete(cmd->sk, hdev->id,
7321                                 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7322
7323         mgmt_pending_remove(cmd);
7324
7325 unlock:
7326         hci_dev_unlock(hdev);
7327 }
7328
7329 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7330                            void *data, u16 len)
7331 {
7332         struct mgmt_pending_cmd *cmd;
7333         struct mgmt_cp_add_dev_white_list *cp = data;
7334         struct hci_request req;
7335         int err;
7336
7337         BT_DBG("%s", hdev->name);
7338
7339         if (!lmp_le_capable(hdev))
7340                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7341                                   MGMT_STATUS_NOT_SUPPORTED);
7342
7343         if (!hdev_is_powered(hdev))
7344                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7345                                   MGMT_STATUS_REJECTED);
7346
7347         hci_dev_lock(hdev);
7348
7349         if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7350                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7351                                 MGMT_STATUS_BUSY);
7352                 goto unlocked;
7353         }
7354
7355         cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7356         if (!cmd) {
7357                 err = -ENOMEM;
7358                 goto unlocked;
7359         }
7360
7361         hci_req_init(&req, hdev);
7362
7363         hci_req_add(&req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(*cp), cp);
7364
7365         err = hci_req_run(&req, add_white_list_complete);
7366         if (err < 0) {
7367                 mgmt_pending_remove(cmd);
7368                 goto unlocked;
7369         }
7370
7371 unlocked:
7372         hci_dev_unlock(hdev);
7373
7374         return err;
7375 }
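
/* Although this Tizen-specific command keeps the historical "white
 * list" naming, the request built above uses the renamed
 * HCI_OP_LE_ADD_TO_ACCEPT_LIST opcode, i.e. the supplied address is
 * added to the controller's LE filter accept list.
 */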
7376
7377 static void remove_from_white_list_complete(struct hci_dev *hdev,
7378                         u8 status, u16 opcode)
7379 {
7380         struct mgmt_cp_remove_dev_from_white_list *cp;
7381         struct mgmt_pending_cmd *cmd;
7382
7383         BT_DBG("status 0x%02x", status);
7384
7385         hci_dev_lock(hdev);
7386
7387         cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7388         if (!cmd)
7389                 goto unlock;
7390
7391         cp = cmd->param;
7392
7393         if (status)
7394                 mgmt_cmd_status(cmd->sk, hdev->id,
7395                         MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7396                         mgmt_status(status));
7397         else
7398                 mgmt_cmd_complete(cmd->sk, hdev->id,
7399                         MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7400                         cp, sizeof(*cp));
7401
7402         mgmt_pending_remove(cmd);
7403
7404 unlock:
7405         hci_dev_unlock(hdev);
7406 }
7407
7408 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7409                            void *data, u16 len)
7410 {
7411         struct mgmt_pending_cmd *cmd;
7412         struct mgmt_cp_remove_dev_from_white_list *cp = data;
7413         struct hci_request req;
7414         int err;
7415
7416         BT_DBG("%s", hdev->name);
7417
7418         if (!lmp_le_capable(hdev))
7419                 return mgmt_cmd_status(sk, hdev->id,
7420                                 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7421                                 MGMT_STATUS_NOT_SUPPORTED);
7422
7423         if (!hdev_is_powered(hdev))
7424                 return mgmt_cmd_status(sk, hdev->id,
7425                                 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7426                                 MGMT_STATUS_REJECTED);
7427
7428         hci_dev_lock(hdev);
7429
7430         if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7431                 err = mgmt_cmd_status(sk, hdev->id,
7432                                 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7433                                 MGMT_STATUS_BUSY);
7434                 goto unlocked;
7435         }
7436
7437         cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7438                                 hdev, data, len);
7439         if (!cmd) {
7440                 err = -ENOMEM;
7441                 goto unlocked;
7442         }
7443
7444         hci_req_init(&req, hdev);
7445
7446         hci_req_add(&req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(*cp), cp);
7447
7448         err = hci_req_run(&req, remove_from_white_list_complete);
7449         if (err < 0) {
7450                 mgmt_pending_remove(cmd);
7451                 goto unlocked;
7452         }
7453
7454 unlocked:
7455         hci_dev_unlock(hdev);
7456
7457         return err;
7458 }
7459
7460 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7461                         u16 opcode)
7462 {
7463         struct mgmt_pending_cmd *cmd;
7464
7465         BT_DBG("status 0x%02x", status);
7466
7467         hci_dev_lock(hdev);
7468
7469         cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7470         if (!cmd)
7471                 goto unlock;
7472
7473         if (status)
7474                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7475                            mgmt_status(status));
7476         else
7477                 mgmt_cmd_complete(cmd->sk, hdev->id,
7478                                 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7479                                 0, NULL, 0);
7480
7481         mgmt_pending_remove(cmd);
7482
7483 unlock:
7484         hci_dev_unlock(hdev);
7485 }
7486
7487 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7488                            void *data, u16 len)
7489 {
7490         struct mgmt_pending_cmd *cmd;
7491         struct hci_request req;
7492         int err;
7493
7494         BT_DBG("%s", hdev->name);
7495
7496         if (!lmp_le_capable(hdev))
7497                 return mgmt_cmd_status(sk, hdev->id,
7498                                 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7499                                 MGMT_STATUS_NOT_SUPPORTED);
7500
7501         if (!hdev_is_powered(hdev))
7502                 return mgmt_cmd_status(sk, hdev->id,
7503                                 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7504                                 MGMT_STATUS_REJECTED);
7505
7506         hci_dev_lock(hdev);
7507
7508         if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7509                 err = mgmt_cmd_status(sk, hdev->id,
7510                                 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7511                                 MGMT_STATUS_BUSY);
7512                 goto unlocked;
7513         }
7514
7515         cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7516                                 hdev, NULL, 0);
7517         if (!cmd) {
7518                 err = -ENOMEM;
7519                 goto unlocked;
7520         }
7521
7522         hci_req_init(&req, hdev);
7523
7524         hci_req_add(&req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
7525
7526         err = hci_req_run(&req, clear_white_list_complete);
7527         if (err < 0) {
7528                 mgmt_pending_remove(cmd);
7529                 goto unlocked;
7530         }
7531
7532 unlocked:
7533         hci_dev_unlock(hdev);
7534
7535         return err;
7536 }
7537
7538 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7539                         u16 opcode)
7540 {
7541         struct mgmt_cp_set_scan_rsp_data *cp;
7542         struct mgmt_pending_cmd *cmd;
7543
7544         BT_DBG("status 0x%02x", status);
7545
7546         hci_dev_lock(hdev);
7547
7548         cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7549         if (!cmd)
7550                 goto unlock;
7551
7552         cp = cmd->param;
7553
7554         if (status)
7555                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7556                                 mgmt_status(status));
7557         else
7558                 mgmt_cmd_complete(cmd->sk, hdev->id,
7559                                 MGMT_OP_SET_SCAN_RSP_DATA, 0,
7560                                 cp, sizeof(*cp));
7561
7562         mgmt_pending_remove(cmd);
7563
7564 unlock:
7565         hci_dev_unlock(hdev);
7566 }
7567
7568 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7569                         u16 len)
7570 {
7571         struct mgmt_pending_cmd *cmd;
7572         struct hci_request req;
7573         struct mgmt_cp_set_scan_rsp_data *cp = data;
7574         struct hci_cp_le_set_scan_rsp_data rsp;
7575         int err;
7576
7577         BT_DBG("%s", hdev->name);
7578
7579         if (!lmp_le_capable(hdev))
7580                 return mgmt_cmd_status(sk, hdev->id,
7581                                 MGMT_OP_SET_SCAN_RSP_DATA,
7582                                 MGMT_STATUS_NOT_SUPPORTED);
7583
7584         hci_dev_lock(hdev);
7585
7586         if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7587                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7588                                 MGMT_STATUS_BUSY);
7589                 goto unlocked;
7590         }
7591
7592         if (len > HCI_MAX_AD_LENGTH) {
7593                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7594                                 MGMT_STATUS_INVALID_PARAMS);
7595                 goto unlocked;
7596         }
7597
7598         cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7599         if (!cmd) {
7600                 err = -ENOMEM;
7601                 goto unlocked;
7602         }
7603
7604         hci_req_init(&req, hdev);
7605
7606         memset(&rsp, 0, sizeof(rsp));
7607         memcpy(rsp.data, cp->data, len);
7608         rsp.length = len;
7609
7610         hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7611
7612         err = hci_req_run(&req, set_scan_rsp_data_complete);
7613         if (err < 0)
7614                 mgmt_pending_remove(cmd);
7615
7616 unlocked:
7617         hci_dev_unlock(hdev);
7618
7619         return err;
7620 }
7621
7622 static void set_rssi_threshold_complete(struct hci_dev *hdev,
7623                         u8 status, u16 opcode)
7624 {
7625         struct mgmt_pending_cmd *cmd;
7626
7627         BT_DBG("status 0x%02x", status);
7628
7629         hci_dev_lock(hdev);
7630
7631         cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7632         if (!cmd)
7633                 goto unlock;
7634
7635         if (status)
7636                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7637                                 mgmt_status(status));
7638         else
7639                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7640                                 NULL, 0);
7641
7642         mgmt_pending_remove(cmd);
7643
7644 unlock:
7645         hci_dev_unlock(hdev);
7646 }
7647
7648 static void set_rssi_disable_complete(struct hci_dev *hdev,
7649                         u8 status, u16 opcode)
7650 {
7651         struct mgmt_pending_cmd *cmd;
7652
7653         BT_DBG("status 0x%02x", status);
7654
7655         hci_dev_lock(hdev);
7656
7657         cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7658         if (!cmd)
7659                 goto unlock;
7660
7661         if (status)
7662                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7663                                 mgmt_status(status));
7664         else
7665                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7666                                 0, NULL, 0);
7667
7668         mgmt_pending_remove(cmd);
7669
7670 unlock:
7671         hci_dev_unlock(hdev);
7672 }
7673
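/* Program per-connection RSSI monitoring thresholds via the vendor
 * HCI_OP_ENABLE_RSSI command (le_ext_opcode 0x0B).  The connection is
 * looked up by bdaddr and link type, and the low/in-range/high
 * thresholds from the mgmt request are forwarded with an alert mask of
 * 0x07 (alert on all three ranges).  Expects a pending
 * MGMT_OP_SET_RSSI_ENABLE command to already exist (see
 * set_enable_rssi() below).
 */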
7674 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
7675                 void *data, u16 len)
7676 {
7677         int err = 0;
7678         struct hci_cp_set_rssi_threshold th = { 0, };
7679         struct mgmt_cp_set_enable_rssi *cp = data;
7680         struct hci_conn *conn;
7681         struct mgmt_pending_cmd *cmd;
7682         struct hci_request req;
7683         __u8 dest_type;
7684
7685         hci_dev_lock(hdev);
7686
7687         cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7688         if (!cmd) {
7689                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7690                                 MGMT_STATUS_FAILED);
7691                 goto unlocked;
7692         }
7693
7694         if (!lmp_le_capable(hdev)) {
7695                 mgmt_pending_remove(cmd);
7696                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7697                                 MGMT_STATUS_NOT_SUPPORTED);
7698                 goto unlocked;
7699         }
7700
7701         if (!hdev_is_powered(hdev)) {
7702                 BT_DBG("%s", hdev->name);
7703                 mgmt_pending_remove(cmd);
7704                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7705                                 MGMT_STATUS_NOT_POWERED);
7706                 goto unlocked;
7707         }
7708
7709         if (cp->link_type == 0x01)
7710                 dest_type = LE_LINK;
7711         else
7712                 dest_type = ACL_LINK;
7713
7714         /* Get LE/ACL link handle info */
7715         conn = hci_conn_hash_lookup_ba(hdev,
7716                         dest_type, &cp->bdaddr);
7717
7718         if (!conn) {
7719                 err = mgmt_cmd_complete(sk, hdev->id,
7720                                 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
7721                 mgmt_pending_remove(cmd);
7722                 goto unlocked;
7723         }
7724
7725         hci_req_init(&req, hdev);
7726
7727         th.hci_le_ext_opcode = 0x0B;
7728         th.mode = 0x01;
7729         th.conn_handle = conn->handle;
7730         th.alert_mask = 0x07;
7731         th.low_th = cp->low_th;
7732         th.in_range_th = cp->in_range_th;
7733         th.high_th = cp->high_th;
7734
7735         hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7736         err = hci_req_run(&req, set_rssi_threshold_complete);
7737
7738         if (err < 0) {
7739                 mgmt_pending_remove(cmd);
7740                 BT_ERR("hci_req_run failed");
7741                 goto unlocked;
7742         }
7743
7744 unlocked:
7745         hci_dev_unlock(hdev);
7746         return err;
7747 }
7748
7749 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
7750                 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7751 {
7752         struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
7753         struct mgmt_cp_set_enable_rssi *cp = data;
7754         struct mgmt_pending_cmd *cmd;
7755
7756         if (!cp || !rp)
7757                 goto remove_cmd;
7758
7759         mgmt_rp.status = rp->status;
7760         mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7761         mgmt_rp.bt_address = cp->bdaddr;
7762         mgmt_rp.link_type = cp->link_type;
7763
7764         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7765                         MGMT_STATUS_SUCCESS, &mgmt_rp,
7766                         sizeof(struct mgmt_cc_rsp_enable_rssi));
7767
7768         mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
7769                         sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7770
7771         hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
7772         hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7773                         &mgmt_rp.bt_address, true);
7774
7775 remove_cmd:
7776         hci_dev_lock(hdev);
7777         cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7778         if (cmd)
7779                 mgmt_pending_remove(cmd);
7780
7781         hci_dev_unlock(hdev);
7782 }
7783
7784 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
7785                 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7786 {
7787         struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
7788         struct mgmt_cp_disable_rssi *cp = data;
7789         struct mgmt_pending_cmd *cmd;
7790
7791         if (!cp || !rp)
7792                 goto remove_cmd;
7793
7794         mgmt_rp.status = rp->status;
7795         mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7796         mgmt_rp.bt_address = cp->bdaddr;
7797         mgmt_rp.link_type = cp->link_type;
7798
7799         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7800                         MGMT_STATUS_SUCCESS, &mgmt_rp,
7801                         sizeof(struct mgmt_cc_rsp_enable_rssi));
7802
7803         mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
7804                         sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7805
7806         hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7807                         &mgmt_rp.bt_address, false);
7808
7809 remove_cmd:
7810         hci_dev_lock(hdev);
7811         cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7812         if (cmd)
7813                 mgmt_pending_remove(cmd);
7814
7815         hci_dev_unlock(hdev);
7816 }
7817
7818 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
7819                 void *data, u16 len)
7820 {
7821         struct mgmt_pending_cmd *cmd;
7822         struct hci_request req;
7823         struct hci_cp_set_enable_rssi cp_en = { 0, };
7824         int err;
7825
7826         BT_DBG("Set Disable RSSI.");
7827
7828         cp_en.hci_le_ext_opcode = 0x01;
7829         cp_en.le_enable_cs_Features = 0x00;
7830         cp_en.data[0] = 0x00;
7831         cp_en.data[1] = 0x00;
7832         cp_en.data[2] = 0x00;
7833
7834         hci_dev_lock(hdev);
7835
7836         cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7837         if (!cmd) {
7838                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7839                                 MGMT_STATUS_FAILED);
7840                 goto unlocked;
7841         }
7842
7843         if (!lmp_le_capable(hdev)) {
7844                 mgmt_pending_remove(cmd);
7845                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7846                                 MGMT_STATUS_NOT_SUPPORTED);
7847                 goto unlocked;
7848         }
7849
7850         if (!hdev_is_powered(hdev)) {
7851                 BT_DBG("%s", hdev->name);
7852                 mgmt_pending_remove(cmd);
7853                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7854                                 MGMT_STATUS_NOT_POWERED);
7855                 goto unlocked;
7856         }
7857
7858         hci_req_init(&req, hdev);
7859
7860         BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7861                         sizeof(struct hci_cp_set_enable_rssi),
7862                         cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7863                         cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7864
7865         hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7866         err = hci_req_run(&req, set_rssi_disable_complete);
7867
7868         if (err < 0) {
7869                 mgmt_pending_remove(cmd);
7870                 BT_ERR("hci_req_run failed");
7871                 goto unlocked;
7872         }
7873
7874 unlocked:
7875         hci_dev_unlock(hdev);
7876         return err;
7877 }
7878
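/* Command-complete handler for the vendor RSSI commands.  The flow is
 * two-staged: le_ext_opcode 0x01 toggles the RSSI monitoring feature,
 * while le_ext_opcode 0x0B programs (or clears) per-connection
 * thresholds.  For an enable request, completion of 0x01 triggers the
 * threshold setup and completion of 0x0B reports success to userspace.
 * For a disable request, completion of 0x0B (threshold cleared) either
 * reports success directly when other links are still monitored, or
 * falls through to a full feature disable (0x01) when this was the
 * last monitored link.
 */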
7879 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
7880 {
7881         struct hci_cc_rsp_enable_rssi *rp = response;
7882         struct mgmt_pending_cmd *cmd_enable = NULL;
7883         struct mgmt_pending_cmd *cmd_disable = NULL;
7884         struct mgmt_cp_set_enable_rssi *cp_en;
7885         struct mgmt_cp_disable_rssi *cp_dis;
7886
7887         hci_dev_lock(hdev);
7888         cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7889         cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7890         hci_dev_unlock(hdev);
7891
7892         if (cmd_enable)
7893                 BT_DBG("Enable Request");
7894
7895         if (cmd_disable)
7896                 BT_DBG("Disable Request");
7897
7898         if (cmd_enable) {
7899                 cp_en = cmd_enable->param;
7900
7901                 if (status != 0x00)
7902                         return;
7903
7904                 switch (rp->le_ext_opcode) {
7905                 case 0x01:
7906                         BT_DBG("RSSI enabled.. Setting Threshold...");
7907                         mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
7908                                         cp_en, sizeof(*cp_en));
7909                         break;
7910
7911                 case 0x0B:
7912                         BT_DBG("Sending RSSI enable success");
7913                         mgmt_rssi_enable_success(cmd_enable->sk, hdev,
7914                                         cp_en, rp, rp->status);
7915                         break;
7916                 }
7917
7918         } else if (cmd_disable) {
7919                 cp_dis = cmd_disable->param;
7920
7921                 if (status != 0x00)
7922                         return;
7923
7924                 switch (rp->le_ext_opcode) {
7925                 case 0x01:
7926                         BT_DBG("Sending RSSI disable success");
7927                         mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7928                                         cp_dis, rp, rp->status);
7929                         break;
7930
7931                 case 0x0B:
7932                         /*
7933                          * Only unset RSSI Threshold values for the Link if
7934                          * RSSI is monitored for other BREDR or LE Links
7935                          */
7936                         if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
7937                                 BT_DBG("Unset Threshold. Other links being monitored");
7938                                 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7939                                                 cp_dis, rp, rp->status);
7940                         } else {
7941                                 BT_DBG("Unset Threshold. Disabling...");
7942                                 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
7943                                                 cp_dis, sizeof(*cp_dis));
7944                         }
7945                         break;
7946                 }
7947         }
7948 }
7949
7950 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7951                 u16 opcode)
7952 {
7953         struct mgmt_pending_cmd *cmd;
7954
7955         BT_DBG("status 0x%02x", status);
7956
7957         hci_dev_lock(hdev);
7958
7959         cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7960         if (!cmd)
7961                 goto unlock;
7962
7963         if (status)
7964                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7965                                 mgmt_status(status));
7966         else
7967                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7968                                 NULL, 0);
7969
7970         mgmt_pending_remove(cmd);
7971
7972 unlock:
7973         hci_dev_unlock(hdev);
7974 }
7975
7976 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7977                 void *data, u16 len)
7978 {
7979         struct mgmt_pending_cmd *cmd;
7980         struct hci_request req;
7981         struct mgmt_cp_set_enable_rssi *cp = data;
7982         struct hci_cp_set_enable_rssi cp_en = { 0, };
7983         int err;
7984
7985         BT_DBG("Set Enable RSSI.");
7986
7987         cp_en.hci_le_ext_opcode = 0x01;
7988         cp_en.le_enable_cs_Features = 0x04;
7989         cp_en.data[0] = 0x00;
7990         cp_en.data[1] = 0x00;
7991         cp_en.data[2] = 0x00;
7992
7993         hci_dev_lock(hdev);
7994
7995         if (!lmp_le_capable(hdev)) {
7996                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7997                                 MGMT_STATUS_NOT_SUPPORTED);
7998                 goto unlocked;
7999         }
8000
8001         if (!hdev_is_powered(hdev)) {
8002                 BT_DBG("%s", hdev->name);
8003                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8004                                 MGMT_STATUS_NOT_POWERED);
8005                 goto unlocked;
8006         }
8007
8008         if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
8009                 BT_DBG("%s", hdev->name);
8010                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8011                                 MGMT_STATUS_BUSY);
8012                 goto unlocked;
8013         }
8014
8015         cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
8016                         sizeof(*cp));
8017         if (!cmd) {
8018                 BT_DBG("%s", hdev->name);
8019                 err = -ENOMEM;
8020                 goto unlocked;
8021         }
8022
8023         /* If RSSI is already enabled directly set Threshold values */
8024         if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
8025                 hci_dev_unlock(hdev);
8026                 BT_DBG("RSSI Enabled. Directly set Threshold");
8027                 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
8028                 return err;
8029         }
8030
8031         hci_req_init(&req, hdev);
8032
8033         BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
8034                         sizeof(struct hci_cp_set_enable_rssi),
8035                         cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
8036                         cp_en.data[0], cp_en.data[1], cp_en.data[2]);
8037
8038         hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
8039         err = hci_req_run(&req, set_rssi_enable_complete);
8040
8041         if (err < 0) {
8042                 mgmt_pending_remove(cmd);
8043                 BT_ERR("hci_req_run failed");
8044                 goto unlocked;
8045         }
8046
8047 unlocked:
8048         hci_dev_unlock(hdev);
8049
8050         return err;
8051 }
8052
8053 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8054 {
8055         struct mgmt_pending_cmd *cmd;
8056
8057         BT_DBG("status 0x%02x", status);
8058
8059         hci_dev_lock(hdev);
8060
8061         cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
8062         if (!cmd)
8063                 goto unlock;
8064
8065         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8066                         MGMT_STATUS_SUCCESS, &status, 1);
8067
8068         mgmt_pending_remove(cmd);
8069
8070 unlock:
8071         hci_dev_unlock(hdev);
8072 }
8073
8074 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
8075                         u16 len)
8076 {
8077         struct mgmt_pending_cmd *cmd;
8078         struct hci_request req;
8079         struct mgmt_cp_get_raw_rssi *cp = data;
8080         struct hci_cp_get_raw_rssi hci_cp;
8081
8082         struct hci_conn *conn;
8083         int err;
8084         __u8 dest_type;
8085
8086         BT_DBG("Get Raw RSSI.");
8087
8088         hci_dev_lock(hdev);
8089
8090         if (!lmp_le_capable(hdev)) {
8091                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8092                                 MGMT_STATUS_NOT_SUPPORTED);
8093                 goto unlocked;
8094         }
8095
8096         if (cp->link_type == 0x01)
8097                 dest_type = LE_LINK;
8098         else
8099                 dest_type = ACL_LINK;
8100
8101         /* Get LE/BREDR link handle info */
8102         conn = hci_conn_hash_lookup_ba(hdev,
8103                         dest_type, &cp->bt_address);
8104         if (!conn) {
8105                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8106                                                    MGMT_STATUS_NOT_CONNECTED);
8107                 goto unlocked;
8108         }
8109         hci_cp.conn_handle = conn->handle;
8110
8111         if (!hdev_is_powered(hdev)) {
8112                 BT_DBG("%s", hdev->name);
8113                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8114                                 MGMT_STATUS_NOT_POWERED);
8115                 goto unlocked;
8116         }
8117
8118         if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
8119                 BT_DBG("%s", hdev->name);
8120                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8121                                 MGMT_STATUS_BUSY);
8122                 goto unlocked;
8123         }
8124
8125         cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
8126         if (!cmd) {
8127                 BT_DBG("%s", hdev->name);
8128                 err = -ENOMEM;
8129                 goto unlocked;
8130         }
8131
8132         hci_req_init(&req, hdev);
8133
8134         BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
8135         hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
8136         err = hci_req_run(&req, get_raw_rssi_complete);
8137
8138         if (err < 0) {
8139                 mgmt_pending_remove(cmd);
8140                 BT_ERR("hci_req_run failed");
8141         }
8142
8143 unlocked:
8144         hci_dev_unlock(hdev);
8145
8146         return err;
8147 }
8148
8149 void mgmt_raw_rssi_response(struct hci_dev *hdev,
8150                 struct hci_cc_rp_get_raw_rssi *rp, int success)
8151 {
8152         struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
8153         struct hci_conn *conn;
8154
8155         mgmt_rp.status = rp->status;
8156         mgmt_rp.rssi_dbm = rp->rssi_dbm;
8157
8158         conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
8159         if (!conn)
8160                 return;
8161
8162         bacpy(&mgmt_rp.bt_address, &conn->dst);
8163         if (conn->type == LE_LINK)
8164                 mgmt_rp.link_type = 0x01;
8165         else
8166                 mgmt_rp.link_type = 0x00;
8167
8168         mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
8169                         sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
8170 }
8171
8172 static void set_disable_threshold_complete(struct hci_dev *hdev,
8173                         u8 status, u16 opcode)
8174 {
8175         struct mgmt_pending_cmd *cmd;
8176
8177         BT_DBG("status 0x%02x", status);
8178
8179         hci_dev_lock(hdev);
8180
8181         cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
8182         if (!cmd)
8183                 goto unlock;
8184
8185         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8186                         MGMT_STATUS_SUCCESS, &status, 1);
8187
8188         mgmt_pending_remove(cmd);
8189
8190 unlock:
8191         hci_dev_unlock(hdev);
8192 }
8193
8194 /* Remove RSSI threshold monitoring for a single link */
8195 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
8196                 void *data, u16 len)
8197 {
8198         int err = 0;
8199         struct hci_cp_set_rssi_threshold th = { 0, };
8200         struct mgmt_cp_disable_rssi *cp = data;
8201         struct hci_conn *conn;
8202         struct mgmt_pending_cmd *cmd;
8203         struct hci_request req;
8204         __u8 dest_type;
8205
8206         BT_DBG("Set Disable RSSI.");
8207
8208         hci_dev_lock(hdev);
8209
8210         if (!lmp_le_capable(hdev)) {
8211                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8212                                 MGMT_STATUS_NOT_SUPPORTED);
8213                 goto unlocked;
8214         }
8215
8216         /* Get LE/ACL link handle info */
8217         if (cp->link_type == 0x01)
8218                 dest_type = LE_LINK;
8219         else
8220                 dest_type = ACL_LINK;
8221
8222         conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
8223         if (!conn) {
8224                 err = mgmt_cmd_complete(sk, hdev->id,
8225                                 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
8226                 goto unlocked;
8227         }
8228
8229         th.hci_le_ext_opcode = 0x0B;
8230         th.mode = 0x01;
8231         th.conn_handle = conn->handle;
8232         th.alert_mask = 0x00;
8233         th.low_th = 0x00;
8234         th.in_range_th = 0x00;
8235         th.high_th = 0x00;
8236
8237         if (!hdev_is_powered(hdev)) {
8238                 BT_DBG("%s", hdev->name);
8239                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8240                                 0, data, len);
8241                 goto unlocked;
8242         }
8243
8244         if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
8245                 BT_DBG("%s", hdev->name);
8246                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8247                                 MGMT_STATUS_BUSY);
8248                 goto unlocked;
8249         }
8250
8251         cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
8252                         sizeof(*cp));
8253         if (!cmd) {
8254                 BT_DBG("%s", hdev->name);
8255                 err = -ENOMEM;
8256                 goto unlocked;
8257         }
8258
8259         hci_req_init(&req, hdev);
8260
8261         hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
8262         err = hci_req_run(&req, set_disable_threshold_complete);
8263         if (err < 0) {
8264                 mgmt_pending_remove(cmd);
8265                 BT_ERR("hci_req_run failed");
8266                 goto unlocked;
8267         }
8268
8269 unlocked:
8270         hci_dev_unlock(hdev);
8271
8272         return err;
8273 }
8274
8275 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
8276                 s8 alert_type, s8 rssi_dbm)
8277 {
8278         struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
8279         struct hci_conn *conn;
8280
8281         BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
8282                         conn_handle, alert_type, rssi_dbm);
8283
8284         conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
8285
8286         if (!conn) {
8287                 BT_ERR("RSSI alert Error: Device not found for handle");
8288                 return;
8289         }
8290         bacpy(&mgmt_ev.bdaddr, &conn->dst);
8291
8292         if (conn->type == LE_LINK)
8293                 mgmt_ev.link_type = 0x01;
8294         else
8295                 mgmt_ev.link_type = 0x00;
8296
8297         mgmt_ev.alert_type = alert_type;
8298         mgmt_ev.rssi_dbm = rssi_dbm;
8299
8300         mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
8301                         sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
8302                         NULL);
8303 }
8304
8305 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
8306 {
8307         struct mgmt_pending_cmd *cmd;
8308         u8 type;
8309         int err;
8310
8311         hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8312
8313         cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8314         if (!cmd)
8315                 return -ENOENT;
8316
8317         type = hdev->le_discovery.type;
8318
8319         err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8320                                 mgmt_status(status), &type, sizeof(type));
8321         mgmt_pending_remove(cmd);
8322
8323         return err;
8324 }
8325
8326 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
8327                 u16 opcode)
8328 {
8329         unsigned long timeout = 0;
8330
8331         BT_DBG("status %d", status);
8332
8333         if (status) {
8334                 hci_dev_lock(hdev);
8335                 mgmt_start_le_discovery_failed(hdev, status);
8336                 hci_dev_unlock(hdev);
8337                 return;
8338         }
8339
8340         hci_dev_lock(hdev);
8341         hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
8342         hci_dev_unlock(hdev);
8343
8344         if (hdev->le_discovery.type != DISCOV_TYPE_LE)
8345                 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
8346
8347         if (!timeout)
8348                 return;
8349
8350         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
8351 }
8352
8353 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
8354                 void *data, u16 len)
8355 {
8356         struct mgmt_cp_start_le_discovery *cp = data;
8357         struct mgmt_pending_cmd *cmd;
8358         struct hci_cp_le_set_scan_param param_cp;
8359         struct hci_cp_le_set_scan_enable enable_cp;
8360         struct hci_request req;
8361         u8 status, own_addr_type;
8362         int err;
8363
8364         BT_DBG("%s", hdev->name);
8365
8366         if (!hdev_is_powered(hdev)) {
8367                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8368                                 MGMT_STATUS_NOT_POWERED);
8369                 goto unlock;
8370         }
8371
8372         if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
8373                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8374                                 MGMT_STATUS_BUSY);
8375                 goto unlock;
8376         }
8377
8378         if (cp->type != DISCOV_TYPE_LE) {
8379                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8380                                 MGMT_STATUS_INVALID_PARAMS);
8381                 goto unlock;
8382         }
8383
8384         cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
8385         if (!cmd) {
8386                 err = -ENOMEM;
8387                 goto unlock;
8388         }
8389
8390         hdev->le_discovery.type = cp->type;
8391
8392         hci_req_init(&req, hdev);
8393
8394         status = mgmt_le_support(hdev);
8395         if (status) {
8396                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8397                                 status);
8398                 mgmt_pending_remove(cmd);
8399                 goto unlock;
8400         }
8401
8402         /* If controller is scanning, it means the background scanning
8403          * is running. Thus, we should temporarily stop it in order to
8404          * set the discovery scanning parameters.
8405          */
8406         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
8407                 hci_req_add_le_scan_disable(&req, false);
8408
8409         memset(&param_cp, 0, sizeof(param_cp));
8410
8411         /* All active scans will be done with either a resolvable
8412          * private address (when privacy feature has been enabled)
8413          * or unresolvable private address.
8414          */
8415         err = hci_update_random_address_sync(hdev, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
8416         if (err < 0) {
8417                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8418                                 MGMT_STATUS_FAILED);
8419                 mgmt_pending_remove(cmd);
8420                 goto unlock;
8421         }
8422
8423         param_cp.type = hdev->le_scan_type;
8424         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
8425         param_cp.window = cpu_to_le16(hdev->le_scan_window);
8426         param_cp.own_address_type = own_addr_type;
8427         hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
8428                     &param_cp);
8429
8430         memset(&enable_cp, 0, sizeof(enable_cp));
8431         enable_cp.enable = LE_SCAN_ENABLE;
8432         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
8433
8434         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
8435                     &enable_cp);
8436
8437         err = hci_req_run(&req, start_le_discovery_complete);
8438         if (err < 0)
8439                 mgmt_pending_remove(cmd);
8440         else
8441                 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
8442
8443 unlock:
8444         return err;
8445 }
8446
8447 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
8448 {
8449         struct mgmt_pending_cmd *cmd;
8450         int err;
8451
8452         cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8453         if (!cmd)
8454                 return -ENOENT;
8455
8456         err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8457                                 mgmt_status(status), &hdev->le_discovery.type,
8458                                 sizeof(hdev->le_discovery.type));
8459         mgmt_pending_remove(cmd);
8460
8461         return err;
8462 }
8463
8464 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
8465                 u16 opcode)
8466 {
8467         BT_DBG("status %d", status);
8468
8469         hci_dev_lock(hdev);
8470
8471         if (status) {
8472                 mgmt_stop_le_discovery_failed(hdev, status);
8473                 goto unlock;
8474         }
8475
8476         hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8477
8478 unlock:
8479         hci_dev_unlock(hdev);
8480 }
8481
8482 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
8483                 void *data, u16 len)
8484 {
8485         struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
8486         struct mgmt_pending_cmd *cmd;
8487         struct hci_request req;
8488         int err;
8489
8490         BT_DBG("%s", hdev->name);
8491
8492         hci_dev_lock(hdev);
8493
8494         if (!hci_le_discovery_active(hdev)) {
8495                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8496                                         MGMT_STATUS_REJECTED, &mgmt_cp->type,
8497                                         sizeof(mgmt_cp->type));
8498                 goto unlock;
8499         }
8500
8501         if (hdev->le_discovery.type != mgmt_cp->type) {
8502                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8503                                         MGMT_STATUS_INVALID_PARAMS,
8504                                         &mgmt_cp->type, sizeof(mgmt_cp->type));
8505                 goto unlock;
8506         }
8507
8508         cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
8509         if (!cmd) {
8510                 err = -ENOMEM;
8511                 goto unlock;
8512         }
8513
8514         hci_req_init(&req, hdev);
8515
8516         if (hdev->le_discovery.state != DISCOVERY_FINDING) {
8517                 BT_DBG("unknown le discovery state %u",
8518                                         hdev->le_discovery.state);
8519
8520                 mgmt_pending_remove(cmd);
8521                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8522                                         MGMT_STATUS_FAILED, &mgmt_cp->type,
8523                                         sizeof(mgmt_cp->type));
8524                 goto unlock;
8525         }
8526
8527         cancel_delayed_work(&hdev->le_scan_disable);
8528         hci_req_add_le_scan_disable(&req, false);
8529
8530         err = hci_req_run(&req, stop_le_discovery_complete);
8531         if (err < 0)
8532                 mgmt_pending_remove(cmd);
8533         else
8534                 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
8535
8536 unlock:
8537         hci_dev_unlock(hdev);
8538         return err;
8539 }
8540
8541 /* Separate LE discovery */
8542 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
8543 {
8544         struct mgmt_ev_discovering ev;
8545         struct mgmt_pending_cmd *cmd;
8546
8547         BT_DBG("%s le discovering %u", hdev->name, discovering);
8548
8549         if (discovering)
8550                 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8551         else
8552                 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8553
8554         if (cmd) {
8555                 u8 type = hdev->le_discovery.type;
8556
8557                 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
8558                                   sizeof(type));
8559                 mgmt_pending_remove(cmd);
8560         }
8561
8562         memset(&ev, 0, sizeof(ev));
8563         ev.type = hdev->le_discovery.type;
8564         ev.discovering = discovering;
8565
8566         mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8567 }
8568
8569 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
8570                         void *data, u16 len)
8571 {
8572         int err;
8573
8574         BT_DBG("%s", hdev->name);
8575
8576         hci_dev_lock(hdev);
8577
8578         err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
8579         if (err < 0)
8580                 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL failed");
8581
8582         hci_dev_unlock(hdev);
8583
8584         return err;
8585 }
8586
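/* Validate LE connection parameter update values against the ranges
 * used elsewhere in the stack: connection interval 6..3200 (units of
 * 1.25 ms, i.e. 7.5 ms..4 s), supervision timeout 10..3200 (units of
 * 10 ms, i.e. 100 ms..32 s), and a latency of at most 499 that is
 * small enough for the supervision timeout to still cover several
 * connection events.  Worked example: min=24, max=40, latency=0,
 * timeout=42 passes, since 40 < 42 * 8 and
 * max_latency = (42 * 8 / 40) - 1 = 7.
 */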
8587 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
8588                 u16 to_multiplier)
8589 {
8590         u16 max_latency;
8591
8592         if (min > max || min < 6 || max > 3200)
8593                 return -EINVAL;
8594
8595         if (to_multiplier < 10 || to_multiplier > 3200)
8596                 return -EINVAL;
8597
8598         if (max >= to_multiplier * 8)
8599                 return -EINVAL;
8600
8601         max_latency = (to_multiplier * 8 / max) - 1;
8602
8603         if (latency > 499 || latency > max_latency)
8604                 return -EINVAL;
8605
8606         return 0;
8607 }
8608
8609 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
8610                 u16 len)
8611 {
8612         struct mgmt_cp_le_conn_update *cp = data;
8613
8614         struct hci_conn *conn;
8615         u16 min, max, latency, supervision_timeout;
8616         int err = -1;
8617
8618         if (!hdev_is_powered(hdev))
8619                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8620                                 MGMT_STATUS_NOT_POWERED);
8621
8622         min = __le16_to_cpu(cp->conn_interval_min);
8623         max = __le16_to_cpu(cp->conn_interval_max);
8624         latency = __le16_to_cpu(cp->conn_latency);
8625         supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
8626
8627         BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
8628                         min, max, latency, supervision_timeout);
8629
8630         err = check_le_conn_update_param(min, max, latency,
8631                         supervision_timeout);
8632
8633         if (err < 0)
8634                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8635                                 MGMT_STATUS_INVALID_PARAMS);
8636
8637         hci_dev_lock(hdev);
8638
8639         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
8640         if (!conn) {
8641                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8642                                 MGMT_STATUS_NOT_CONNECTED);
8643                 hci_dev_unlock(hdev);
8644                 return err;
8645         }
8646
8647         hci_dev_unlock(hdev);
8648
8649         hci_le_conn_update(conn, min, max, latency, supervision_timeout);
8650
8651         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
8652                                  NULL, 0);
8653 }
8654
8655 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
8656                 u16 opcode)
8657 {
8658         struct mgmt_cp_set_manufacturer_data *cp;
8659         struct mgmt_pending_cmd *cmd;
8660
8661         BT_DBG("status 0x%02x", status);
8662
8663         hci_dev_lock(hdev);
8664
8665         cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
8666         if (!cmd)
8667                 goto unlock;
8668
8669         cp = cmd->param;
8670
8671         if (status)
8672                 mgmt_cmd_status(cmd->sk, hdev->id,
8673                                 MGMT_OP_SET_MANUFACTURER_DATA,
8674                                 mgmt_status(status));
8675         else
8676                 mgmt_cmd_complete(cmd->sk, hdev->id,
8677                                   MGMT_OP_SET_MANUFACTURER_DATA, 0,
8678                                   cp, sizeof(*cp));
8679
8680         mgmt_pending_remove(cmd);
8681
8682 unlock:
8683         hci_dev_unlock(hdev);
8684 }
8685
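/* Set the manufacturer specific data carried in the EIR.  The payload
 * follows the EIR/AD layout: cp->data[0] is the field length (AD type
 * byte plus payload), cp->data[1] must be 0xFF (manufacturer specific
 * data) and the remaining data[0] - 1 bytes are copied into
 * hdev->manufacturer_data.  On failure the previous data is restored.
 */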
8686 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
8687                 void *data, u16 len)
8688 {
8689         struct mgmt_pending_cmd *cmd;
8690         struct hci_request req;
8691         struct mgmt_cp_set_manufacturer_data *cp = data;
8692         u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
8693         u8 old_len;
8694         int err;
8695
8696         BT_DBG("%s", hdev->name);
8697
8698         if (!lmp_bredr_capable(hdev))
8699                 return mgmt_cmd_status(sk, hdev->id,
8700                                 MGMT_OP_SET_MANUFACTURER_DATA,
8701                                 MGMT_STATUS_NOT_SUPPORTED);
8702
8703         if (cp->data[0] == 0 ||
8704                         cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
8705                 return mgmt_cmd_status(sk, hdev->id,
8706                                 MGMT_OP_SET_MANUFACTURER_DATA,
8707                                 MGMT_STATUS_INVALID_PARAMS);
8708
8709         if (cp->data[1] != 0xFF)
8710                 return mgmt_cmd_status(sk, hdev->id,
8711                                 MGMT_OP_SET_MANUFACTURER_DATA,
8712                                 MGMT_STATUS_NOT_SUPPORTED);
8713
8714         hci_dev_lock(hdev);
8715
8716         if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
8717                 err = mgmt_cmd_status(sk, hdev->id,
8718                                 MGMT_OP_SET_MANUFACTURER_DATA,
8719                                 MGMT_STATUS_BUSY);
8720                 goto unlocked;
8721         }
8722
8723         cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
8724                         len);
8725         if (!cmd) {
8726                 err = -ENOMEM;
8727                 goto unlocked;
8728         }
8729
8730         hci_req_init(&req, hdev);
8731
8732         /* if new data is same as previous data then return command
8733          * complete event
8734          */
8735         if (hdev->manufacturer_len == cp->data[0] - 1 &&
8736             !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
8737                 mgmt_pending_remove(cmd);
8738                 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
8739                                   0, cp, sizeof(*cp));
8740                 err = 0;
8741                 goto unlocked;
8742         }
8743
8744         old_len = hdev->manufacturer_len;
8745         if (old_len > 0)
8746                 memcpy(old_data, hdev->manufacturer_data, old_len);
8747
8748         hdev->manufacturer_len = cp->data[0] - 1;
8749         if (hdev->manufacturer_len > 0)
8750                 memcpy(hdev->manufacturer_data, cp->data + 2,
8751                                 hdev->manufacturer_len);
8752
8753         hci_update_eir_sync(hdev);
8754
8755         err = hci_req_run(&req, set_manufacturer_data_complete);
8756         if (err < 0) {
8757                 mgmt_pending_remove(cmd);
8758                 goto failed;
8759         }
8760
8761 unlocked:
8762         hci_dev_unlock(hdev);
8763
8764         return err;
8765
8766 failed:
8767         memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
8768         hdev->manufacturer_len = old_len;
8769         if (hdev->manufacturer_len > 0)
8770                 memcpy(hdev->manufacturer_data, old_data,
8771                        hdev->manufacturer_len);
8772         hci_dev_unlock(hdev);
8773         return err;
8774 }
8775
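/* Update the LE scan parameters used for subsequent scans.  Interval
 * and window are in units of 0.625 ms and must each be in the range
 * 0x0004..0x4000 (2.5 ms..10.24 s), with the window no larger than
 * the interval.  If background scanning is currently active it is
 * restarted so the new parameters take effect immediately.
 */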
8776 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
8777                 void *data, u16 len)
8778 {
8779         struct mgmt_cp_le_set_scan_params *cp = data;
8780         __u16 interval, window;
8781         int err;
8782
8783         BT_DBG("%s", hdev->name);
8784
8785         if (!lmp_le_capable(hdev))
8786                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8787                                 MGMT_STATUS_NOT_SUPPORTED);
8788
8789         interval = __le16_to_cpu(cp->interval);
8790
8791         if (interval < 0x0004 || interval > 0x4000)
8792                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8793                                 MGMT_STATUS_INVALID_PARAMS);
8794
8795         window = __le16_to_cpu(cp->window);
8796
8797         if (window < 0x0004 || window > 0x4000)
8798                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8799                                 MGMT_STATUS_INVALID_PARAMS);
8800
8801         if (window > interval)
8802                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8803                                 MGMT_STATUS_INVALID_PARAMS);
8804
8805         hci_dev_lock(hdev);
8806
8807         hdev->le_scan_type = cp->type;
8808         hdev->le_scan_interval = interval;
8809         hdev->le_scan_window = window;
8810
8811         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
8812                                 NULL, 0);
8813
8814         /* If background scan is running, restart it so new parameters are
8815          * loaded.
8816          */
8817         if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
8818             hdev->discovery.state == DISCOVERY_STOPPED) {
8819                 struct hci_request req;
8820
8821                 hci_req_init(&req, hdev);
8822
8823                 hci_req_add_le_scan_disable(&req, false);
8824                 hci_req_add_le_passive_scan(&req);
8825
8826                 hci_req_run(&req, NULL);
8827         }
8828
8829         hci_dev_unlock(hdev);
8830
8831         return err;
8832 }
8833
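/* Configure the voice setting and SCO role for an existing ACL link.
 * A voice setting of 0x0063 selects the wideband (WBC) SCO
 * configuration, anything else selects narrowband (NBC); the handsfree
 * role uses the plain helpers while the gateway role uses the _gw_
 * variants.  If a SCO connection to a different peer already exists,
 * the setting is stored but the codec selection is skipped.
 */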
8834 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
8835                 void *data, u16 len)
8836 {
8837         struct mgmt_cp_set_voice_setting *cp = data;
8838         struct hci_conn *conn;
8839         struct hci_conn *sco_conn;
8840
8841         int err;
8842
8843         BT_DBG("%s", hdev->name);
8844
8845         if (!lmp_bredr_capable(hdev)) {
8846                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
8847                                 MGMT_STATUS_NOT_SUPPORTED);
8848         }
8849
8850         hci_dev_lock(hdev);
8851
8852         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
8853         if (!conn) {
8854                 err = mgmt_cmd_complete(sk, hdev->id,
8855                                 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
8856                 goto unlock;
8857         }
8858
8859         conn->voice_setting = cp->voice_setting;
8860         conn->sco_role = cp->sco_role;
8861
8862         sco_conn = hci_conn_hash_lookup_sco(hdev);
8863         if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
8864                 BT_ERR("There is another SCO connection.");
8865                 goto done;
8866         }
8867
8868         if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
8869                 if (conn->voice_setting == 0x0063)
8870                         sco_connect_set_wbc(hdev);
8871                 else
8872                         sco_connect_set_nbc(hdev);
8873         } else {
8874                 if (conn->voice_setting == 0x0063)
8875                         sco_connect_set_gw_wbc(hdev);
8876                 else
8877                         sco_connect_set_gw_nbc(hdev);
8878         }
8879
8880 done:
8881         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
8882                         cp, sizeof(*cp));
8883
8884 unlock:
8885         hci_dev_unlock(hdev);
8886         return err;
8887 }
8888
8889 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
8890                 void *data, u16 len)
8891 {
8892         struct mgmt_rp_get_adv_tx_power *rp;
8893         size_t rp_len;
8894         int err;
8895
8896         BT_DBG("%s", hdev->name);
8897
8898         hci_dev_lock(hdev);
8899
8900         rp_len = sizeof(*rp);
8901         rp = kmalloc(rp_len, GFP_KERNEL);
8902         if (!rp) {
8903                 err = -ENOMEM;
8904                 goto unlock;
8905         }
8906
8907         rp->adv_tx_power = hdev->adv_tx_power;
8908
8909         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
8910                                 rp_len);
8911
8912         kfree(rp);
8913
8914 unlock:
8915         hci_dev_unlock(hdev);
8916
8917         return err;
8918 }
8919
8920 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
8921 {
8922         struct mgmt_ev_hardware_error ev;
8923
8924         ev.error_code = err_code;
8925         mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
8926 }
8927
8928 void mgmt_tx_timeout_error(struct hci_dev *hdev)
8929 {
8930         mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
8931 }
8932
8933 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
8934                 u8 state_change_reason, u16 connection_handle)
8935 {
8936         struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
8937
8938         BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
8939                adv_instance, state_change_reason, connection_handle);
8940
8941         mgmt_ev.adv_instance = adv_instance;
8942         mgmt_ev.state_change_reason = state_change_reason;
8943         mgmt_ev.connection_handle = connection_handle;
8944
8945         mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
8946                 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
8947                 NULL);
8948 }
8949 #endif /* TIZEN_BT */
8950
8951 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8952 {
8953         if (key->initiator != 0x00 && key->initiator != 0x01)
8954                 return false;
8955
8956         switch (key->addr.type) {
8957         case BDADDR_LE_PUBLIC:
8958                 return true;
8959
8960         case BDADDR_LE_RANDOM:
8961                 /* Two most significant bits shall be set */
8962                 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
8963                         return false;
8964                 return true;
8965         }
8966
8967         return false;
8968 }
8969
8970 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
8971                                void *cp_data, u16 len)
8972 {
8973         struct mgmt_cp_load_long_term_keys *cp = cp_data;
8974         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
8975                                    sizeof(struct mgmt_ltk_info));
8976         u16 key_count, expected_len;
8977         int i, err;
8978
8979         bt_dev_dbg(hdev, "sock %p", sk);
8980
8981         if (!lmp_le_capable(hdev))
8982                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8983                                        MGMT_STATUS_NOT_SUPPORTED);
8984
8985         key_count = __le16_to_cpu(cp->key_count);
8986         if (key_count > max_key_count) {
8987                 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
8988                            key_count);
8989                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8990                                        MGMT_STATUS_INVALID_PARAMS);
8991         }
8992
8993         expected_len = struct_size(cp, keys, key_count);
8994         if (expected_len != len) {
8995                 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
8996                            expected_len, len);
8997                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8998                                        MGMT_STATUS_INVALID_PARAMS);
8999         }
9000
9001         bt_dev_dbg(hdev, "key_count %u", key_count);
9002
9003         for (i = 0; i < key_count; i++) {
9004                 struct mgmt_ltk_info *key = &cp->keys[i];
9005
9006                 if (!ltk_is_valid(key))
9007                         return mgmt_cmd_status(sk, hdev->id,
9008                                                MGMT_OP_LOAD_LONG_TERM_KEYS,
9009                                                MGMT_STATUS_INVALID_PARAMS);
9010         }
9011
9012         hci_dev_lock(hdev);
9013
9014         hci_smp_ltks_clear(hdev);
9015
9016         for (i = 0; i < key_count; i++) {
9017                 struct mgmt_ltk_info *key = &cp->keys[i];
9018                 u8 type, authenticated;
9019                 u8 addr_type = le_addr_type(key->addr.type);
9020
9021                 if (hci_is_blocked_key(hdev,
9022                                        HCI_BLOCKED_KEY_TYPE_LTK,
9023                                        key->val)) {
9024                         bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
9025                                     &key->addr.bdaddr);
9026                         continue;
9027                 }
9028
9029                 switch (key->type) {
9030                 case MGMT_LTK_UNAUTHENTICATED:
9031                         authenticated = 0x00;
9032                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9033                         break;
9034                 case MGMT_LTK_AUTHENTICATED:
9035                         authenticated = 0x01;
9036                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9037                         break;
9038                 case MGMT_LTK_P256_UNAUTH:
9039                         authenticated = 0x00;
9040                         type = SMP_LTK_P256;
9041                         break;
9042                 case MGMT_LTK_P256_AUTH:
9043                         authenticated = 0x01;
9044                         type = SMP_LTK_P256;
9045                         break;
9046                 case MGMT_LTK_P256_DEBUG:
9047                         authenticated = 0x00;
9048                         type = SMP_LTK_P256_DEBUG;
9049                         fallthrough;
9050                 default:
9051                         continue;
9052                 }
9053
9054                 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
9055                 if (key->addr.type == BDADDR_BREDR)
9056                         addr_type = BDADDR_BREDR;
9057
9058                 hci_add_ltk(hdev, &key->addr.bdaddr,
9059                             addr_type, type, authenticated,
9060                             key->val, key->enc_size, key->ediv, key->rand);
9061         }
9062
9063         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
9064                            NULL, 0);
9065
9066         hci_dev_unlock(hdev);
9067
9068         return err;
9069 }
9070
9071 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
9072 {
9073         struct mgmt_pending_cmd *cmd = data;
9074         struct hci_conn *conn = cmd->user_data;
9075         struct mgmt_cp_get_conn_info *cp = cmd->param;
9076         struct mgmt_rp_get_conn_info rp;
9077         u8 status;
9078
9079         bt_dev_dbg(hdev, "err %d", err);
9080
9081         memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
9082
9083         status = mgmt_status(err);
9084         if (status == MGMT_STATUS_SUCCESS) {
9085                 rp.rssi = conn->rssi;
9086                 rp.tx_power = conn->tx_power;
9087                 rp.max_tx_power = conn->max_tx_power;
9088         } else {
9089                 rp.rssi = HCI_RSSI_INVALID;
9090                 rp.tx_power = HCI_TX_POWER_INVALID;
9091                 rp.max_tx_power = HCI_TX_POWER_INVALID;
9092         }
9093
9094         mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
9095                           &rp, sizeof(rp));
9096
9097         mgmt_pending_free(cmd);
9098 }
9099
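/* Runs on the hci_sync command queue: re-checks that the connection is still
 * up, then refreshes the RSSI and, where the value is not yet known, the TX
 * power and max TX power for the handle. The reply is sent from
 * get_conn_info_complete().
 */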
9100 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
9101 {
9102         struct mgmt_pending_cmd *cmd = data;
9103         struct mgmt_cp_get_conn_info *cp = cmd->param;
9104         struct hci_conn *conn;
9105         int err;
9106         __le16   handle;
9107
9108         /* Make sure we are still connected */
9109         if (cp->addr.type == BDADDR_BREDR)
9110                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9111                                                &cp->addr.bdaddr);
9112         else
9113                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9114
9115         if (!conn || conn->state != BT_CONNECTED)
9116                 return MGMT_STATUS_NOT_CONNECTED;
9117
9118         cmd->user_data = conn;
9119         handle = cpu_to_le16(conn->handle);
9120
9121         /* Refresh RSSI each time */
9122         err = hci_read_rssi_sync(hdev, handle);
9123
9124         /* For LE links the TX power does not change, so there is no need to
9125          * query for it again once the value is known.
9126          */
9127         if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
9128                      conn->tx_power == HCI_TX_POWER_INVALID))
9129                 err = hci_read_tx_power_sync(hdev, handle, 0x00);
9130
9131         /* Max TX power needs to be read only once per connection */
9132         if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
9133                 err = hci_read_tx_power_sync(hdev, handle, 0x01);
9134
9135         return err;
9136 }
9137
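/* Handler for MGMT_OP_GET_CONN_INFO: if the cached RSSI/TX power values are
 * younger than a randomized age between conn_info_min_age and
 * conn_info_max_age, reply straight from the hci_conn cache; otherwise queue
 * get_conn_info_sync() to refresh them from the controller first.
 */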
9138 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
9139                          u16 len)
9140 {
9141         struct mgmt_cp_get_conn_info *cp = data;
9142         struct mgmt_rp_get_conn_info rp;
9143         struct hci_conn *conn;
9144         unsigned long conn_info_age;
9145         int err = 0;
9146
9147         bt_dev_dbg(hdev, "sock %p", sk);
9148
9149         memset(&rp, 0, sizeof(rp));
9150         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9151         rp.addr.type = cp->addr.type;
9152
9153         if (!bdaddr_type_is_valid(cp->addr.type))
9154                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9155                                          MGMT_STATUS_INVALID_PARAMS,
9156                                          &rp, sizeof(rp));
9157
9158         hci_dev_lock(hdev);
9159
9160         if (!hdev_is_powered(hdev)) {
9161                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9162                                         MGMT_STATUS_NOT_POWERED, &rp,
9163                                         sizeof(rp));
9164                 goto unlock;
9165         }
9166
9167         if (cp->addr.type == BDADDR_BREDR)
9168                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9169                                                &cp->addr.bdaddr);
9170         else
9171                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9172
9173         if (!conn || conn->state != BT_CONNECTED) {
9174                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9175                                         MGMT_STATUS_NOT_CONNECTED, &rp,
9176                                         sizeof(rp));
9177                 goto unlock;
9178         }
9179
9180         /* To keep the client from guessing when to poll again, calculate the
9181          * conn info age as a random value between the min/max set in hdev.
9182          */
9183         conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
9184                                                  hdev->conn_info_max_age - 1);
9185
9186         /* Query controller to refresh cached values if they are too old or were
9187          * never read.
9188          */
9189         if (time_after(jiffies, conn->conn_info_timestamp +
9190                        msecs_to_jiffies(conn_info_age)) ||
9191             !conn->conn_info_timestamp) {
9192                 struct mgmt_pending_cmd *cmd;
9193
9194                 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
9195                                        len);
9196                 if (!cmd) {
9197                         err = -ENOMEM;
9198                 } else {
9199                         err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
9200                                                  cmd, get_conn_info_complete);
9201                 }
9202
9203                 if (err < 0) {
9204                         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9205                                           MGMT_STATUS_FAILED, &rp, sizeof(rp));
9206
9207                         if (cmd)
9208                                 mgmt_pending_free(cmd);
9209
9210                         goto unlock;
9211                 }
9212
9213                 conn->conn_info_timestamp = jiffies;
9214         } else {
9215                 /* Cache is valid, just reply with values cached in hci_conn */
9216                 rp.rssi = conn->rssi;
9217                 rp.tx_power = conn->tx_power;
9218                 rp.max_tx_power = conn->max_tx_power;
9219
9220                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9221                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9222         }
9223
9224 unlock:
9225         hci_dev_unlock(hdev);
9226         return err;
9227 }
9228
9229 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
9230 {
9231         struct mgmt_pending_cmd *cmd = data;
9232         struct mgmt_cp_get_clock_info *cp = cmd->param;
9233         struct mgmt_rp_get_clock_info rp;
9234         struct hci_conn *conn = cmd->user_data;
9235         u8 status = mgmt_status(err);
9236
9237         bt_dev_dbg(hdev, "err %d", err);
9238
9239         memset(&rp, 0, sizeof(rp));
9240         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9241         rp.addr.type = cp->addr.type;
9242
9243         if (err)
9244                 goto complete;
9245
9246         rp.local_clock = cpu_to_le32(hdev->clock);
9247
9248         if (conn) {
9249                 rp.piconet_clock = cpu_to_le32(conn->clock);
9250                 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
9251         }
9252
9253 complete:
9254         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
9255                           sizeof(rp));
9256
9257         mgmt_pending_free(cmd);
9258 }
9259
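/* Runs on the hci_sync command queue: reads the local clock first and then,
 * if the ACL connection still exists, the piconet clock for that handle.
 */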
9260 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
9261 {
9262         struct mgmt_pending_cmd *cmd = data;
9263         struct mgmt_cp_get_clock_info *cp = cmd->param;
9264         struct hci_cp_read_clock hci_cp;
9265         struct hci_conn *conn;
9266
9267         memset(&hci_cp, 0, sizeof(hci_cp));
9268         hci_read_clock_sync(hdev, &hci_cp);
9269
9270         /* Make sure connection still exists */
9271         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
9272         if (!conn || conn->state != BT_CONNECTED)
9273                 return MGMT_STATUS_NOT_CONNECTED;
9274
9275         cmd->user_data = conn;
9276         hci_cp.handle = cpu_to_le16(conn->handle);
9277         hci_cp.which = 0x01; /* Piconet clock */
9278
9279         return hci_read_clock_sync(hdev, &hci_cp);
9280 }
9281
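/* Handler for MGMT_OP_GET_CLOCK_INFO (BR/EDR only): queues
 * get_clock_info_sync() for the given connection, or for the local clock
 * alone when BDADDR_ANY is passed.
 */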
9282 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
9283                                                                 u16 len)
9284 {
9285         struct mgmt_cp_get_clock_info *cp = data;
9286         struct mgmt_rp_get_clock_info rp;
9287         struct mgmt_pending_cmd *cmd;
9288         struct hci_conn *conn;
9289         int err;
9290
9291         bt_dev_dbg(hdev, "sock %p", sk);
9292
9293         memset(&rp, 0, sizeof(rp));
9294         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9295         rp.addr.type = cp->addr.type;
9296
9297         if (cp->addr.type != BDADDR_BREDR)
9298                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9299                                          MGMT_STATUS_INVALID_PARAMS,
9300                                          &rp, sizeof(rp));
9301
9302         hci_dev_lock(hdev);
9303
9304         if (!hdev_is_powered(hdev)) {
9305                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9306                                         MGMT_STATUS_NOT_POWERED, &rp,
9307                                         sizeof(rp));
9308                 goto unlock;
9309         }
9310
9311         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9312                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9313                                                &cp->addr.bdaddr);
9314                 if (!conn || conn->state != BT_CONNECTED) {
9315                         err = mgmt_cmd_complete(sk, hdev->id,
9316                                                 MGMT_OP_GET_CLOCK_INFO,
9317                                                 MGMT_STATUS_NOT_CONNECTED,
9318                                                 &rp, sizeof(rp));
9319                         goto unlock;
9320                 }
9321         } else {
9322                 conn = NULL;
9323         }
9324
9325         cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
9326         if (!cmd)
9327                 err = -ENOMEM;
9328         else
9329                 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
9330                                          get_clock_info_complete);
9331
9332         if (err < 0) {
9333                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9334                                         MGMT_STATUS_FAILED, &rp, sizeof(rp));
9335
9336                 if (cmd)
9337                         mgmt_pending_free(cmd);
9338         }
9339
9340
9341 unlock:
9342         hci_dev_unlock(hdev);
9343         return err;
9344 }
9345
9346 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
9347 {
9348         struct hci_conn *conn;
9349
9350         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
9351         if (!conn)
9352                 return false;
9353
9354         if (conn->dst_type != type)
9355                 return false;
9356
9357         if (conn->state != BT_CONNECTED)
9358                 return false;
9359
9360         return true;
9361 }
9362
9363 /* This function requires the caller holds hdev->lock */
9364 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
9365                                u8 addr_type, u8 auto_connect)
9366 {
9367         struct hci_conn_params *params;
9368
9369         params = hci_conn_params_add(hdev, addr, addr_type);
9370         if (!params)
9371                 return -EIO;
9372
9373         if (params->auto_connect == auto_connect)
9374                 return 0;
9375
9376         hci_pend_le_list_del_init(params);
9377
9378         switch (auto_connect) {
9379         case HCI_AUTO_CONN_DISABLED:
9380         case HCI_AUTO_CONN_LINK_LOSS:
9381                 /* If auto connect is being disabled while we're trying to
9382                  * connect to the device, keep connecting.
9383                  */
9384                 if (params->explicit_connect)
9385                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
9386                 break;
9387         case HCI_AUTO_CONN_REPORT:
9388                 if (params->explicit_connect)
9389                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
9390                 else
9391                         hci_pend_le_list_add(params, &hdev->pend_le_reports);
9392                 break;
9393         case HCI_AUTO_CONN_DIRECT:
9394         case HCI_AUTO_CONN_ALWAYS:
9395                 if (!is_connected(hdev, addr, addr_type))
9396                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
9397                 break;
9398         }
9399
9400         params->auto_connect = auto_connect;
9401
9402         bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
9403                    addr, addr_type, auto_connect);
9404
9405         return 0;
9406 }
9407
9408 static void device_added(struct sock *sk, struct hci_dev *hdev,
9409                          bdaddr_t *bdaddr, u8 type, u8 action)
9410 {
9411         struct mgmt_ev_device_added ev;
9412
9413         bacpy(&ev.addr.bdaddr, bdaddr);
9414         ev.addr.type = type;
9415         ev.action = action;
9416
9417         mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
9418 }
9419
9420 static int add_device_sync(struct hci_dev *hdev, void *data)
9421 {
9422         return hci_update_passive_scan_sync(hdev);
9423 }
9424
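/* Handler for MGMT_OP_ADD_DEVICE: for a BR/EDR address only action 0x01
 * (incoming connections) is supported and the device is put on the accept
 * list. For LE identity addresses the action selects the auto-connect policy
 * (0x00 report, 0x01 direct, 0x02 always) and the passive scan is updated
 * via add_device_sync().
 */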
9425 static int add_device(struct sock *sk, struct hci_dev *hdev,
9426                       void *data, u16 len)
9427 {
9428         struct mgmt_cp_add_device *cp = data;
9429         u8 auto_conn, addr_type;
9430         struct hci_conn_params *params;
9431         int err;
9432         u32 current_flags = 0;
9433         u32 supported_flags;
9434
9435         bt_dev_dbg(hdev, "sock %p", sk);
9436
9437         if (!bdaddr_type_is_valid(cp->addr.type) ||
9438             !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
9439                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9440                                          MGMT_STATUS_INVALID_PARAMS,
9441                                          &cp->addr, sizeof(cp->addr));
9442
9443         if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
9444                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9445                                          MGMT_STATUS_INVALID_PARAMS,
9446                                          &cp->addr, sizeof(cp->addr));
9447
9448         hci_dev_lock(hdev);
9449
9450         if (cp->addr.type == BDADDR_BREDR) {
9451                 /* Only the incoming connections action (0x01) is supported for now */
9452                 if (cp->action != 0x01) {
9453                         err = mgmt_cmd_complete(sk, hdev->id,
9454                                                 MGMT_OP_ADD_DEVICE,
9455                                                 MGMT_STATUS_INVALID_PARAMS,
9456                                                 &cp->addr, sizeof(cp->addr));
9457                         goto unlock;
9458                 }
9459
9460                 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
9461                                                      &cp->addr.bdaddr,
9462                                                      cp->addr.type, 0);
9463                 if (err)
9464                         goto unlock;
9465
9466                 hci_update_scan(hdev);
9467
9468                 goto added;
9469         }
9470
9471         addr_type = le_addr_type(cp->addr.type);
9472
9473         if (cp->action == 0x02)
9474                 auto_conn = HCI_AUTO_CONN_ALWAYS;
9475         else if (cp->action == 0x01)
9476                 auto_conn = HCI_AUTO_CONN_DIRECT;
9477         else
9478                 auto_conn = HCI_AUTO_CONN_REPORT;
9479
9480         /* Kernel internally uses conn_params with resolvable private
9481          * address, but Add Device allows only identity addresses.
9482          * Make sure it is enforced before calling
9483          * hci_conn_params_lookup.
9484          */
9485         if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9486                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9487                                         MGMT_STATUS_INVALID_PARAMS,
9488                                         &cp->addr, sizeof(cp->addr));
9489                 goto unlock;
9490         }
9491
9492         /* If the connection parameters don't exist for this device,
9493          * they will be created and configured with defaults.
9494          */
9495         if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
9496                                 auto_conn) < 0) {
9497                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9498                                         MGMT_STATUS_FAILED, &cp->addr,
9499                                         sizeof(cp->addr));
9500                 goto unlock;
9501         } else {
9502                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9503                                                 addr_type);
9504                 if (params)
9505                         current_flags = params->flags;
9506         }
9507
9508         err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
9509         if (err < 0)
9510                 goto unlock;
9511
9512 added:
9513         device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
9514         supported_flags = hdev->conn_flags;
9515         device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
9516                              supported_flags, current_flags);
9517
9518         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9519                                 MGMT_STATUS_SUCCESS, &cp->addr,
9520                                 sizeof(cp->addr));
9521
9522 unlock:
9523         hci_dev_unlock(hdev);
9524         return err;
9525 }
9526
9527 static void device_removed(struct sock *sk, struct hci_dev *hdev,
9528                            bdaddr_t *bdaddr, u8 type)
9529 {
9530         struct mgmt_ev_device_removed ev;
9531
9532         bacpy(&ev.addr.bdaddr, bdaddr);
9533         ev.addr.type = type;
9534
9535         mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
9536 }
9537
9538 static int remove_device_sync(struct hci_dev *hdev, void *data)
9539 {
9540         return hci_update_passive_scan_sync(hdev);
9541 }
9542
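/* Handler for MGMT_OP_REMOVE_DEVICE: for a specific address the entry is
 * dropped from the accept list (BR/EDR) or its connection parameters are
 * freed (LE). With BDADDR_ANY the whole accept list is cleared and all LE
 * connection parameters are removed, except disabled ones and those with a
 * pending explicit connect.
 */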
9543 static int remove_device(struct sock *sk, struct hci_dev *hdev,
9544                          void *data, u16 len)
9545 {
9546         struct mgmt_cp_remove_device *cp = data;
9547         int err;
9548
9549         bt_dev_dbg(hdev, "sock %p", sk);
9550
9551         hci_dev_lock(hdev);
9552
9553         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9554                 struct hci_conn_params *params;
9555                 u8 addr_type;
9556
9557                 if (!bdaddr_type_is_valid(cp->addr.type)) {
9558                         err = mgmt_cmd_complete(sk, hdev->id,
9559                                                 MGMT_OP_REMOVE_DEVICE,
9560                                                 MGMT_STATUS_INVALID_PARAMS,
9561                                                 &cp->addr, sizeof(cp->addr));
9562                         goto unlock;
9563                 }
9564
9565                 if (cp->addr.type == BDADDR_BREDR) {
9566                         err = hci_bdaddr_list_del(&hdev->accept_list,
9567                                                   &cp->addr.bdaddr,
9568                                                   cp->addr.type);
9569                         if (err) {
9570                                 err = mgmt_cmd_complete(sk, hdev->id,
9571                                                         MGMT_OP_REMOVE_DEVICE,
9572                                                         MGMT_STATUS_INVALID_PARAMS,
9573                                                         &cp->addr,
9574                                                         sizeof(cp->addr));
9575                                 goto unlock;
9576                         }
9577
9578                         hci_update_scan(hdev);
9579
9580                         device_removed(sk, hdev, &cp->addr.bdaddr,
9581                                        cp->addr.type);
9582                         goto complete;
9583                 }
9584
9585                 addr_type = le_addr_type(cp->addr.type);
9586
9587                 /* Kernel internally uses conn_params with resolvable private
9588                  * address, but Remove Device allows only identity addresses.
9589                  * Make sure it is enforced before calling
9590                  * hci_conn_params_lookup.
9591                  */
9592                 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9593                         err = mgmt_cmd_complete(sk, hdev->id,
9594                                                 MGMT_OP_REMOVE_DEVICE,
9595                                                 MGMT_STATUS_INVALID_PARAMS,
9596                                                 &cp->addr, sizeof(cp->addr));
9597                         goto unlock;
9598                 }
9599
9600                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9601                                                 addr_type);
9602                 if (!params) {
9603                         err = mgmt_cmd_complete(sk, hdev->id,
9604                                                 MGMT_OP_REMOVE_DEVICE,
9605                                                 MGMT_STATUS_INVALID_PARAMS,
9606                                                 &cp->addr, sizeof(cp->addr));
9607                         goto unlock;
9608                 }
9609
9610                 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
9611                     params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
9612                         err = mgmt_cmd_complete(sk, hdev->id,
9613                                                 MGMT_OP_REMOVE_DEVICE,
9614                                                 MGMT_STATUS_INVALID_PARAMS,
9615                                                 &cp->addr, sizeof(cp->addr));
9616                         goto unlock;
9617                 }
9618
9619                 hci_conn_params_free(params);
9620
9621                 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
9622         } else {
9623                 struct hci_conn_params *p, *tmp;
9624                 struct bdaddr_list *b, *btmp;
9625
9626                 if (cp->addr.type) {
9627                         err = mgmt_cmd_complete(sk, hdev->id,
9628                                                 MGMT_OP_REMOVE_DEVICE,
9629                                                 MGMT_STATUS_INVALID_PARAMS,
9630                                                 &cp->addr, sizeof(cp->addr));
9631                         goto unlock;
9632                 }
9633
9634                 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
9635                         device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
9636                         list_del(&b->list);
9637                         kfree(b);
9638                 }
9639
9640                 hci_update_scan(hdev);
9641
9642                 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
9643                         if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
9644                                 continue;
9645                         device_removed(sk, hdev, &p->addr, p->addr_type);
9646                         if (p->explicit_connect) {
9647                                 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
9648                                 continue;
9649                         }
9650                         hci_conn_params_free(p);
9651                 }
9652
9653                 bt_dev_dbg(hdev, "All LE connection parameters were removed");
9654         }
9655
9656         hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
9657
9658 complete:
9659         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
9660                                 MGMT_STATUS_SUCCESS, &cp->addr,
9661                                 sizeof(cp->addr));
9662 unlock:
9663         hci_dev_unlock(hdev);
9664         return err;
9665 }
9666
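/* Handler for MGMT_OP_LOAD_CONN_PARAM: validates the parameter count against
 * the command length, clears disabled connection parameters and then stores
 * the supplied interval/latency/timeout values for each valid LE address.
 */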
9667 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
9668                            u16 len)
9669 {
9670         struct mgmt_cp_load_conn_param *cp = data;
9671         const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
9672                                      sizeof(struct mgmt_conn_param));
9673         u16 param_count, expected_len;
9674         int i;
9675
9676         if (!lmp_le_capable(hdev))
9677                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9678                                        MGMT_STATUS_NOT_SUPPORTED);
9679
9680         param_count = __le16_to_cpu(cp->param_count);
9681         if (param_count > max_param_count) {
9682                 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
9683                            param_count);
9684                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9685                                        MGMT_STATUS_INVALID_PARAMS);
9686         }
9687
9688         expected_len = struct_size(cp, params, param_count);
9689         if (expected_len != len) {
9690                 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
9691                            expected_len, len);
9692                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9693                                        MGMT_STATUS_INVALID_PARAMS);
9694         }
9695
9696         bt_dev_dbg(hdev, "param_count %u", param_count);
9697
9698         hci_dev_lock(hdev);
9699
9700         hci_conn_params_clear_disabled(hdev);
9701
9702         for (i = 0; i < param_count; i++) {
9703                 struct mgmt_conn_param *param = &cp->params[i];
9704                 struct hci_conn_params *hci_param;
9705                 u16 min, max, latency, timeout;
9706                 u8 addr_type;
9707
9708                 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
9709                            param->addr.type);
9710
9711                 if (param->addr.type == BDADDR_LE_PUBLIC) {
9712                         addr_type = ADDR_LE_DEV_PUBLIC;
9713                 } else if (param->addr.type == BDADDR_LE_RANDOM) {
9714                         addr_type = ADDR_LE_DEV_RANDOM;
9715                 } else {
9716                         bt_dev_err(hdev, "ignoring invalid connection parameters");
9717                         continue;
9718                 }
9719
9720                 min = le16_to_cpu(param->min_interval);
9721                 max = le16_to_cpu(param->max_interval);
9722                 latency = le16_to_cpu(param->latency);
9723                 timeout = le16_to_cpu(param->timeout);
9724
9725                 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
9726                            min, max, latency, timeout);
9727
9728                 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
9729                         bt_dev_err(hdev, "ignoring invalid connection parameters");
9730                         continue;
9731                 }
9732
9733                 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
9734                                                 addr_type);
9735                 if (!hci_param) {
9736                         bt_dev_err(hdev, "failed to add connection parameters");
9737                         continue;
9738                 }
9739
9740                 hci_param->conn_min_interval = min;
9741                 hci_param->conn_max_interval = max;
9742                 hci_param->conn_latency = latency;
9743                 hci_param->supervision_timeout = timeout;
9744         }
9745
9746         hci_dev_unlock(hdev);
9747
9748         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
9749                                  NULL, 0);
9750 }
9751
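/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: only accepted while powered off
 * and only if the controller declares HCI_QUIRK_EXTERNAL_CONFIG. Toggling
 * the option can move the controller between the configured and
 * unconfigured index lists.
 */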
9752 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
9753                                void *data, u16 len)
9754 {
9755         struct mgmt_cp_set_external_config *cp = data;
9756         bool changed;
9757         int err;
9758
9759         bt_dev_dbg(hdev, "sock %p", sk);
9760
9761         if (hdev_is_powered(hdev))
9762                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9763                                        MGMT_STATUS_REJECTED);
9764
9765         if (cp->config != 0x00 && cp->config != 0x01)
9766                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9767                                          MGMT_STATUS_INVALID_PARAMS);
9768
9769         if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
9770                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9771                                        MGMT_STATUS_NOT_SUPPORTED);
9772
9773         hci_dev_lock(hdev);
9774
9775         if (cp->config)
9776                 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
9777         else
9778                 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
9779
9780         err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
9781         if (err < 0)
9782                 goto unlock;
9783
9784         if (!changed)
9785                 goto unlock;
9786
9787         err = new_options(hdev, sk);
9788
9789         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
9790                 mgmt_index_removed(hdev);
9791
9792                 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
9793                         hci_dev_set_flag(hdev, HCI_CONFIG);
9794                         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9795
9796                         queue_work(hdev->req_workqueue, &hdev->power_on);
9797                 } else {
9798                         set_bit(HCI_RAW, &hdev->flags);
9799                         mgmt_index_added(hdev);
9800                 }
9801         }
9802
9803 unlock:
9804         hci_dev_unlock(hdev);
9805         return err;
9806 }
9807
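/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: only accepted while powered off
 * and only if the driver provides a set_bdaddr callback. Once the device is
 * fully configured it is powered on so that the new address gets programmed.
 */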
9808 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
9809                               void *data, u16 len)
9810 {
9811         struct mgmt_cp_set_public_address *cp = data;
9812         bool changed;
9813         int err;
9814
9815         bt_dev_dbg(hdev, "sock %p", sk);
9816
9817         if (hdev_is_powered(hdev))
9818                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9819                                        MGMT_STATUS_REJECTED);
9820
9821         if (!bacmp(&cp->bdaddr, BDADDR_ANY))
9822                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9823                                        MGMT_STATUS_INVALID_PARAMS);
9824
9825         if (!hdev->set_bdaddr)
9826                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9827                                        MGMT_STATUS_NOT_SUPPORTED);
9828
9829         hci_dev_lock(hdev);
9830
9831         changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
9832         bacpy(&hdev->public_addr, &cp->bdaddr);
9833
9834         err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
9835         if (err < 0)
9836                 goto unlock;
9837
9838         if (!changed)
9839                 goto unlock;
9840
9841         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
9842                 err = new_options(hdev, sk);
9843
9844         if (is_configured(hdev)) {
9845                 mgmt_index_removed(hdev);
9846
9847                 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
9848
9849                 hci_dev_set_flag(hdev, HCI_CONFIG);
9850                 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9851
9852                 queue_work(hdev->req_workqueue, &hdev->power_on);
9853         }
9854
9855 unlock:
9856         hci_dev_unlock(hdev);
9857         return err;
9858 }
9859
9860 #ifdef TIZEN_BT
9861 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
9862                             u8 name_len)
9863 {
9864         char buf[512];
9865         struct mgmt_ev_device_name_update *ev = (void *)buf;
9866         u16 eir_len = 0;
9867
9868         if (name_len <= 0)
9869                 return -EINVAL;
9870
9871         bacpy(&ev->addr.bdaddr, bdaddr);
9872         ev->addr.type = BDADDR_BREDR;
9873
9874         eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9875                                   name_len);
9876
9877         ev->eir_len = cpu_to_le16(eir_len);
9878
9879         return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
9880                           sizeof(*ev) + eir_len, NULL);
9881 }
9882
9883 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9884                                u8 link_type, u8 addr_type, u8 status)
9885 {
9886         struct mgmt_ev_conn_update_failed ev;
9887
9888         bacpy(&ev.addr.bdaddr, bdaddr);
9889         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9890         ev.status = status;
9891
9892         return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
9893                                 &ev, sizeof(ev), NULL);
9894 }
9895
9896 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
9897                          u8 link_type, u8 addr_type, u16 conn_interval,
9898                          u16 conn_latency, u16 supervision_timeout)
9899 {
9900         struct mgmt_ev_conn_updated ev;
9901
9902         bacpy(&ev.addr.bdaddr, bdaddr);
9903         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9904         ev.conn_interval = cpu_to_le16(conn_interval);
9905         ev.conn_latency = cpu_to_le16(conn_latency);
9906         ev.supervision_timeout = cpu_to_le16(supervision_timeout);
9907
9908         return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
9909                                 &ev, sizeof(ev), NULL);
9910 }
9911
9912 /* le device found event - Pass adv type */
9913 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9914                 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
9915                 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
9916 {
9917         char buf[512];
9918         struct mgmt_ev_le_device_found *ev = (void *)buf;
9919         size_t ev_size;
9920
9921         if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
9922                 return;
9923
9924         /* Make sure that the buffer is big enough. The 5 extra bytes
9925          * are for the potential CoD field.
9926          */
9927         if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9928                 return;
9929
9930         memset(buf, 0, sizeof(buf));
9931
9932         bacpy(&ev->addr.bdaddr, bdaddr);
9933         ev->addr.type = link_to_bdaddr(link_type, addr_type);
9934         ev->rssi = rssi;
9935         ev->flags = cpu_to_le32(flags);
9936         ev->adv_type = adv_type;
9937
9938         if (eir_len > 0)
9939                 memcpy(ev->eir, eir, eir_len);
9940
9941         if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
9942                 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9943                                           dev_class, 3);
9944
9945         if (scan_rsp_len > 0)
9946                 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9947
9948         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9949         ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9950
9951         mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9952 }
9953 #endif
9954
9955 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
9956                                              int err)
9957 {
9958         const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
9959         struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
9960         u8 *h192, *r192, *h256, *r256;
9961         struct mgmt_pending_cmd *cmd = data;
9962         struct sk_buff *skb = cmd->skb;
9963         u8 status = mgmt_status(err);
9964         u16 eir_len;
9965
9966         if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
9967                 return;
9968
9969         if (!status) {
9970                 if (!skb)
9971                         status = MGMT_STATUS_FAILED;
9972                 else if (IS_ERR(skb))
9973                         status = mgmt_status(PTR_ERR(skb));
9974                 else
9975                         status = mgmt_status(skb->data[0]);
9976         }
9977
9978         bt_dev_dbg(hdev, "status %u", status);
9979
9980         mgmt_cp = cmd->param;
9981
9982         if (status) {
9983                 status = mgmt_status(status);
9984                 eir_len = 0;
9985
9986                 h192 = NULL;
9987                 r192 = NULL;
9988                 h256 = NULL;
9989                 r256 = NULL;
9990         } else if (!bredr_sc_enabled(hdev)) {
9991                 struct hci_rp_read_local_oob_data *rp;
9992
9993                 if (skb->len != sizeof(*rp)) {
9994                         status = MGMT_STATUS_FAILED;
9995                         eir_len = 0;
9996                 } else {
9997                         status = MGMT_STATUS_SUCCESS;
9998                         rp = (void *)skb->data;
9999
10000                         eir_len = 5 + 18 + 18;
10001                         h192 = rp->hash;
10002                         r192 = rp->rand;
10003                         h256 = NULL;
10004                         r256 = NULL;
10005                 }
10006         } else {
10007                 struct hci_rp_read_local_oob_ext_data *rp;
10008
10009                 if (skb->len != sizeof(*rp)) {
10010                         status = MGMT_STATUS_FAILED;
10011                         eir_len = 0;
10012                 } else {
10013                         status = MGMT_STATUS_SUCCESS;
10014                         rp = (void *)skb->data;
10015
10016                         if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
10017                                 eir_len = 5 + 18 + 18;
10018                                 h192 = NULL;
10019                                 r192 = NULL;
10020                         } else {
10021                                 eir_len = 5 + 18 + 18 + 18 + 18;
10022                                 h192 = rp->hash192;
10023                                 r192 = rp->rand192;
10024                         }
10025
10026                         h256 = rp->hash256;
10027                         r256 = rp->rand256;
10028                 }
10029         }
10030
10031         mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
10032         if (!mgmt_rp)
10033                 goto done;
10034
10035         if (eir_len == 0)
10036                 goto send_rsp;
10037
10038         eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
10039                                   hdev->dev_class, 3);
10040
10041         if (h192 && r192) {
10042                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10043                                           EIR_SSP_HASH_C192, h192, 16);
10044                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10045                                           EIR_SSP_RAND_R192, r192, 16);
10046         }
10047
10048         if (h256 && r256) {
10049                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10050                                           EIR_SSP_HASH_C256, h256, 16);
10051                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10052                                           EIR_SSP_RAND_R256, r256, 16);
10053         }
10054
10055 send_rsp:
10056         mgmt_rp->type = mgmt_cp->type;
10057         mgmt_rp->eir_len = cpu_to_le16(eir_len);
10058
10059         err = mgmt_cmd_complete(cmd->sk, hdev->id,
10060                                 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
10061                                 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
10062         if (err < 0 || status)
10063                 goto done;
10064
10065         hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
10066
10067         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10068                                  mgmt_rp, sizeof(*mgmt_rp) + eir_len,
10069                                  HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
10070 done:
10071         if (skb && !IS_ERR(skb))
10072                 kfree_skb(skb);
10073
10074         kfree(mgmt_rp);
10075         mgmt_pending_remove(cmd);
10076 }
10077
10078 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
10079                                   struct mgmt_cp_read_local_oob_ext_data *cp)
10080 {
10081         struct mgmt_pending_cmd *cmd;
10082         int err;
10083
10084         cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
10085                                cp, sizeof(*cp));
10086         if (!cmd)
10087                 return -ENOMEM;
10088
10089         err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
10090                                  read_local_oob_ext_data_complete);
10091
10092         if (err < 0) {
10093                 mgmt_pending_remove(cmd);
10094                 return err;
10095         }
10096
10097         return 0;
10098 }
10099
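/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: for BR/EDR the OOB hash and
 * randomizer are read from the controller asynchronously, while for LE the
 * EIR-formatted reply (address, role, SC confirm/random values, flags) is
 * built directly here. The request is rejected while privacy is enabled
 * since the active RPA cannot be reported.
 */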
10100 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
10101                                    void *data, u16 data_len)
10102 {
10103         struct mgmt_cp_read_local_oob_ext_data *cp = data;
10104         struct mgmt_rp_read_local_oob_ext_data *rp;
10105         size_t rp_len;
10106         u16 eir_len;
10107         u8 status, flags, role, addr[7], hash[16], rand[16];
10108         int err;
10109
10110         bt_dev_dbg(hdev, "sock %p", sk);
10111
10112         if (hdev_is_powered(hdev)) {
10113                 switch (cp->type) {
10114                 case BIT(BDADDR_BREDR):
10115                         status = mgmt_bredr_support(hdev);
10116                         if (status)
10117                                 eir_len = 0;
10118                         else
10119                                 eir_len = 5;
10120                         break;
10121                 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10122                         status = mgmt_le_support(hdev);
10123                         if (status)
10124                                 eir_len = 0;
10125                         else
10126                                 eir_len = 9 + 3 + 18 + 18 + 3;
10127                         break;
10128                 default:
10129                         status = MGMT_STATUS_INVALID_PARAMS;
10130                         eir_len = 0;
10131                         break;
10132                 }
10133         } else {
10134                 status = MGMT_STATUS_NOT_POWERED;
10135                 eir_len = 0;
10136         }
10137
10138         rp_len = sizeof(*rp) + eir_len;
10139         rp = kmalloc(rp_len, GFP_ATOMIC);
10140         if (!rp)
10141                 return -ENOMEM;
10142
10143         if (!status && !lmp_ssp_capable(hdev)) {
10144                 status = MGMT_STATUS_NOT_SUPPORTED;
10145                 eir_len = 0;
10146         }
10147
10148         if (status)
10149                 goto complete;
10150
10151         hci_dev_lock(hdev);
10152
10153         eir_len = 0;
10154         switch (cp->type) {
10155         case BIT(BDADDR_BREDR):
10156                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10157                         err = read_local_ssp_oob_req(hdev, sk, cp);
10158                         hci_dev_unlock(hdev);
10159                         if (!err)
10160                                 goto done;
10161
10162                         status = MGMT_STATUS_FAILED;
10163                         goto complete;
10164                 } else {
10165                         eir_len = eir_append_data(rp->eir, eir_len,
10166                                                   EIR_CLASS_OF_DEV,
10167                                                   hdev->dev_class, 3);
10168                 }
10169                 break;
10170         case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10171                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
10172                     smp_generate_oob(hdev, hash, rand) < 0) {
10173                         hci_dev_unlock(hdev);
10174                         status = MGMT_STATUS_FAILED;
10175                         goto complete;
10176                 }
10177
10178                 /* This should return the active RPA, but since the RPA
10179                  * is only programmed on demand, it is really hard to fill
10180                  * this in at the moment. For now disallow retrieving
10181                  * local out-of-band data when privacy is in use.
10182                  *
10183                  * Returning the identity address will not help here since
10184                  * pairing happens before the identity resolving key is
10185                  * known and thus the connection establishment happens
10186                  * based on the RPA and not the identity address.
10187                  */
10188                 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
10189                         hci_dev_unlock(hdev);
10190                         status = MGMT_STATUS_REJECTED;
10191                         goto complete;
10192                 }
10193
10194                 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
10195                    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
10196                    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
10197                     bacmp(&hdev->static_addr, BDADDR_ANY))) {
10198                         memcpy(addr, &hdev->static_addr, 6);
10199                         addr[6] = 0x01;
10200                 } else {
10201                         memcpy(addr, &hdev->bdaddr, 6);
10202                         addr[6] = 0x00;
10203                 }
10204
10205                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
10206                                           addr, sizeof(addr));
10207
10208                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
10209                         role = 0x02;
10210                 else
10211                         role = 0x01;
10212
10213                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
10214                                           &role, sizeof(role));
10215
10216                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
10217                         eir_len = eir_append_data(rp->eir, eir_len,
10218                                                   EIR_LE_SC_CONFIRM,
10219                                                   hash, sizeof(hash));
10220
10221                         eir_len = eir_append_data(rp->eir, eir_len,
10222                                                   EIR_LE_SC_RANDOM,
10223                                                   rand, sizeof(rand));
10224                 }
10225
10226                 flags = mgmt_get_adv_discov_flags(hdev);
10227
10228                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
10229                         flags |= LE_AD_NO_BREDR;
10230
10231                 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
10232                                           &flags, sizeof(flags));
10233                 break;
10234         }
10235
10236         hci_dev_unlock(hdev);
10237
10238         hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
10239
10240         status = MGMT_STATUS_SUCCESS;
10241
10242 complete:
10243         rp->type = cp->type;
10244         rp->eir_len = cpu_to_le16(eir_len);
10245
10246         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
10247                                 status, rp, sizeof(*rp) + eir_len);
10248         if (err < 0 || status)
10249                 goto done;
10250
10251         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10252                                  rp, sizeof(*rp) + eir_len,
10253                                  HCI_MGMT_OOB_DATA_EVENTS, sk);
10254
10255 done:
10256         kfree(rp);
10257
10258         return err;
10259 }
10260
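/* Build the bitmask of advertising flags this controller can honour; the
 * extended advertising specific bits are only reported when
 * ext_adv_capable(hdev) is true.
 */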
10261 static u32 get_supported_adv_flags(struct hci_dev *hdev)
10262 {
10263         u32 flags = 0;
10264
10265         flags |= MGMT_ADV_FLAG_CONNECTABLE;
10266         flags |= MGMT_ADV_FLAG_DISCOV;
10267         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
10268         flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
10269         flags |= MGMT_ADV_FLAG_APPEARANCE;
10270         flags |= MGMT_ADV_FLAG_LOCAL_NAME;
10271         flags |= MGMT_ADV_PARAM_DURATION;
10272         flags |= MGMT_ADV_PARAM_TIMEOUT;
10273         flags |= MGMT_ADV_PARAM_INTERVALS;
10274         flags |= MGMT_ADV_PARAM_TX_POWER;
10275         flags |= MGMT_ADV_PARAM_SCAN_RSP;
10276
10277         /* With extended advertising the TX_POWER returned from Set Adv Param
10278          * is always valid.
10279          */
10280         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
10281                 flags |= MGMT_ADV_FLAG_TX_POWER;
10282
10283         if (ext_adv_capable(hdev)) {
10284                 flags |= MGMT_ADV_FLAG_SEC_1M;
10285                 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
10286                 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
10287
10288                 if (le_2m_capable(hdev))
10289                         flags |= MGMT_ADV_FLAG_SEC_2M;
10290
10291                 if (le_coded_capable(hdev))
10292                         flags |= MGMT_ADV_FLAG_SEC_CODED;
10293         }
10294
10295         return flags;
10296 }
10297
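/* Handler for MGMT_OP_READ_ADV_FEATURES: reports the supported advertising
 * flags, the advertising data and scan response size limits and the list of
 * currently registered advertising instances.
 */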
10298 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
10299                              void *data, u16 data_len)
10300 {
10301         struct mgmt_rp_read_adv_features *rp;
10302         size_t rp_len;
10303         int err;
10304         struct adv_info *adv_instance;
10305         u32 supported_flags;
10306         u8 *instance;
10307
10308         bt_dev_dbg(hdev, "sock %p", sk);
10309
10310         if (!lmp_le_capable(hdev))
10311                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10312                                        MGMT_STATUS_REJECTED);
10313
10314         hci_dev_lock(hdev);
10315
10316         rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
10317         rp = kmalloc(rp_len, GFP_ATOMIC);
10318         if (!rp) {
10319                 hci_dev_unlock(hdev);
10320                 return -ENOMEM;
10321         }
10322
10323         supported_flags = get_supported_adv_flags(hdev);
10324
10325         rp->supported_flags = cpu_to_le32(supported_flags);
10326         rp->max_adv_data_len = max_adv_len(hdev);
10327         rp->max_scan_rsp_len = max_adv_len(hdev);
10328         rp->max_instances = hdev->le_num_of_adv_sets;
10329         rp->num_instances = hdev->adv_instance_cnt;
10330
10331         instance = rp->instance;
10332         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
10333                 /* Only instances 1-le_num_of_adv_sets are externally visible */
10334                 if (adv_instance->instance <= hdev->adv_instance_cnt) {
10335                         *instance = adv_instance->instance;
10336                         instance++;
10337                 } else {
10338                         rp->num_instances--;
10339                         rp_len--;
10340                 }
10341         }
10342
10343         hci_dev_unlock(hdev);
10344
10345         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10346                                 MGMT_STATUS_SUCCESS, rp, rp_len);
10347
10348         kfree(rp);
10349
10350         return err;
10351 }
10352
10353 static u8 calculate_name_len(struct hci_dev *hdev)
10354 {
10355         u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
10356
10357         return eir_append_local_name(hdev, buf, 0);
10358 }
10359
10360 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
10361                            bool is_adv_data)
10362 {
10363         u8 max_len = max_adv_len(hdev);
10364
10365         if (is_adv_data) {
10366                 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
10367                                  MGMT_ADV_FLAG_LIMITED_DISCOV |
10368                                  MGMT_ADV_FLAG_MANAGED_FLAGS))
10369                         max_len -= 3;
10370
10371                 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
10372                         max_len -= 3;
10373         } else {
10374                 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
10375                         max_len -= calculate_name_len(hdev);
10376
10377                 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
10378                         max_len -= 4;
10379         }
10380
10381         return max_len;
10382 }
10383
10384 static bool flags_managed(u32 adv_flags)
10385 {
10386         return adv_flags & (MGMT_ADV_FLAG_DISCOV |
10387                             MGMT_ADV_FLAG_LIMITED_DISCOV |
10388                             MGMT_ADV_FLAG_MANAGED_FLAGS);
10389 }
10390
10391 static bool tx_power_managed(u32 adv_flags)
10392 {
10393         return adv_flags & MGMT_ADV_FLAG_TX_POWER;
10394 }
10395
10396 static bool name_managed(u32 adv_flags)
10397 {
10398         return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
10399 }
10400
10401 static bool appearance_managed(u32 adv_flags)
10402 {
10403         return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
10404 }
10405
10406 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
10407                               u8 len, bool is_adv_data)
10408 {
10409         int i, cur_len;
10410         u8 max_len;
10411
10412         max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
10413
10414         if (len > max_len)
10415                 return false;
10416
10417         /* Make sure that the data is correctly formatted. */
10418         for (i = 0; i < len; i += (cur_len + 1)) {
10419                 cur_len = data[i];
10420
10421                 if (!cur_len)
10422                         continue;
10423
10424                 if (data[i + 1] == EIR_FLAGS &&
10425                     (!is_adv_data || flags_managed(adv_flags)))
10426                         return false;
10427
10428                 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
10429                         return false;
10430
10431                 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
10432                         return false;
10433
10434                 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
10435                         return false;
10436
10437                 if (data[i + 1] == EIR_APPEARANCE &&
10438                     appearance_managed(adv_flags))
10439                         return false;
10440
10441                 /* If the current field length would exceed the total data
10442                  * length, then it's invalid.
10443                  */
10444                 if (i + cur_len >= len)
10445                         return false;
10446         }
10447
10448         return true;
10449 }
10450
10451 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
10452 {
10453         u32 supported_flags, phy_flags;
10454
10455         /* The current implementation only supports a subset of the specified
10456          * flags. The secondary PHY (sec) flags must also be mutually exclusive.
10457          */
10458         supported_flags = get_supported_adv_flags(hdev);
10459         phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
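              /* phy_flags & -phy_flags isolates the lowest set secondary PHY bit;
               * XOR-ing that back into phy_flags is non-zero only when more than
               * one of the MGMT_ADV_FLAG_SEC_* flags was requested.
               */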
10460         if (adv_flags & ~supported_flags ||
10461             ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
10462                 return false;
10463
10464         return true;
10465 }
10466
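      /* Advertising commands are rejected with MGMT_STATUS_BUSY while a Set LE
       * operation is still pending.
       */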
10467 static bool adv_busy(struct hci_dev *hdev)
10468 {
10469         return pending_find(MGMT_OP_SET_LE, hdev);
10470 }
10471
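      /* Clear the pending marker on instances that were successfully programmed.
       * On failure, remove any instance that is still pending and let userspace
       * know it is gone.
       */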
10472 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
10473                              int err)
10474 {
10475         struct adv_info *adv, *n;
10476
10477         bt_dev_dbg(hdev, "err %d", err);
10478
10479         hci_dev_lock(hdev);
10480
10481         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
10482                 u8 instance;
10483
10484                 if (!adv->pending)
10485                         continue;
10486
10487                 if (!err) {
10488                         adv->pending = false;
10489                         continue;
10490                 }
10491
10492                 instance = adv->instance;
10493
10494                 if (hdev->cur_adv_instance == instance)
10495                         cancel_adv_timeout(hdev);
10496
10497                 hci_remove_adv_instance(hdev, instance);
10498                 mgmt_advertising_removed(sk, hdev, instance);
10499         }
10500
10501         hci_dev_unlock(hdev);
10502 }
10503
10504 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
10505 {
10506         struct mgmt_pending_cmd *cmd = data;
10507         struct mgmt_cp_add_advertising *cp = cmd->param;
10508         struct mgmt_rp_add_advertising rp;
10509
10510         memset(&rp, 0, sizeof(rp));
10511
10512         rp.instance = cp->instance;
10513
10514         if (err)
10515                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10516                                 mgmt_status(err));
10517         else
10518                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10519                                   mgmt_status(err), &rp, sizeof(rp));
10520
10521         add_adv_complete(hdev, cmd->sk, cp->instance, err);
10522
10523         mgmt_pending_free(cmd);
10524 }
10525
10526 static int add_advertising_sync(struct hci_dev *hdev, void *data)
10527 {
10528         struct mgmt_pending_cmd *cmd = data;
10529         struct mgmt_cp_add_advertising *cp = cmd->param;
10530
10531         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10532 }
10533
10534 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10535                            void *data, u16 data_len)
10536 {
10537         struct mgmt_cp_add_advertising *cp = data;
10538         struct mgmt_rp_add_advertising rp;
10539         u32 flags;
10540         u8 status;
10541         u16 timeout, duration;
10542         unsigned int prev_instance_cnt;
10543         u8 schedule_instance = 0;
10544         struct adv_info *adv, *next_instance;
10545         int err;
10546         struct mgmt_pending_cmd *cmd;
10547
10548         bt_dev_dbg(hdev, "sock %p", sk);
10549
10550         status = mgmt_le_support(hdev);
10551         if (status)
10552                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10553                                        status);
10554
10555         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10556                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10557                                        MGMT_STATUS_INVALID_PARAMS);
10558
10559         if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10560                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10561                                        MGMT_STATUS_INVALID_PARAMS);
10562
10563         flags = __le32_to_cpu(cp->flags);
10564         timeout = __le16_to_cpu(cp->timeout);
10565         duration = __le16_to_cpu(cp->duration);
10566
10567         if (!requested_adv_flags_are_valid(hdev, flags))
10568                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10569                                        MGMT_STATUS_INVALID_PARAMS);
10570
10571         hci_dev_lock(hdev);
10572
10573         if (timeout && !hdev_is_powered(hdev)) {
10574                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10575                                       MGMT_STATUS_REJECTED);
10576                 goto unlock;
10577         }
10578
10579         if (adv_busy(hdev)) {
10580                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10581                                       MGMT_STATUS_BUSY);
10582                 goto unlock;
10583         }
10584
10585         if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
10586             !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
10587                                cp->scan_rsp_len, false)) {
10588                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10589                                       MGMT_STATUS_INVALID_PARAMS);
10590                 goto unlock;
10591         }
10592
10593         prev_instance_cnt = hdev->adv_instance_cnt;
10594
10595         adv = hci_add_adv_instance(hdev, cp->instance, flags,
10596                                    cp->adv_data_len, cp->data,
10597                                    cp->scan_rsp_len,
10598                                    cp->data + cp->adv_data_len,
10599                                    timeout, duration,
10600                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
10601                                    hdev->le_adv_min_interval,
10602                                    hdev->le_adv_max_interval, 0);
10603         if (IS_ERR(adv)) {
10604                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10605                                       MGMT_STATUS_FAILED);
10606                 goto unlock;
10607         }
10608
10609         /* Only trigger an advertising added event if a new instance was
10610          * actually added.
10611          */
10612         if (hdev->adv_instance_cnt > prev_instance_cnt)
10613                 mgmt_advertising_added(sk, hdev, cp->instance);
10614
10615         if (hdev->cur_adv_instance == cp->instance) {
10616                 /* If the currently advertised instance is being changed then
10617                  * cancel the current advertising and schedule the next
10618                  * instance. If there is only one instance then the overridden
10619                  * advertising data will be visible right away.
10620                  */
10621                 cancel_adv_timeout(hdev);
10622
10623                 next_instance = hci_get_next_instance(hdev, cp->instance);
10624                 if (next_instance)
10625                         schedule_instance = next_instance->instance;
10626         } else if (!hdev->adv_instance_timeout) {
10627                 /* Immediately advertise the new instance if no other
10628                  * instance is currently being advertised.
10629                  */
10630                 schedule_instance = cp->instance;
10631         }
10632
10633         /* If the HCI_ADVERTISING flag is set or the device isn't powered or
10634          * there is no instance to be advertised then we have no HCI
10635          * communication to make. Simply return.
10636          */
10637         if (!hdev_is_powered(hdev) ||
10638             hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10639             !schedule_instance) {
10640                 rp.instance = cp->instance;
10641                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10642                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10643                 goto unlock;
10644         }
10645
10646         /* We're good to go, update advertising data, parameters, and start
10647          * advertising.
10648          */
10649         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
10650                                data_len);
10651         if (!cmd) {
10652                 err = -ENOMEM;
10653                 goto unlock;
10654         }
10655
10656         cp->instance = schedule_instance;
10657
10658         err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
10659                                  add_advertising_complete);
10660         if (err < 0)
10661                 mgmt_pending_free(cmd);
10662
10663 unlock:
10664         hci_dev_unlock(hdev);
10665
10666         return err;
10667 }
10668
10669 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
10670                                         int err)
10671 {
10672         struct mgmt_pending_cmd *cmd = data;
10673         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
10674         struct mgmt_rp_add_ext_adv_params rp;
10675         struct adv_info *adv;
10676         u32 flags;
10677
10678         BT_DBG("%s", hdev->name);
10679
10680         hci_dev_lock(hdev);
10681
10682         adv = hci_find_adv_instance(hdev, cp->instance);
10683         if (!adv)
10684                 goto unlock;
10685
10686         rp.instance = cp->instance;
10687         rp.tx_power = adv->tx_power;
10688
10689         /* While we're at it, inform userspace of the available space for this
10690          * advertisement, given the flags that will be used.
10691          */
10692         flags = __le32_to_cpu(cp->flags);
10693         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10694         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10695
10696         if (err) {
10697                 /* If this advertisement was previously advertising and we
10698                  * failed to update it, we signal that it has been removed and
10699                  * delete its structure.
10700                  */
10701                 if (!adv->pending)
10702                         mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
10703
10704                 hci_remove_adv_instance(hdev, cp->instance);
10705
10706                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10707                                 mgmt_status(err));
10708         } else {
10709                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10710                                   mgmt_status(err), &rp, sizeof(rp));
10711         }
10712
10713 unlock:
10714         if (cmd)
10715                 mgmt_pending_free(cmd);
10716
10717         hci_dev_unlock(hdev);
10718 }
10719
10720 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
10721 {
10722         struct mgmt_pending_cmd *cmd = data;
10723         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
10724
10725         return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
10726 }
10727
10728 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
10729                               void *data, u16 data_len)
10730 {
10731         struct mgmt_cp_add_ext_adv_params *cp = data;
10732         struct mgmt_rp_add_ext_adv_params rp;
10733         struct mgmt_pending_cmd *cmd = NULL;
10734         struct adv_info *adv;
10735         u32 flags, min_interval, max_interval;
10736         u16 timeout, duration;
10737         u8 status;
10738         s8 tx_power;
10739         int err;
10740
10741         BT_DBG("%s", hdev->name);
10742
10743         status = mgmt_le_support(hdev);
10744         if (status)
10745                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10746                                        status);
10747
10748         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10749                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10750                                        MGMT_STATUS_INVALID_PARAMS);
10751
10752         /* The purpose of breaking add_advertising into two separate MGMT calls
10753          * for params and data is to allow more parameters to be added to this
10754          * structure in the future. For this reason, we verify that we have the
10755          * bare minimum structure we know of when the interface was defined. Any
10756          * extra parameters we don't know about will be ignored in this request.
10757          */
10758         if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
10759                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10760                                        MGMT_STATUS_INVALID_PARAMS);
10761
10762         flags = __le32_to_cpu(cp->flags);
10763
10764         if (!requested_adv_flags_are_valid(hdev, flags))
10765                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10766                                        MGMT_STATUS_INVALID_PARAMS);
10767
10768         hci_dev_lock(hdev);
10769
10770         /* In the new interface, we require that we are powered to register */
10771         if (!hdev_is_powered(hdev)) {
10772                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10773                                       MGMT_STATUS_REJECTED);
10774                 goto unlock;
10775         }
10776
10777         if (adv_busy(hdev)) {
10778                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10779                                       MGMT_STATUS_BUSY);
10780                 goto unlock;
10781         }
10782
10783         /* Parse defined parameters from request, use defaults otherwise */
10784         timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
10785                   __le16_to_cpu(cp->timeout) : 0;
10786
10787         duration = (flags & MGMT_ADV_PARAM_DURATION) ?
10788                    __le16_to_cpu(cp->duration) :
10789                    hdev->def_multi_adv_rotation_duration;
10790
10791         min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10792                        __le32_to_cpu(cp->min_interval) :
10793                        hdev->le_adv_min_interval;
10794
10795         max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10796                        __le32_to_cpu(cp->max_interval) :
10797                        hdev->le_adv_max_interval;
10798
10799         tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
10800                    cp->tx_power :
10801                    HCI_ADV_TX_POWER_NO_PREFERENCE;
10802
10803         /* Create advertising instance with no advertising or response data */
10804         adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
10805                                    timeout, duration, tx_power, min_interval,
10806                                    max_interval, 0);
10807
10808         if (IS_ERR(adv)) {
10809                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10810                                       MGMT_STATUS_FAILED);
10811                 goto unlock;
10812         }
10813
10814         /* Submit request for advertising params if ext adv available */
10815         if (ext_adv_capable(hdev)) {
10816                 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
10817                                        data, data_len);
10818                 if (!cmd) {
10819                         err = -ENOMEM;
10820                         hci_remove_adv_instance(hdev, cp->instance);
10821                         goto unlock;
10822                 }
10823
10824                 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
10825                                          add_ext_adv_params_complete);
10826                 if (err < 0)
10827                         mgmt_pending_free(cmd);
10828         } else {
10829                 rp.instance = cp->instance;
10830                 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
10831                 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10832                 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10833                 err = mgmt_cmd_complete(sk, hdev->id,
10834                                         MGMT_OP_ADD_EXT_ADV_PARAMS,
10835                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10836         }
10837
10838 unlock:
10839         hci_dev_unlock(hdev);
10840
10841         return err;
10842 }
10843
10844 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
10845 {
10846         struct mgmt_pending_cmd *cmd = data;
10847         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10848         struct mgmt_rp_add_advertising rp;
10849
10850         add_adv_complete(hdev, cmd->sk, cp->instance, err);
10851
10852         memset(&rp, 0, sizeof(rp));
10853
10854         rp.instance = cp->instance;
10855
10856         if (err)
10857                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10858                                 mgmt_status(err));
10859         else
10860                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10861                                   mgmt_status(err), &rp, sizeof(rp));
10862
10863         mgmt_pending_free(cmd);
10864 }
10865
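      /* With extended advertising the instance data, scan response and enable
       * are programmed directly; otherwise fall back to the legacy instance
       * scheduler.
       */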
10866 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
10867 {
10868         struct mgmt_pending_cmd *cmd = data;
10869         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10870         int err;
10871
10872         if (ext_adv_capable(hdev)) {
10873                 err = hci_update_adv_data_sync(hdev, cp->instance);
10874                 if (err)
10875                         return err;
10876
10877                 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
10878                 if (err)
10879                         return err;
10880
10881                 return hci_enable_ext_advertising_sync(hdev, cp->instance);
10882         }
10883
10884         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10885 }
10886
10887 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
10888                             u16 data_len)
10889 {
10890         struct mgmt_cp_add_ext_adv_data *cp = data;
10891         struct mgmt_rp_add_ext_adv_data rp;
10892         u8 schedule_instance = 0;
10893         struct adv_info *next_instance;
10894         struct adv_info *adv_instance;
10895         int err = 0;
10896         struct mgmt_pending_cmd *cmd;
10897
10898         BT_DBG("%s", hdev->name);
10899
10900         hci_dev_lock(hdev);
10901
10902         adv_instance = hci_find_adv_instance(hdev, cp->instance);
10903
10904         if (!adv_instance) {
10905                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10906                                       MGMT_STATUS_INVALID_PARAMS);
10907                 goto unlock;
10908         }
10909
10910         /* In the new interface, we require that we are powered to register */
10911         if (!hdev_is_powered(hdev)) {
10912                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10913                                       MGMT_STATUS_REJECTED);
10914                 goto clear_new_instance;
10915         }
10916
10917         if (adv_busy(hdev)) {
10918                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10919                                       MGMT_STATUS_BUSY);
10920                 goto clear_new_instance;
10921         }
10922
10923         /* Validate new data */
10924         if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
10925                                cp->adv_data_len, true) ||
10926             !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
10927                                cp->adv_data_len, cp->scan_rsp_len, false)) {
10928                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10929                                       MGMT_STATUS_INVALID_PARAMS);
10930                 goto clear_new_instance;
10931         }
10932
10933         /* Set the data in the advertising instance */
10934         hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10935                                   cp->data, cp->scan_rsp_len,
10936                                   cp->data + cp->adv_data_len);
10937
10938         /* If using software rotation, determine next instance to use */
10939         if (hdev->cur_adv_instance == cp->instance) {
10940                 /* If the currently advertised instance is being changed
10941                  * then cancel the current advertising and schedule the
10942                  * next instance. If there is only one instance then the
10943                  * overridden advertising data will be visible right
10944                  * away.
10945                  */
10946                 cancel_adv_timeout(hdev);
10947
10948                 next_instance = hci_get_next_instance(hdev, cp->instance);
10949                 if (next_instance)
10950                         schedule_instance = next_instance->instance;
10951         } else if (!hdev->adv_instance_timeout) {
10952                 /* Immediately advertise the new instance if no other
10953                  * instance is currently being advertised.
10954                  */
10955                 schedule_instance = cp->instance;
10956         }
10957
10958         /* If the HCI_ADVERTISING flag is set or there is no instance to
10959          * be advertised then we have no HCI communication to make.
10960          * Simply return.
10961          */
10962         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
10963                 if (adv_instance->pending) {
10964                         mgmt_advertising_added(sk, hdev, cp->instance);
10965                         adv_instance->pending = false;
10966                 }
10967                 rp.instance = cp->instance;
10968                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10969                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10970                 goto unlock;
10971         }
10972
10973         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10974                                data_len);
10975         if (!cmd) {
10976                 err = -ENOMEM;
10977                 goto clear_new_instance;
10978         }
10979
10980         err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
10981                                  add_ext_adv_data_complete);
10982         if (err < 0) {
10983                 mgmt_pending_free(cmd);
10984                 goto clear_new_instance;
10985         }
10986
10987         /* We were successful in updating data, so trigger advertising_added
10988          * event if this is an instance that wasn't previously advertising. If
10989          * a failure occurs in the requests we initiated, we will remove the
10990          * instance again in add_adv_complete().
10991          */
10992         if (adv_instance->pending)
10993                 mgmt_advertising_added(sk, hdev, cp->instance);
10994
10995         goto unlock;
10996
10997 clear_new_instance:
10998         hci_remove_adv_instance(hdev, cp->instance);
10999
11000 unlock:
11001         hci_dev_unlock(hdev);
11002
11003         return err;
11004 }
11005
11006 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
11007                                         int err)
11008 {
11009         struct mgmt_pending_cmd *cmd = data;
11010         struct mgmt_cp_remove_advertising *cp = cmd->param;
11011         struct mgmt_rp_remove_advertising rp;
11012
11013         bt_dev_dbg(hdev, "err %d", err);
11014
11015         memset(&rp, 0, sizeof(rp));
11016         rp.instance = cp->instance;
11017
11018         if (err)
11019                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11020                                 mgmt_status(err));
11021         else
11022                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11023                                   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11024
11025         mgmt_pending_free(cmd);
11026 }
11027
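      /* Remove the requested instance(s) and, if none are left, disable
       * advertising altogether.
       */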
11028 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
11029 {
11030         struct mgmt_pending_cmd *cmd = data;
11031         struct mgmt_cp_remove_advertising *cp = cmd->param;
11032         int err;
11033
11034         err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
11035         if (err)
11036                 return err;
11037
11038         if (list_empty(&hdev->adv_instances))
11039                 err = hci_disable_advertising_sync(hdev);
11040
11041         return err;
11042 }
11043
11044 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
11045                               void *data, u16 data_len)
11046 {
11047         struct mgmt_cp_remove_advertising *cp = data;
11048         struct mgmt_pending_cmd *cmd;
11049         int err;
11050
11051         bt_dev_dbg(hdev, "sock %p", sk);
11052
11053         hci_dev_lock(hdev);
11054
11055         if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
11056                 err = mgmt_cmd_status(sk, hdev->id,
11057                                       MGMT_OP_REMOVE_ADVERTISING,
11058                                       MGMT_STATUS_INVALID_PARAMS);
11059                 goto unlock;
11060         }
11061
11062         if (pending_find(MGMT_OP_SET_LE, hdev)) {
11063                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11064                                       MGMT_STATUS_BUSY);
11065                 goto unlock;
11066         }
11067
11068         if (list_empty(&hdev->adv_instances)) {
11069                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11070                                       MGMT_STATUS_INVALID_PARAMS);
11071                 goto unlock;
11072         }
11073
11074         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
11075                                data_len);
11076         if (!cmd) {
11077                 err = -ENOMEM;
11078                 goto unlock;
11079         }
11080
11081         err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
11082                                  remove_advertising_complete);
11083         if (err < 0)
11084                 mgmt_pending_free(cmd);
11085
11086 unlock:
11087         hci_dev_unlock(hdev);
11088
11089         return err;
11090 }
11091
11092 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
11093                              void *data, u16 data_len)
11094 {
11095         struct mgmt_cp_get_adv_size_info *cp = data;
11096         struct mgmt_rp_get_adv_size_info rp;
11097         u32 flags, supported_flags;
11098
11099         bt_dev_dbg(hdev, "sock %p", sk);
11100
11101         if (!lmp_le_capable(hdev))
11102                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11103                                        MGMT_STATUS_REJECTED);
11104
11105         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11106                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11107                                        MGMT_STATUS_INVALID_PARAMS);
11108
11109         flags = __le32_to_cpu(cp->flags);
11110
11111         /* The current implementation only supports a subset of the specified
11112          * flags.
11113          */
11114         supported_flags = get_supported_adv_flags(hdev);
11115         if (flags & ~supported_flags)
11116                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11117                                        MGMT_STATUS_INVALID_PARAMS);
11118
11119         rp.instance = cp->instance;
11120         rp.flags = cp->flags;
11121         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11122         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11123
11124         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11125                                  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11126 }
11127
11128 static const struct hci_mgmt_handler mgmt_handlers[] = {
11129         { NULL }, /* 0x0000 (no command) */
11130         { read_version,            MGMT_READ_VERSION_SIZE,
11131                                                 HCI_MGMT_NO_HDEV |
11132                                                 HCI_MGMT_UNTRUSTED },
11133         { read_commands,           MGMT_READ_COMMANDS_SIZE,
11134                                                 HCI_MGMT_NO_HDEV |
11135                                                 HCI_MGMT_UNTRUSTED },
11136         { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
11137                                                 HCI_MGMT_NO_HDEV |
11138                                                 HCI_MGMT_UNTRUSTED },
11139         { read_controller_info,    MGMT_READ_INFO_SIZE,
11140                                                 HCI_MGMT_UNTRUSTED },
11141         { set_powered,             MGMT_SETTING_SIZE },
11142         { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
11143         { set_connectable,         MGMT_SETTING_SIZE },
11144         { set_fast_connectable,    MGMT_SETTING_SIZE },
11145         { set_bondable,            MGMT_SETTING_SIZE },
11146         { set_link_security,       MGMT_SETTING_SIZE },
11147         { set_ssp,                 MGMT_SETTING_SIZE },
11148         { set_hs,                  MGMT_SETTING_SIZE },
11149         { set_le,                  MGMT_SETTING_SIZE },
11150         { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
11151         { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
11152         { add_uuid,                MGMT_ADD_UUID_SIZE },
11153         { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
11154         { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
11155                                                 HCI_MGMT_VAR_LEN },
11156         { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
11157                                                 HCI_MGMT_VAR_LEN },
11158         { disconnect,              MGMT_DISCONNECT_SIZE },
11159         { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
11160         { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
11161         { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
11162         { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
11163         { pair_device,             MGMT_PAIR_DEVICE_SIZE },
11164         { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
11165         { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
11166         { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
11167         { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
11168         { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
11169         { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
11170         { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
11171         { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
11172                                                 HCI_MGMT_VAR_LEN },
11173         { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
11174         { start_discovery,         MGMT_START_DISCOVERY_SIZE },
11175         { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
11176         { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
11177         { block_device,            MGMT_BLOCK_DEVICE_SIZE },
11178         { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
11179         { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
11180         { set_advertising,         MGMT_SETTING_SIZE },
11181         { set_bredr,               MGMT_SETTING_SIZE },
11182         { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
11183         { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
11184         { set_secure_conn,         MGMT_SETTING_SIZE },
11185         { set_debug_keys,          MGMT_SETTING_SIZE },
11186         { set_privacy,             MGMT_SET_PRIVACY_SIZE },
11187         { load_irks,               MGMT_LOAD_IRKS_SIZE,
11188                                                 HCI_MGMT_VAR_LEN },
11189         { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
11190         { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
11191         { add_device,              MGMT_ADD_DEVICE_SIZE },
11192         { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
11193         { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
11194                                                 HCI_MGMT_VAR_LEN },
11195         { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
11196                                                 HCI_MGMT_NO_HDEV |
11197                                                 HCI_MGMT_UNTRUSTED },
11198         { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
11199                                                 HCI_MGMT_UNCONFIGURED |
11200                                                 HCI_MGMT_UNTRUSTED },
11201         { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
11202                                                 HCI_MGMT_UNCONFIGURED },
11203         { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
11204                                                 HCI_MGMT_UNCONFIGURED },
11205         { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
11206                                                 HCI_MGMT_VAR_LEN },
11207         { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
11208         { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
11209                                                 HCI_MGMT_NO_HDEV |
11210                                                 HCI_MGMT_UNTRUSTED },
11211         { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
11212         { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
11213                                                 HCI_MGMT_VAR_LEN },
11214         { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
11215         { get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
11216         { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
11217         { read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
11218                                                 HCI_MGMT_UNTRUSTED },
11219         { set_appearance,          MGMT_SET_APPEARANCE_SIZE },
11220         { get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
11221         { set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
11222         { set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
11223                                                 HCI_MGMT_VAR_LEN },
11224         { set_wideband_speech,     MGMT_SETTING_SIZE },
11225         { read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
11226                                                 HCI_MGMT_UNTRUSTED },
11227         { read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
11228                                                 HCI_MGMT_UNTRUSTED |
11229                                                 HCI_MGMT_HDEV_OPTIONAL },
11230         { set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
11231                                                 HCI_MGMT_VAR_LEN |
11232                                                 HCI_MGMT_HDEV_OPTIONAL },
11233         { read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
11234                                                 HCI_MGMT_UNTRUSTED },
11235         { set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
11236                                                 HCI_MGMT_VAR_LEN },
11237         { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
11238                                                 HCI_MGMT_UNTRUSTED },
11239         { set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
11240                                                 HCI_MGMT_VAR_LEN },
11241         { get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
11242         { set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
11243         { read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
11244         { add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
11245                                                 HCI_MGMT_VAR_LEN },
11246         { remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
11247         { add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
11248                                                 HCI_MGMT_VAR_LEN },
11249         { add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
11250                                                 HCI_MGMT_VAR_LEN },
11251         { add_adv_patterns_monitor_rssi,
11252                                    MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
11253                                                 HCI_MGMT_VAR_LEN },
11254         { set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
11255                                                 HCI_MGMT_VAR_LEN },
11256         { mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
11257         { mesh_send,               MGMT_MESH_SEND_SIZE,
11258                                                 HCI_MGMT_VAR_LEN },
11259         { mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
11260 };
11261
11262 #ifdef TIZEN_BT
11263 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
11264         { NULL }, /* 0x0000 (no command) */
11265         { set_advertising_params,  MGMT_SET_ADVERTISING_PARAMS_SIZE },
11266         { set_advertising_data,    MGMT_SET_ADV_MIN_APP_DATA_SIZE,
11267                                                 HCI_MGMT_VAR_LEN },
11268         { set_scan_rsp_data,       MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
11269                                                 HCI_MGMT_VAR_LEN },
11270         { add_white_list,          MGMT_ADD_DEV_WHITE_LIST_SIZE },
11271         { remove_from_white_list,  MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
11272         { clear_white_list,        MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
11273         { set_enable_rssi,         MGMT_SET_RSSI_ENABLE_SIZE },
11274         { get_raw_rssi,            MGMT_GET_RAW_RSSI_SIZE },
11275         { set_disable_threshold,   MGMT_SET_RSSI_DISABLE_SIZE },
11276         { start_le_discovery,      MGMT_START_LE_DISCOVERY_SIZE },
11277         { stop_le_discovery,       MGMT_STOP_LE_DISCOVERY_SIZE },
11278         { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
11279         { le_conn_update,          MGMT_LE_CONN_UPDATE_SIZE },
11280         { set_manufacturer_data,   MGMT_SET_MANUFACTURER_DATA_SIZE },
11281         { le_set_scan_params,      MGMT_LE_SET_SCAN_PARAMS_SIZE },
11282         { set_voice_setting,       MGMT_SET_VOICE_SETTING_SIZE },
11283         { get_adv_tx_power,        MGMT_GET_ADV_TX_POWER_SIZE },
11284 };
11285 #endif
11286
11287 void mgmt_index_added(struct hci_dev *hdev)
11288 {
11289         struct mgmt_ev_ext_index ev;
11290
11291         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11292                 return;
11293
11294         switch (hdev->dev_type) {
11295         case HCI_PRIMARY:
11296                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11297                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
11298                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11299                         ev.type = 0x01;
11300                 } else {
11301                         mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
11302                                          HCI_MGMT_INDEX_EVENTS);
11303                         ev.type = 0x00;
11304                 }
11305                 break;
11306         case HCI_AMP:
11307                 ev.type = 0x02;
11308                 break;
11309         default:
11310                 return;
11311         }
11312
11313         ev.bus = hdev->bus;
11314
11315         mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
11316                          HCI_MGMT_EXT_INDEX_EVENTS);
11317 }
11318
11319 void mgmt_index_removed(struct hci_dev *hdev)
11320 {
11321         struct mgmt_ev_ext_index ev;
11322         u8 status = MGMT_STATUS_INVALID_INDEX;
11323
11324         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11325                 return;
11326
11327         switch (hdev->dev_type) {
11328         case HCI_PRIMARY:
11329                 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11330
11331                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11332                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
11333                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11334                         ev.type = 0x01;
11335                 } else {
11336                         mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
11337                                          HCI_MGMT_INDEX_EVENTS);
11338                         ev.type = 0x00;
11339                 }
11340                 break;
11341         case HCI_AMP:
11342                 ev.type = 0x02;
11343                 break;
11344         default:
11345                 return;
11346         }
11347
11348         ev.bus = hdev->bus;
11349
11350         mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
11351                          HCI_MGMT_EXT_INDEX_EVENTS);
11352
11353         /* Cancel any remaining timed work */
11354         if (!hci_dev_test_flag(hdev, HCI_MGMT))
11355                 return;
11356         cancel_delayed_work_sync(&hdev->discov_off);
11357         cancel_delayed_work_sync(&hdev->service_cache);
11358         cancel_delayed_work_sync(&hdev->rpa_expired);
11359 }
11360
11361 void mgmt_power_on(struct hci_dev *hdev, int err)
11362 {
11363         struct cmd_lookup match = { NULL, hdev };
11364
11365         bt_dev_dbg(hdev, "err %d", err);
11366
11367         hci_dev_lock(hdev);
11368
11369         if (!err) {
11370                 restart_le_actions(hdev);
11371                 hci_update_passive_scan(hdev);
11372         }
11373
11374         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11375
11376         new_settings(hdev, match.sk);
11377
11378         if (match.sk)
11379                 sock_put(match.sk);
11380
11381         hci_dev_unlock(hdev);
11382 }
11383
11384 void __mgmt_power_off(struct hci_dev *hdev)
11385 {
11386         struct cmd_lookup match = { NULL, hdev };
11387         u8 status, zero_cod[] = { 0, 0, 0 };
11388
11389         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11390
11391         /* If the power off is because of hdev unregistration let's
11392          * use the appropriate INVALID_INDEX status. Otherwise use
11393          * NOT_POWERED. We cover both scenarios here since later in
11394          * mgmt_index_removed() any hci_conn callbacks will have already
11395          * been triggered, potentially causing misleading DISCONNECTED
11396          * status responses.
11397          */
11398         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
11399                 status = MGMT_STATUS_INVALID_INDEX;
11400         else
11401                 status = MGMT_STATUS_NOT_POWERED;
11402
11403         mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11404
11405         if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
11406                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
11407                                    zero_cod, sizeof(zero_cod),
11408                                    HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11409                 ext_info_changed(hdev, NULL);
11410         }
11411
11412         new_settings(hdev, match.sk);
11413
11414         if (match.sk)
11415                 sock_put(match.sk);
11416 }
11417
11418 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
11419 {
11420         struct mgmt_pending_cmd *cmd;
11421         u8 status;
11422
11423         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11424         if (!cmd)
11425                 return;
11426
11427         if (err == -ERFKILL)
11428                 status = MGMT_STATUS_RFKILLED;
11429         else
11430                 status = MGMT_STATUS_FAILED;
11431
11432         mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11433
11434         mgmt_pending_remove(cmd);
11435 }
11436
11437 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11438                        bool persistent)
11439 {
11440         struct mgmt_ev_new_link_key ev;
11441
11442         memset(&ev, 0, sizeof(ev));
11443
11444         ev.store_hint = persistent;
11445         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11446         ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
11447         ev.key.type = key->type;
11448         memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11449         ev.key.pin_len = key->pin_len;
11450
11451         mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
11452 }
11453
11454 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11455 {
11456         switch (ltk->type) {
11457         case SMP_LTK:
11458         case SMP_LTK_RESPONDER:
11459                 if (ltk->authenticated)
11460                         return MGMT_LTK_AUTHENTICATED;
11461                 return MGMT_LTK_UNAUTHENTICATED;
11462         case SMP_LTK_P256:
11463                 if (ltk->authenticated)
11464                         return MGMT_LTK_P256_AUTH;
11465                 return MGMT_LTK_P256_UNAUTH;
11466         case SMP_LTK_P256_DEBUG:
11467                 return MGMT_LTK_P256_DEBUG;
11468         }
11469
11470         return MGMT_LTK_UNAUTHENTICATED;
11471 }
11472
11473 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11474 {
11475         struct mgmt_ev_new_long_term_key ev;
11476
11477         memset(&ev, 0, sizeof(ev));
11478
11479         /* Devices using resolvable or non-resolvable random addresses
11480          * without providing an identity resolving key don't need their
11481          * long term keys stored. Their addresses will change the
11482          * next time around.
11483          *
11484          * Only when a remote device provides an identity address
11485          * do we make sure the long term key is stored. If the remote
11486          * identity is known, the long term keys are internally
11487          * mapped to the identity address. So allow static random
11488          * and public addresses here.
11489          */
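              /* On an ADDR_LE_DEV_RANDOM address only a static random address has
               * both of its two most significant bits set; anything else is a
               * private address that cannot be relied upon later.
               */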
11490         if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11491             (key->bdaddr.b[5] & 0xc0) != 0xc0)
11492                 ev.store_hint = 0x00;
11493         else
11494                 ev.store_hint = persistent;
11495
11496         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11497         ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
11498         ev.key.type = mgmt_ltk_type(key);
11499         ev.key.enc_size = key->enc_size;
11500         ev.key.ediv = key->ediv;
11501         ev.key.rand = key->rand;
11502
11503         if (key->type == SMP_LTK)
11504                 ev.key.initiator = 1;
11505
11506         /* Make sure we copy only the significant bytes based on the
11507          * encryption key size, and set the rest of the value to zeroes.
11508          */
11509         memcpy(ev.key.val, key->val, key->enc_size);
11510         memset(ev.key.val + key->enc_size, 0,
11511                sizeof(ev.key.val) - key->enc_size);
11512
11513         mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
11514 }
11515
11516 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11517 {
11518         struct mgmt_ev_new_irk ev;
11519
11520         memset(&ev, 0, sizeof(ev));
11521
11522         ev.store_hint = persistent;
11523
11524         bacpy(&ev.rpa, &irk->rpa);
11525         bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11526         ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
11527         memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11528
11529         mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
11530 }
11531
11532 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11533                    bool persistent)
11534 {
11535         struct mgmt_ev_new_csrk ev;
11536
11537         memset(&ev, 0, sizeof(ev));
11538
11539         /* Devices using resolvable or non-resolvable random addresses
11540          * without providing an identity resolving key don't need their
11541          * signature resolving keys stored. Their addresses will change
11542          * the next time around.
11543          *
11544          * Only when a remote device provides an identity address
11545          * do we make sure the signature resolving key is stored. So allow
11546          * static random and public addresses here.
11547          */
11548         if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11549             (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11550                 ev.store_hint = 0x00;
11551         else
11552                 ev.store_hint = persistent;
11553
11554         bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11555         ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
11556         ev.key.type = csrk->type;
11557         memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11558
11559         mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
11560 }
11561
11562 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11563                          u8 bdaddr_type, u8 store_hint, u16 min_interval,
11564                          u16 max_interval, u16 latency, u16 timeout)
11565 {
11566         struct mgmt_ev_new_conn_param ev;
11567
11568         if (!hci_is_identity_address(bdaddr, bdaddr_type))
11569                 return;
11570
11571         memset(&ev, 0, sizeof(ev));
11572         bacpy(&ev.addr.bdaddr, bdaddr);
11573         ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
11574         ev.store_hint = store_hint;
11575         ev.min_interval = cpu_to_le16(min_interval);
11576         ev.max_interval = cpu_to_le16(max_interval);
11577         ev.latency = cpu_to_le16(latency);
11578         ev.timeout = cpu_to_le16(timeout);
11579
11580         mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
11581 }
11582
11583 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
11584                            u8 *name, u8 name_len)
11585 {
11586         struct sk_buff *skb;
11587         struct mgmt_ev_device_connected *ev;
11588         u16 eir_len = 0;
11589         u32 flags = 0;
11590
11591         /* Allocate a buffer for the LE or BR/EDR advertising data */
11592         if (conn->le_adv_data_len > 0)
11593                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11594                                      sizeof(*ev) + conn->le_adv_data_len);
11595         else
11596                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11597                                      sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
11598                                      eir_precalc_len(sizeof(conn->dev_class)));
11599
11600         ev = skb_put(skb, sizeof(*ev));
11601         bacpy(&ev->addr.bdaddr, &conn->dst);
11602         ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11603
11604         if (conn->out)
11605                 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
11606
11607         ev->flags = __cpu_to_le32(flags);
11608
11609         /* We must ensure that the EIR Data fields are ordered and
11610          * unique. Keep it simple for now and avoid the problem by not
11611          * adding any BR/EDR data to the LE adv.
11612          */
11613         if (conn->le_adv_data_len > 0) {
11614                 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
11615                 eir_len = conn->le_adv_data_len;
11616         } else {
11617                 if (name)
11618                         eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
11619
11620                 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
11621                         eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
11622                                                     conn->dev_class, sizeof(conn->dev_class));
11623         }
11624
11625         ev->eir_len = cpu_to_le16(eir_len);
11626
11627         mgmt_event_skb(skb, NULL);
11628 }
11629
11630 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11631 {
11632         struct sock **sk = data;
11633
11634         cmd->cmd_complete(cmd, 0);
11635
11636         *sk = cmd->sk;
11637         sock_hold(*sk);
11638
11639         mgmt_pending_remove(cmd);
11640 }
11641
11642 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11643 {
11644         struct hci_dev *hdev = data;
11645         struct mgmt_cp_unpair_device *cp = cmd->param;
11646
11647         device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
11648
11649         cmd->cmd_complete(cmd, 0);
11650         mgmt_pending_remove(cmd);
11651 }
11652
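      /* Return true if a Set Powered (off) command is still pending, i.e. the
       * controller is in the middle of powering down.
       */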
11653 bool mgmt_powering_down(struct hci_dev *hdev)
11654 {
11655         struct mgmt_pending_cmd *cmd;
11656         struct mgmt_mode *cp;
11657
11658         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11659         if (!cmd)
11660                 return false;
11661
11662         cp = cmd->param;
11663         if (!cp->val)
11664                 return true;
11665
11666         return false;
11667 }
11668
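/* Handle a disconnection: queue the final power-off work if this was the last
 * connection during power-down, complete any pending Disconnect and Unpair
 * Device commands and send the Device Disconnected event.
 */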
11669 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
11670                               u8 link_type, u8 addr_type, u8 reason,
11671                               bool mgmt_connected)
11672 {
11673         struct mgmt_ev_device_disconnected ev;
11674         struct sock *sk = NULL;
11675
11676         /* The connection is still in hci_conn_hash so test for 1
11677          * instead of 0 to know if this is the last one.
11678          */
11679         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11680                 cancel_delayed_work(&hdev->power_off);
11681                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11682         }
11683
11684         if (!mgmt_connected)
11685                 return;
11686
11687         if (link_type != ACL_LINK && link_type != LE_LINK)
11688                 return;
11689
11690         mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
11691
11692         bacpy(&ev.addr.bdaddr, bdaddr);
11693         ev.addr.type = link_to_bdaddr(link_type, addr_type);
11694         ev.reason = reason;
11695
11696         /* Report disconnects due to suspend */
11697         if (hdev->suspended)
11698                 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
11699
11700         mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
11701
11702         if (sk)
11703                 sock_put(sk);
11704
11705         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11706                              hdev);
11707 }
11708
11709 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
11710                             u8 link_type, u8 addr_type, u8 status)
11711 {
11712         u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
11713         struct mgmt_cp_disconnect *cp;
11714         struct mgmt_pending_cmd *cmd;
11715
11716         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11717                              hdev);
11718
11719         cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
11720         if (!cmd)
11721                 return;
11722
11723         cp = cmd->param;
11724
11725         if (bacmp(bdaddr, &cp->addr.bdaddr))
11726                 return;
11727
11728         if (cp->addr.type != bdaddr_type)
11729                 return;
11730
11731         cmd->cmd_complete(cmd, mgmt_status(status));
11732         mgmt_pending_remove(cmd);
11733 }
11734
11735 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11736                          u8 addr_type, u8 status)
11737 {
11738         struct mgmt_ev_connect_failed ev;
11739
11740         /* The connection is still in hci_conn_hash so test for 1
11741          * instead of 0 to know if this is the last one.
11742          */
11743         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11744                 cancel_delayed_work(&hdev->power_off);
11745                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11746         }
11747
11748         bacpy(&ev.addr.bdaddr, bdaddr);
11749         ev.addr.type = link_to_bdaddr(link_type, addr_type);
11750         ev.status = mgmt_status(status);
11751
11752         mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
11753 }
11754
11755 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
11756 {
11757         struct mgmt_ev_pin_code_request ev;
11758
11759         bacpy(&ev.addr.bdaddr, bdaddr);
11760         ev.addr.type = BDADDR_BREDR;
11761         ev.secure = secure;
11762
11763         mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
11764 }
11765
11766 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11767                                   u8 status)
11768 {
11769         struct mgmt_pending_cmd *cmd;
11770
11771         cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
11772         if (!cmd)
11773                 return;
11774
11775         cmd->cmd_complete(cmd, mgmt_status(status));
11776         mgmt_pending_remove(cmd);
11777 }
11778
11779 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11780                                       u8 status)
11781 {
11782         struct mgmt_pending_cmd *cmd;
11783
11784         cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
11785         if (!cmd)
11786                 return;
11787
11788         cmd->cmd_complete(cmd, mgmt_status(status));
11789         mgmt_pending_remove(cmd);
11790 }
11791
11792 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11793                               u8 link_type, u8 addr_type, u32 value,
11794                               u8 confirm_hint)
11795 {
11796         struct mgmt_ev_user_confirm_request ev;
11797
11798         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11799
11800         bacpy(&ev.addr.bdaddr, bdaddr);
11801         ev.addr.type = link_to_bdaddr(link_type, addr_type);
11802         ev.confirm_hint = confirm_hint;
11803         ev.value = cpu_to_le32(value);
11804
11805         return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
11806                           NULL);
11807 }
11808
11809 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11810                               u8 link_type, u8 addr_type)
11811 {
11812         struct mgmt_ev_user_passkey_request ev;
11813
11814         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11815
11816         bacpy(&ev.addr.bdaddr, bdaddr);
11817         ev.addr.type = link_to_bdaddr(link_type, addr_type);
11818
11819         return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
11820                           NULL);
11821 }
11822
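/* Complete the pending user confirm/passkey (neg) reply command identified by
 * 'opcode' with the translated HCI status; returns -ENOENT if no such command
 * is pending.
 */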
11823 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11824                                       u8 link_type, u8 addr_type, u8 status,
11825                                       u8 opcode)
11826 {
11827         struct mgmt_pending_cmd *cmd;
11828
11829         cmd = pending_find(opcode, hdev);
11830         if (!cmd)
11831                 return -ENOENT;
11832
11833         cmd->cmd_complete(cmd, mgmt_status(status));
11834         mgmt_pending_remove(cmd);
11835
11836         return 0;
11837 }
11838
11839 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11840                                      u8 link_type, u8 addr_type, u8 status)
11841 {
11842         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11843                                           status, MGMT_OP_USER_CONFIRM_REPLY);
11844 }
11845
11846 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11847                                          u8 link_type, u8 addr_type, u8 status)
11848 {
11849         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11850                                           status,
11851                                           MGMT_OP_USER_CONFIRM_NEG_REPLY);
11852 }
11853
11854 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11855                                      u8 link_type, u8 addr_type, u8 status)
11856 {
11857         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11858                                           status, MGMT_OP_USER_PASSKEY_REPLY);
11859 }
11860
11861 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11862                                          u8 link_type, u8 addr_type, u8 status)
11863 {
11864         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11865                                           status,
11866                                           MGMT_OP_USER_PASSKEY_NEG_REPLY);
11867 }
11868
11869 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11870                              u8 link_type, u8 addr_type, u32 passkey,
11871                              u8 entered)
11872 {
11873         struct mgmt_ev_passkey_notify ev;
11874
11875         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11876
11877         bacpy(&ev.addr.bdaddr, bdaddr);
11878         ev.addr.type = link_to_bdaddr(link_type, addr_type);
11879         ev.passkey = __cpu_to_le32(passkey);
11880         ev.entered = entered;
11881
11882         return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
11883 }
11884
11885 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11886 {
11887         struct mgmt_ev_auth_failed ev;
11888         struct mgmt_pending_cmd *cmd;
11889         u8 status = mgmt_status(hci_status);
11890
11891         bacpy(&ev.addr.bdaddr, &conn->dst);
11892         ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11893         ev.status = status;
11894
11895         cmd = find_pairing(conn);
11896
11897         mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11898                     cmd ? cmd->sk : NULL);
11899
11900         if (cmd) {
11901                 cmd->cmd_complete(cmd, status);
11902                 mgmt_pending_remove(cmd);
11903         }
11904 }
11905
11906 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11907 {
11908         struct cmd_lookup match = { NULL, hdev };
11909         bool changed;
11910
11911         if (status) {
11912                 u8 mgmt_err = mgmt_status(status);
11913                 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11914                                      cmd_status_rsp, &mgmt_err);
11915                 return;
11916         }
11917
11918         if (test_bit(HCI_AUTH, &hdev->flags))
11919                 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11920         else
11921                 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11922
11923         mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11924                              &match);
11925
11926         if (changed)
11927                 new_settings(hdev, match.sk);
11928
11929         if (match.sk)
11930                 sock_put(match.sk);
11931 }
11932
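/* Pending-command callback: record the socket of the first matching command
 * found, taking a reference on it for the caller to release.
 */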
11933 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11934 {
11935         struct cmd_lookup *match = data;
11936
11937         if (match->sk == NULL) {
11938                 match->sk = cmd->sk;
11939                 sock_hold(match->sk);
11940         }
11941 }
11942
11943 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11944                                     u8 status)
11945 {
11946         struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11947
11948         mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11949         mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11950         mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11951
11952         if (!status) {
11953                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11954                                    3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11955                 ext_info_changed(hdev, NULL);
11956         }
11957
11958         if (match.sk)
11959                 sock_put(match.sk);
11960 }
11961
11962 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11963 {
11964         struct mgmt_cp_set_local_name ev;
11965         struct mgmt_pending_cmd *cmd;
11966
11967         if (status)
11968                 return;
11969
11970         memset(&ev, 0, sizeof(ev));
11971         memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11972         memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11973
11974         cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11975         if (!cmd) {
11976                 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11977
11978                 /* If this is an HCI command related to powering on the
11979                  * HCI dev, don't send any mgmt signals.
11980                  */
11981                 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11982                         return;
11983         }
11984
11985         mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11986                            HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11987         ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11988 }
11989
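/* Return true if 'uuid' matches one of the 'uuid_count' 128-bit UUIDs in 'uuids'. */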
11990 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11991 {
11992         int i;
11993
11994         for (i = 0; i < uuid_count; i++) {
11995                 if (!memcmp(uuid, uuids[i], 16))
11996                         return true;
11997         }
11998
11999         return false;
12000 }
12001
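/* Walk the EIR/advertising data and return true if any advertised 16-, 32- or
 * 128-bit service UUID (expanded to 128 bits using the Bluetooth base UUID)
 * matches an entry in the filter list.
 */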
12002 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
12003 {
12004         u16 parsed = 0;
12005
12006         while (parsed < eir_len) {
12007                 u8 field_len = eir[0];
12008                 u8 uuid[16];
12009                 int i;
12010
12011                 if (field_len == 0)
12012                         break;
12013
12014                 if (eir_len - parsed < field_len + 1)
12015                         break;
12016
12017                 switch (eir[1]) {
12018                 case EIR_UUID16_ALL:
12019                 case EIR_UUID16_SOME:
12020                         for (i = 0; i + 3 <= field_len; i += 2) {
12021                                 memcpy(uuid, bluetooth_base_uuid, 16);
12022                                 uuid[13] = eir[i + 3];
12023                                 uuid[12] = eir[i + 2];
12024                                 if (has_uuid(uuid, uuid_count, uuids))
12025                                         return true;
12026                         }
12027                         break;
12028                 case EIR_UUID32_ALL:
12029                 case EIR_UUID32_SOME:
12030                         for (i = 0; i + 5 <= field_len; i += 4) {
12031                                 memcpy(uuid, bluetooth_base_uuid, 16);
12032                                 uuid[15] = eir[i + 5];
12033                                 uuid[14] = eir[i + 4];
12034                                 uuid[13] = eir[i + 3];
12035                                 uuid[12] = eir[i + 2];
12036                                 if (has_uuid(uuid, uuid_count, uuids))
12037                                         return true;
12038                         }
12039                         break;
12040                 case EIR_UUID128_ALL:
12041                 case EIR_UUID128_SOME:
12042                         for (i = 0; i + 17 <= field_len; i += 16) {
12043                                 memcpy(uuid, eir + i + 2, 16);
12044                                 if (has_uuid(uuid, uuid_count, uuids))
12045                                         return true;
12046                         }
12047                         break;
12048                 }
12049
12050                 parsed += field_len + 1;
12051                 eir += field_len + 1;
12052         }
12053
12054         return false;
12055 }
12056
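/* Queue a delayed restart of LE scanning, but only while the controller is
 * scanning and the discovery window will still be running once the restart
 * delay has elapsed.
 */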
12057 static void restart_le_scan(struct hci_dev *hdev)
12058 {
12059         /* If the controller is not scanning, we are done. */
12060         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
12061                 return;
12062
12063         if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
12064                        hdev->discovery.scan_start +
12065                        hdev->discovery.scan_duration))
12066                 return;
12067
12068         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
12069                            DISCOV_LE_RESTART_DELAY);
12070 }
12071
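/* Apply the service-discovery filters (RSSI threshold and UUID list) to a
 * found device; returns false if the result should be dropped.
 */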
12072 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
12073                             u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
12074 {
12075         /* If an RSSI threshold has been specified, and
12076          * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
12077          * an RSSI smaller than the RSSI threshold will be dropped. If the quirk
12078          * is set, let it through for further processing, as we might need to
12079          * restart the scan.
12080          *
12081          * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
12082          * the results are also dropped.
12083          */
12084         if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12085             (rssi == HCI_RSSI_INVALID ||
12086             (rssi < hdev->discovery.rssi &&
12087              !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
12088                 return false;
12089
12090         if (hdev->discovery.uuid_count != 0) {
12091                 /* If a list of UUIDs is provided in filter, results with no
12092                  * matching UUID should be dropped.
12093                  */
12094                 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
12095                                    hdev->discovery.uuids) &&
12096                     !eir_has_uuids(scan_rsp, scan_rsp_len,
12097                                    hdev->discovery.uuid_count,
12098                                    hdev->discovery.uuids))
12099                         return false;
12100         }
12101
12102         /* If duplicate filtering does not report RSSI changes, then restart
12103          * scanning to ensure updated results with current RSSI values.
12104          */
12105         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
12106                 restart_le_scan(hdev);
12107
12108                 /* Validate RSSI value against the RSSI threshold once more. */
12109                 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12110                     rssi < hdev->discovery.rssi)
12111                         return false;
12112         }
12113
12114         return true;
12115 }
12116
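/* Emit an Adv Monitor Device Lost event for the given monitor handle and address. */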
12117 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
12118                                   bdaddr_t *bdaddr, u8 addr_type)
12119 {
12120         struct mgmt_ev_adv_monitor_device_lost ev;
12121
12122         ev.monitor_handle = cpu_to_le16(handle);
12123         bacpy(&ev.addr.bdaddr, bdaddr);
12124         ev.addr.type = addr_type;
12125
12126         mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
12127                    NULL);
12128 }
12129
12130 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
12131                                                struct sk_buff *skb,
12132                                                struct sock *skip_sk,
12133                                                u16 handle)
12134 {
12135         struct sk_buff *advmon_skb;
12136         size_t advmon_skb_len;
12137         __le16 *monitor_handle;
12138
12139         if (!skb)
12140                 return;
12141
12142         advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
12143                           sizeof(struct mgmt_ev_device_found)) + skb->len;
12144         advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
12145                                     advmon_skb_len);
12146         if (!advmon_skb)
12147                 return;
12148
12149         /* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event except
12150          * that it also carries 'monitor_handle'. Store the monitor_handle of the
12151          * matched monitor first, then append a copy of the DEVICE_FOUND payload.
12152          */
12153         monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
12154         *monitor_handle = cpu_to_le16(handle);
12155         skb_put_data(advmon_skb, skb->data, skb->len);
12156
12157         mgmt_event_skb(advmon_skb, skip_sk);
12158 }
12159
12160 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
12161                                           bdaddr_t *bdaddr, bool report_device,
12162                                           struct sk_buff *skb,
12163                                           struct sock *skip_sk)
12164 {
12165         struct monitored_device *dev, *tmp;
12166         bool matched = false;
12167         bool notified = false;
12168
12169         /* We have received the Advertisement Report because:
12170          * 1. the kernel has initiated active discovery
12171          * 2. if not, we have pend_le_reports > 0 in which case we are doing
12172          *    passive scanning
12173          * 3. if none of the above is true, we have one or more active
12174          *    Advertisement Monitors
12175          *
12176          * For cases 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
12177          * and report ONLY one advertisement per device for the matched Monitor
12178          * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12179          *
12180          * For case 3, since we are not actively scanning and all advertisements
12181          * received are due to a matched Advertisement Monitor, report all
12182          * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12183          */
12184         if (report_device && !hdev->advmon_pend_notify) {
12185                 mgmt_event_skb(skb, skip_sk);
12186                 return;
12187         }
12188
12189         hdev->advmon_pend_notify = false;
12190
12191         list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
12192                 if (!bacmp(&dev->bdaddr, bdaddr)) {
12193                         matched = true;
12194
12195                         if (!dev->notified) {
12196                                 mgmt_send_adv_monitor_device_found(hdev, skb,
12197                                                                    skip_sk,
12198                                                                    dev->handle);
12199                                 notified = true;
12200                                 dev->notified = true;
12201                         }
12202                 }
12203
12204                 if (!dev->notified)
12205                         hdev->advmon_pend_notify = true;
12206         }
12207
12208         if (!report_device &&
12209             ((matched && !notified) || !msft_monitor_supported(hdev))) {
12210                 /* Handle 0 indicates that we are not actively scanning and this
12211                  * is a subsequent advertisement report for an already matched
12212                  * Advertisement Monitor or the controller offloading support
12213                  * is not available.
12214                  */
12215                 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
12216         }
12217
12218         if (report_device)
12219                 mgmt_event_skb(skb, skip_sk);
12220         else
12221                 kfree_skb(skb);
12222 }
12223
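/* If mesh AD type filtering is configured, drop reports that carry none of the
 * requested AD types; otherwise emit a Mesh Device Found event carrying the
 * advertising and scan response data.
 */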
12224 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
12225                               u8 addr_type, s8 rssi, u32 flags, u8 *eir,
12226                               u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12227                               u64 instant)
12228 {
12229         struct sk_buff *skb;
12230         struct mgmt_ev_mesh_device_found *ev;
12231         int i, j;
12232
12233         if (!hdev->mesh_ad_types[0])
12234                 goto accepted;
12235
12236         /* Scan for requested AD types */
12237         if (eir_len > 0) {
12238                 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
12239                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12240                                 if (!hdev->mesh_ad_types[j])
12241                                         break;
12242
12243                                 if (hdev->mesh_ad_types[j] == eir[i + 1])
12244                                         goto accepted;
12245                         }
12246                 }
12247         }
12248
12249         if (scan_rsp_len > 0) {
12250                 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
12251                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12252                                 if (!hdev->mesh_ad_types[j])
12253                                         break;
12254
12255                                 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
12256                                         goto accepted;
12257                         }
12258                 }
12259         }
12260
12261         return;
12262
12263 accepted:
12264         skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
12265                              sizeof(*ev) + eir_len + scan_rsp_len);
12266         if (!skb)
12267                 return;
12268
12269         ev = skb_put(skb, sizeof(*ev));
12270
12271         bacpy(&ev->addr.bdaddr, bdaddr);
12272         ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
12273         ev->rssi = rssi;
12274         ev->flags = cpu_to_le32(flags);
12275         ev->instant = cpu_to_le64(instant);
12276
12277         if (eir_len > 0)
12278                 /* Copy EIR or advertising data into event */
12279                 skb_put_data(skb, eir, eir_len);
12280
12281         if (scan_rsp_len > 0)
12282                 /* Append scan response data to event */
12283                 skb_put_data(skb, scan_rsp, scan_rsp_len);
12284
12285         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12286
12287         mgmt_event_skb(skb, NULL);
12288 }
12289
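/* Entry point for inquiry results and advertising reports: apply the active
 * discovery filters and forward the result as a Device Found and/or Adv
 * Monitor Device Found event (and as Mesh Device Found when mesh is enabled).
 */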
12290 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12291                        u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
12292                        u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12293                        u64 instant)
12294 {
12295         struct sk_buff *skb;
12296         struct mgmt_ev_device_found *ev;
12297         bool report_device = hci_discovery_active(hdev);
12298
12299         if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
12300                 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
12301                                   eir, eir_len, scan_rsp, scan_rsp_len,
12302                                   instant);
12303
12304         /* Don't send events for a non-kernel-initiated discovery. For LE,
12305          * one exception is a non-empty pend_le_reports list, in which case
12306          * we're doing passive scanning and want these events.
12307          */
12308         if (!hci_discovery_active(hdev)) {
12309                 if (link_type == ACL_LINK)
12310                         return;
12311                 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
12312                         report_device = true;
12313                 else if (!hci_is_adv_monitoring(hdev))
12314                         return;
12315         }
12316
12317         if (hdev->discovery.result_filtering) {
12318                 /* We are using service discovery */
12319                 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
12320                                      scan_rsp_len))
12321                         return;
12322         }
12323
12324         if (hdev->discovery.limited) {
12325                 /* Check for limited discoverable bit */
12326                 if (dev_class) {
12327                         if (!(dev_class[1] & 0x20))
12328                                 return;
12329                 } else {
12330                         u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
12331                         if (!flags || !(flags[0] & LE_AD_LIMITED))
12332                                 return;
12333                 }
12334         }
12335
12336         /* Allocate skb. The 5 extra bytes are for the potential CoD field */
12337         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12338                              sizeof(*ev) + eir_len + scan_rsp_len + 5);
12339         if (!skb)
12340                 return;
12341
12342         ev = skb_put(skb, sizeof(*ev));
12343
12344         /* In case of device discovery with BR/EDR devices (pre 1.2), the
12345          * RSSI value was reported as 0 when not available. This behavior
12346          * is kept when using device discovery. This is required for full
12347          * backwards compatibility with the API.
12348          *
12349          * However, when using service discovery, the value 127 will be
12350          * returned when the RSSI is not available.
12351          */
12352         if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
12353             link_type == ACL_LINK)
12354                 rssi = 0;
12355
12356         bacpy(&ev->addr.bdaddr, bdaddr);
12357         ev->addr.type = link_to_bdaddr(link_type, addr_type);
12358         ev->rssi = rssi;
12359         ev->flags = cpu_to_le32(flags);
12360
12361         if (eir_len > 0)
12362                 /* Copy EIR or advertising data into event */
12363                 skb_put_data(skb, eir, eir_len);
12364
12365         if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
12366                 u8 eir_cod[5];
12367
12368                 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
12369                                            dev_class, 3);
12370                 skb_put_data(skb, eir_cod, sizeof(eir_cod));
12371         }
12372
12373         if (scan_rsp_len > 0)
12374                 /* Append scan response data to event */
12375                 skb_put_data(skb, scan_rsp, scan_rsp_len);
12376
12377         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12378
12379         mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
12380 }
12381
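/* Report the result of a remote name request as a Device Found event, carrying
 * the name as EIR data or flagging that the name request failed.
 */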
12382 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12383                       u8 addr_type, s8 rssi, u8 *name, u8 name_len)
12384 {
12385         struct sk_buff *skb;
12386         struct mgmt_ev_device_found *ev;
12387         u16 eir_len = 0;
12388         u32 flags = 0;
12389
12390         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12391                              sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
12392
12393         ev = skb_put(skb, sizeof(*ev));
12394         bacpy(&ev->addr.bdaddr, bdaddr);
12395         ev->addr.type = link_to_bdaddr(link_type, addr_type);
12396         ev->rssi = rssi;
12397
12398         if (name)
12399                 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12400         else
12401                 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
12402
12403         ev->eir_len = cpu_to_le16(eir_len);
12404         ev->flags = cpu_to_le32(flags);
12405
12406         mgmt_event_skb(skb, NULL);
12407 }
12408
12409 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
12410 {
12411         struct mgmt_ev_discovering ev;
12412
12413         bt_dev_dbg(hdev, "discovering %u", discovering);
12414
12415         memset(&ev, 0, sizeof(ev));
12416         ev.type = hdev->discovery.type;
12417         ev.discovering = discovering;
12418
12419         mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
12420 }
12421
12422 void mgmt_suspending(struct hci_dev *hdev, u8 state)
12423 {
12424         struct mgmt_ev_controller_suspend ev;
12425
12426         ev.suspend_state = state;
12427         mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
12428 }
12429
12430 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
12431                    u8 addr_type)
12432 {
12433         struct mgmt_ev_controller_resume ev;
12434
12435         ev.wake_reason = reason;
12436         if (bdaddr) {
12437                 bacpy(&ev.addr.bdaddr, bdaddr);
12438                 ev.addr.type = addr_type;
12439         } else {
12440                 memset(&ev.addr, 0, sizeof(ev.addr));
12441         }
12442
12443         mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
12444 }
12445
12446 static struct hci_mgmt_chan chan = {
12447         .channel        = HCI_CHANNEL_CONTROL,
12448         .handler_count  = ARRAY_SIZE(mgmt_handlers),
12449         .handlers       = mgmt_handlers,
12450 #ifdef TIZEN_BT
12451         .tizen_handler_count    = ARRAY_SIZE(tizen_mgmt_handlers),
12452         .tizen_handlers = tizen_mgmt_handlers,
12453 #endif
12454         .hdev_init      = mgmt_init_hdev,
12455 };
12456
12457 int mgmt_init(void)
12458 {
12459         return hci_mgmt_chan_register(&chan);
12460 }
12461
12462 void mgmt_exit(void)
12463 {
12464         hci_mgmt_chan_unregister(&chan);
12465 }
12466
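/* Clean up per-socket management state: complete any mesh transmissions queued
 * by this socket on any controller.
 */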
12467 void mgmt_cleanup(struct sock *sk)
12468 {
12469         struct mgmt_mesh_tx *mesh_tx;
12470         struct hci_dev *hdev;
12471
12472         read_lock(&hci_dev_list_lock);
12473
12474         list_for_each_entry(hdev, &hci_dev_list, list) {
12475                 do {
12476                         mesh_tx = mgmt_mesh_next(hdev, sk);
12477
12478                         if (mesh_tx)
12479                                 mesh_send_complete(hdev, mesh_tx, true);
12480                 } while (mesh_tx);
12481         }
12482
12483         read_unlock(&hci_dev_list_lock);
12484 }