Bluetooth: Functions to modify WhiteList
platform/kernel/linux-starfive.git: net/bluetooth/mgmt.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 #ifdef TIZEN_BT
36 #include <net/bluetooth/mgmt_tizen.h>
37 #endif
38
39 #include "hci_request.h"
40 #include "smp.h"
41 #include "mgmt_util.h"
42 #include "mgmt_config.h"
43 #include "msft.h"
44 #include "eir.h"
45 #include "aosp.h"
46
47 #define MGMT_VERSION    1
48 #define MGMT_REVISION   22
49
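/* Opcodes and (further below) events reported to trusted management
 * sockets in the Read Management Commands reply; see read_commands().
 */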
50 static const u16 mgmt_commands[] = {
51         MGMT_OP_READ_INDEX_LIST,
52         MGMT_OP_READ_INFO,
53         MGMT_OP_SET_POWERED,
54         MGMT_OP_SET_DISCOVERABLE,
55         MGMT_OP_SET_CONNECTABLE,
56         MGMT_OP_SET_FAST_CONNECTABLE,
57         MGMT_OP_SET_BONDABLE,
58         MGMT_OP_SET_LINK_SECURITY,
59         MGMT_OP_SET_SSP,
60         MGMT_OP_SET_HS,
61         MGMT_OP_SET_LE,
62         MGMT_OP_SET_DEV_CLASS,
63         MGMT_OP_SET_LOCAL_NAME,
64         MGMT_OP_ADD_UUID,
65         MGMT_OP_REMOVE_UUID,
66         MGMT_OP_LOAD_LINK_KEYS,
67         MGMT_OP_LOAD_LONG_TERM_KEYS,
68         MGMT_OP_DISCONNECT,
69         MGMT_OP_GET_CONNECTIONS,
70         MGMT_OP_PIN_CODE_REPLY,
71         MGMT_OP_PIN_CODE_NEG_REPLY,
72         MGMT_OP_SET_IO_CAPABILITY,
73         MGMT_OP_PAIR_DEVICE,
74         MGMT_OP_CANCEL_PAIR_DEVICE,
75         MGMT_OP_UNPAIR_DEVICE,
76         MGMT_OP_USER_CONFIRM_REPLY,
77         MGMT_OP_USER_CONFIRM_NEG_REPLY,
78         MGMT_OP_USER_PASSKEY_REPLY,
79         MGMT_OP_USER_PASSKEY_NEG_REPLY,
80         MGMT_OP_READ_LOCAL_OOB_DATA,
81         MGMT_OP_ADD_REMOTE_OOB_DATA,
82         MGMT_OP_REMOVE_REMOTE_OOB_DATA,
83         MGMT_OP_START_DISCOVERY,
84         MGMT_OP_STOP_DISCOVERY,
85         MGMT_OP_CONFIRM_NAME,
86         MGMT_OP_BLOCK_DEVICE,
87         MGMT_OP_UNBLOCK_DEVICE,
88         MGMT_OP_SET_DEVICE_ID,
89         MGMT_OP_SET_ADVERTISING,
90         MGMT_OP_SET_BREDR,
91         MGMT_OP_SET_STATIC_ADDRESS,
92         MGMT_OP_SET_SCAN_PARAMS,
93         MGMT_OP_SET_SECURE_CONN,
94         MGMT_OP_SET_DEBUG_KEYS,
95         MGMT_OP_SET_PRIVACY,
96         MGMT_OP_LOAD_IRKS,
97         MGMT_OP_GET_CONN_INFO,
98         MGMT_OP_GET_CLOCK_INFO,
99         MGMT_OP_ADD_DEVICE,
100         MGMT_OP_REMOVE_DEVICE,
101         MGMT_OP_LOAD_CONN_PARAM,
102         MGMT_OP_READ_UNCONF_INDEX_LIST,
103         MGMT_OP_READ_CONFIG_INFO,
104         MGMT_OP_SET_EXTERNAL_CONFIG,
105         MGMT_OP_SET_PUBLIC_ADDRESS,
106         MGMT_OP_START_SERVICE_DISCOVERY,
107         MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
108         MGMT_OP_READ_EXT_INDEX_LIST,
109         MGMT_OP_READ_ADV_FEATURES,
110         MGMT_OP_ADD_ADVERTISING,
111         MGMT_OP_REMOVE_ADVERTISING,
112         MGMT_OP_GET_ADV_SIZE_INFO,
113         MGMT_OP_START_LIMITED_DISCOVERY,
114         MGMT_OP_READ_EXT_INFO,
115         MGMT_OP_SET_APPEARANCE,
116         MGMT_OP_GET_PHY_CONFIGURATION,
117         MGMT_OP_SET_PHY_CONFIGURATION,
118         MGMT_OP_SET_BLOCKED_KEYS,
119         MGMT_OP_SET_WIDEBAND_SPEECH,
120         MGMT_OP_READ_CONTROLLER_CAP,
121         MGMT_OP_READ_EXP_FEATURES_INFO,
122         MGMT_OP_SET_EXP_FEATURE,
123         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
124         MGMT_OP_SET_DEF_SYSTEM_CONFIG,
125         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
126         MGMT_OP_SET_DEF_RUNTIME_CONFIG,
127         MGMT_OP_GET_DEVICE_FLAGS,
128         MGMT_OP_SET_DEVICE_FLAGS,
129         MGMT_OP_READ_ADV_MONITOR_FEATURES,
130         MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
131         MGMT_OP_REMOVE_ADV_MONITOR,
132         MGMT_OP_ADD_EXT_ADV_PARAMS,
133         MGMT_OP_ADD_EXT_ADV_DATA,
134         MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
135         MGMT_OP_SET_MESH_RECEIVER,
136         MGMT_OP_MESH_READ_FEATURES,
137         MGMT_OP_MESH_SEND,
138         MGMT_OP_MESH_SEND_CANCEL,
139 };
140
141 static const u16 mgmt_events[] = {
142         MGMT_EV_CONTROLLER_ERROR,
143         MGMT_EV_INDEX_ADDED,
144         MGMT_EV_INDEX_REMOVED,
145         MGMT_EV_NEW_SETTINGS,
146         MGMT_EV_CLASS_OF_DEV_CHANGED,
147         MGMT_EV_LOCAL_NAME_CHANGED,
148         MGMT_EV_NEW_LINK_KEY,
149         MGMT_EV_NEW_LONG_TERM_KEY,
150         MGMT_EV_DEVICE_CONNECTED,
151         MGMT_EV_DEVICE_DISCONNECTED,
152         MGMT_EV_CONNECT_FAILED,
153         MGMT_EV_PIN_CODE_REQUEST,
154         MGMT_EV_USER_CONFIRM_REQUEST,
155         MGMT_EV_USER_PASSKEY_REQUEST,
156         MGMT_EV_AUTH_FAILED,
157         MGMT_EV_DEVICE_FOUND,
158         MGMT_EV_DISCOVERING,
159         MGMT_EV_DEVICE_BLOCKED,
160         MGMT_EV_DEVICE_UNBLOCKED,
161         MGMT_EV_DEVICE_UNPAIRED,
162         MGMT_EV_PASSKEY_NOTIFY,
163         MGMT_EV_NEW_IRK,
164         MGMT_EV_NEW_CSRK,
165         MGMT_EV_DEVICE_ADDED,
166         MGMT_EV_DEVICE_REMOVED,
167         MGMT_EV_NEW_CONN_PARAM,
168         MGMT_EV_UNCONF_INDEX_ADDED,
169         MGMT_EV_UNCONF_INDEX_REMOVED,
170         MGMT_EV_NEW_CONFIG_OPTIONS,
171         MGMT_EV_EXT_INDEX_ADDED,
172         MGMT_EV_EXT_INDEX_REMOVED,
173         MGMT_EV_LOCAL_OOB_DATA_UPDATED,
174         MGMT_EV_ADVERTISING_ADDED,
175         MGMT_EV_ADVERTISING_REMOVED,
176         MGMT_EV_EXT_INFO_CHANGED,
177         MGMT_EV_PHY_CONFIGURATION_CHANGED,
178         MGMT_EV_EXP_FEATURE_CHANGED,
179         MGMT_EV_DEVICE_FLAGS_CHANGED,
180         MGMT_EV_ADV_MONITOR_ADDED,
181         MGMT_EV_ADV_MONITOR_REMOVED,
182         MGMT_EV_CONTROLLER_SUSPEND,
183         MGMT_EV_CONTROLLER_RESUME,
184         MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
185         MGMT_EV_ADV_MONITOR_DEVICE_LOST,
186 };
187
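/* Reduced command and event tables reported to untrusted
 * (non-privileged) sockets by read_commands().
 */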
188 static const u16 mgmt_untrusted_commands[] = {
189         MGMT_OP_READ_INDEX_LIST,
190         MGMT_OP_READ_INFO,
191         MGMT_OP_READ_UNCONF_INDEX_LIST,
192         MGMT_OP_READ_CONFIG_INFO,
193         MGMT_OP_READ_EXT_INDEX_LIST,
194         MGMT_OP_READ_EXT_INFO,
195         MGMT_OP_READ_CONTROLLER_CAP,
196         MGMT_OP_READ_EXP_FEATURES_INFO,
197         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
198         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
199 };
200
201 static const u16 mgmt_untrusted_events[] = {
202         MGMT_EV_INDEX_ADDED,
203         MGMT_EV_INDEX_REMOVED,
204         MGMT_EV_NEW_SETTINGS,
205         MGMT_EV_CLASS_OF_DEV_CHANGED,
206         MGMT_EV_LOCAL_NAME_CHANGED,
207         MGMT_EV_UNCONF_INDEX_ADDED,
208         MGMT_EV_UNCONF_INDEX_REMOVED,
209         MGMT_EV_NEW_CONFIG_OPTIONS,
210         MGMT_EV_EXT_INDEX_ADDED,
211         MGMT_EV_EXT_INDEX_REMOVED,
212         MGMT_EV_EXT_INFO_CHANGED,
213         MGMT_EV_EXP_FEATURE_CHANGED,
214 };
215
216 #define CACHE_TIMEOUT   msecs_to_jiffies(2 * 1000)
217
218 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
219                  "\x00\x00\x00\x00\x00\x00\x00\x00"
220
221 /* HCI to MGMT error code conversion table */
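/* Indexed directly by the HCI status code; see mgmt_status(). */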
222 static const u8 mgmt_status_table[] = {
223         MGMT_STATUS_SUCCESS,
224         MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
225         MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
226         MGMT_STATUS_FAILED,             /* Hardware Failure */
227         MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
228         MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
229         MGMT_STATUS_AUTH_FAILED,        /* PIN or Key Missing */
230         MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
231         MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
232         MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
233         MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
234         MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
235         MGMT_STATUS_BUSY,               /* Command Disallowed */
236         MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
237         MGMT_STATUS_REJECTED,           /* Rejected Security */
238         MGMT_STATUS_REJECTED,           /* Rejected Personal */
239         MGMT_STATUS_TIMEOUT,            /* Host Timeout */
240         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
241         MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
242         MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
243         MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
244         MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
245         MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
246         MGMT_STATUS_BUSY,               /* Repeated Attempts */
247         MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
248         MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
249         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
250         MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
251         MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
252         MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
253         MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
254         MGMT_STATUS_FAILED,             /* Unspecified Error */
255         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
256         MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
257         MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
258         MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
259         MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
260         MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
261         MGMT_STATUS_FAILED,             /* Unit Link Key Used */
262         MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
263         MGMT_STATUS_TIMEOUT,            /* Instant Passed */
264         MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
265         MGMT_STATUS_FAILED,             /* Transaction Collision */
266         MGMT_STATUS_FAILED,             /* Reserved for future use */
267         MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
268         MGMT_STATUS_REJECTED,           /* QoS Rejected */
269         MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
270         MGMT_STATUS_REJECTED,           /* Insufficient Security */
271         MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
272         MGMT_STATUS_FAILED,             /* Reserved for future use */
273         MGMT_STATUS_BUSY,               /* Role Switch Pending */
274         MGMT_STATUS_FAILED,             /* Reserved for future use */
275         MGMT_STATUS_FAILED,             /* Slot Violation */
276         MGMT_STATUS_FAILED,             /* Role Switch Failed */
277         MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
278         MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
279         MGMT_STATUS_BUSY,               /* Host Busy Pairing */
280         MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
281         MGMT_STATUS_BUSY,               /* Controller Busy */
282         MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
283         MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
284         MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
285         MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
286         MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
287 };
288
289 static u8 mgmt_errno_status(int err)
290 {
291         switch (err) {
292         case 0:
293                 return MGMT_STATUS_SUCCESS;
294         case -EPERM:
295                 return MGMT_STATUS_REJECTED;
296         case -EINVAL:
297                 return MGMT_STATUS_INVALID_PARAMS;
298         case -EOPNOTSUPP:
299                 return MGMT_STATUS_NOT_SUPPORTED;
300         case -EBUSY:
301                 return MGMT_STATUS_BUSY;
302         case -ETIMEDOUT:
303                 return MGMT_STATUS_AUTH_FAILED;
304         case -ENOMEM:
305                 return MGMT_STATUS_NO_RESOURCES;
306         case -EISCONN:
307                 return MGMT_STATUS_ALREADY_CONNECTED;
308         case -ENOTCONN:
309                 return MGMT_STATUS_DISCONNECTED;
310         }
311
312         return MGMT_STATUS_FAILED;
313 }
314
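/* Convert an error into a MGMT status: negative values are treated as
 * errnos, non-negative values as HCI status codes looked up in
 * mgmt_status_table.
 */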
315 static u8 mgmt_status(int err)
316 {
317         if (err < 0)
318                 return mgmt_errno_status(err);
319
320         if (err < ARRAY_SIZE(mgmt_status_table))
321                 return mgmt_status_table[err];
322
323         return MGMT_STATUS_FAILED;
324 }
325
326 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
327                             u16 len, int flag)
328 {
329         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
330                                flag, NULL);
331 }
332
333 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
334                               u16 len, int flag, struct sock *skip_sk)
335 {
336         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337                                flag, skip_sk);
338 }
339
340 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
341                       struct sock *skip_sk)
342 {
343         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
344                                HCI_SOCK_TRUSTED, skip_sk);
345 }
346
347 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
348 {
349         return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
350                                    skip_sk);
351 }
352
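/* Translate a MGMT LE address type (BDADDR_LE_*) into the HCI
 * ADDR_LE_DEV_* representation.
 */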
353 static u8 le_addr_type(u8 mgmt_addr_type)
354 {
355         if (mgmt_addr_type == BDADDR_LE_PUBLIC)
356                 return ADDR_LE_DEV_PUBLIC;
357         else
358                 return ADDR_LE_DEV_RANDOM;
359 }
360
361 void mgmt_fill_version_info(void *ver)
362 {
363         struct mgmt_rp_read_version *rp = ver;
364
365         rp->version = MGMT_VERSION;
366         rp->revision = cpu_to_le16(MGMT_REVISION);
367 }
368
369 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
370                         u16 data_len)
371 {
372         struct mgmt_rp_read_version rp;
373
374         bt_dev_dbg(hdev, "sock %p", sk);
375
376         mgmt_fill_version_info(&rp);
377
378         return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
379                                  &rp, sizeof(rp));
380 }
381
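/* Reply with the supported commands and events; untrusted sockets only
 * see the reduced untrusted tables.
 */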
382 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
383                          u16 data_len)
384 {
385         struct mgmt_rp_read_commands *rp;
386         u16 num_commands, num_events;
387         size_t rp_size;
388         int i, err;
389
390         bt_dev_dbg(hdev, "sock %p", sk);
391
392         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
393                 num_commands = ARRAY_SIZE(mgmt_commands);
394                 num_events = ARRAY_SIZE(mgmt_events);
395         } else {
396                 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
397                 num_events = ARRAY_SIZE(mgmt_untrusted_events);
398         }
399
400         rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
401
402         rp = kmalloc(rp_size, GFP_KERNEL);
403         if (!rp)
404                 return -ENOMEM;
405
406         rp->num_commands = cpu_to_le16(num_commands);
407         rp->num_events = cpu_to_le16(num_events);
408
409         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
410                 __le16 *opcode = rp->opcodes;
411
412                 for (i = 0; i < num_commands; i++, opcode++)
413                         put_unaligned_le16(mgmt_commands[i], opcode);
414
415                 for (i = 0; i < num_events; i++, opcode++)
416                         put_unaligned_le16(mgmt_events[i], opcode);
417         } else {
418                 __le16 *opcode = rp->opcodes;
419
420                 for (i = 0; i < num_commands; i++, opcode++)
421                         put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
422
423                 for (i = 0; i < num_events; i++, opcode++)
424                         put_unaligned_le16(mgmt_untrusted_events[i], opcode);
425         }
426
427         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
428                                 rp, rp_size);
429         kfree(rp);
430
431         return err;
432 }
433
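/* List the indexes of all configured primary controllers, skipping
 * devices that are still in setup or config, bound to a user channel,
 * or marked raw-only.
 */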
434 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
435                            u16 data_len)
436 {
437         struct mgmt_rp_read_index_list *rp;
438         struct hci_dev *d;
439         size_t rp_len;
440         u16 count;
441         int err;
442
443         bt_dev_dbg(hdev, "sock %p", sk);
444
445         read_lock(&hci_dev_list_lock);
446
447         count = 0;
448         list_for_each_entry(d, &hci_dev_list, list) {
449                 if (d->dev_type == HCI_PRIMARY &&
450                     !hci_dev_test_flag(d, HCI_UNCONFIGURED))
451                         count++;
452         }
453
454         rp_len = sizeof(*rp) + (2 * count);
455         rp = kmalloc(rp_len, GFP_ATOMIC);
456         if (!rp) {
457                 read_unlock(&hci_dev_list_lock);
458                 return -ENOMEM;
459         }
460
461         count = 0;
462         list_for_each_entry(d, &hci_dev_list, list) {
463                 if (hci_dev_test_flag(d, HCI_SETUP) ||
464                     hci_dev_test_flag(d, HCI_CONFIG) ||
465                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
466                         continue;
467
468                 /* Devices marked as raw-only are neither configured
469                  * nor unconfigured controllers.
470                  */
471                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
472                         continue;
473
474                 if (d->dev_type == HCI_PRIMARY &&
475                     !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
476                         rp->index[count++] = cpu_to_le16(d->id);
477                         bt_dev_dbg(hdev, "Added hci%u", d->id);
478                 }
479         }
480
481         rp->num_controllers = cpu_to_le16(count);
482         rp_len = sizeof(*rp) + (2 * count);
483
484         read_unlock(&hci_dev_list_lock);
485
486         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
487                                 0, rp, rp_len);
488
489         kfree(rp);
490
491         return err;
492 }
493
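/* Same as read_index_list(), but for controllers that are still
 * flagged HCI_UNCONFIGURED.
 */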
494 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
495                                   void *data, u16 data_len)
496 {
497         struct mgmt_rp_read_unconf_index_list *rp;
498         struct hci_dev *d;
499         size_t rp_len;
500         u16 count;
501         int err;
502
503         bt_dev_dbg(hdev, "sock %p", sk);
504
505         read_lock(&hci_dev_list_lock);
506
507         count = 0;
508         list_for_each_entry(d, &hci_dev_list, list) {
509                 if (d->dev_type == HCI_PRIMARY &&
510                     hci_dev_test_flag(d, HCI_UNCONFIGURED))
511                         count++;
512         }
513
514         rp_len = sizeof(*rp) + (2 * count);
515         rp = kmalloc(rp_len, GFP_ATOMIC);
516         if (!rp) {
517                 read_unlock(&hci_dev_list_lock);
518                 return -ENOMEM;
519         }
520
521         count = 0;
522         list_for_each_entry(d, &hci_dev_list, list) {
523                 if (hci_dev_test_flag(d, HCI_SETUP) ||
524                     hci_dev_test_flag(d, HCI_CONFIG) ||
525                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
526                         continue;
527
528                 /* Devices marked as raw-only are neither configured
529                  * nor unconfigured controllers.
530                  */
531                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
532                         continue;
533
534                 if (d->dev_type == HCI_PRIMARY &&
535                     hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
536                         rp->index[count++] = cpu_to_le16(d->id);
537                         bt_dev_dbg(hdev, "Added hci%u", d->id);
538                 }
539         }
540
541         rp->num_controllers = cpu_to_le16(count);
542         rp_len = sizeof(*rp) + (2 * count);
543
544         read_unlock(&hci_dev_list_lock);
545
546         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
547                                 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
548
549         kfree(rp);
550
551         return err;
552 }
553
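/* Extended index list: covers primary and AMP controllers and reports
 * a type per entry (0x00 configured, 0x01 unconfigured, 0x02 AMP)
 * together with the bus. Calling it switches the socket to extended
 * index events.
 */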
554 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
555                                void *data, u16 data_len)
556 {
557         struct mgmt_rp_read_ext_index_list *rp;
558         struct hci_dev *d;
559         u16 count;
560         int err;
561
562         bt_dev_dbg(hdev, "sock %p", sk);
563
564         read_lock(&hci_dev_list_lock);
565
566         count = 0;
567         list_for_each_entry(d, &hci_dev_list, list) {
568                 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
569                         count++;
570         }
571
572         rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
573         if (!rp) {
574                 read_unlock(&hci_dev_list_lock);
575                 return -ENOMEM;
576         }
577
578         count = 0;
579         list_for_each_entry(d, &hci_dev_list, list) {
580                 if (hci_dev_test_flag(d, HCI_SETUP) ||
581                     hci_dev_test_flag(d, HCI_CONFIG) ||
582                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
583                         continue;
584
585                 /* Devices marked as raw-only are neither configured
586                  * nor unconfigured controllers.
587                  */
588                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
589                         continue;
590
591                 if (d->dev_type == HCI_PRIMARY) {
592                         if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
593                                 rp->entry[count].type = 0x01;
594                         else
595                                 rp->entry[count].type = 0x00;
596                 } else if (d->dev_type == HCI_AMP) {
597                         rp->entry[count].type = 0x02;
598                 } else {
599                         continue;
600                 }
601
602                 rp->entry[count].bus = d->bus;
603                 rp->entry[count++].index = cpu_to_le16(d->id);
604                 bt_dev_dbg(hdev, "Added hci%u", d->id);
605         }
606
607         rp->num_controllers = cpu_to_le16(count);
608
609         read_unlock(&hci_dev_list_lock);
610
611         /* If this command is called at least once, then all the
612          * default index and unconfigured index events are disabled
613          * and from now on only extended index events are used.
614          */
615         hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
616         hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
617         hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
618
619         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
620                                 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
621                                 struct_size(rp, entry, count));
622
623         kfree(rp);
624
625         return err;
626 }
627
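/* A controller counts as configured once any required external
 * configuration has completed and, where the quirks demand it, a
 * valid public address has been set.
 */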
628 static bool is_configured(struct hci_dev *hdev)
629 {
630         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
631             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
632                 return false;
633
634         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
635              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
636             !bacmp(&hdev->public_addr, BDADDR_ANY))
637                 return false;
638
639         return true;
640 }
641
642 static __le32 get_missing_options(struct hci_dev *hdev)
643 {
644         u32 options = 0;
645
646         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
647             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
648                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
649
650         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
651              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
652             !bacmp(&hdev->public_addr, BDADDR_ANY))
653                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
654
655         return cpu_to_le32(options);
656 }
657
658 static int new_options(struct hci_dev *hdev, struct sock *skip)
659 {
660         __le32 options = get_missing_options(hdev);
661
662         return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
663                                   sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
664 }
665
666 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
667 {
668         __le32 options = get_missing_options(hdev);
669
670         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
671                                  sizeof(options));
672 }
673
674 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
675                             void *data, u16 data_len)
676 {
677         struct mgmt_rp_read_config_info rp;
678         u32 options = 0;
679
680         bt_dev_dbg(hdev, "sock %p", sk);
681
682         hci_dev_lock(hdev);
683
684         memset(&rp, 0, sizeof(rp));
685         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
686
687         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
688                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
689
690         if (hdev->set_bdaddr)
691                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
692
693         rp.supported_options = cpu_to_le32(options);
694         rp.missing_options = get_missing_options(hdev);
695
696         hci_dev_unlock(hdev);
697
698         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
699                                  &rp, sizeof(rp));
700 }
701
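/* Build the bitmask of PHYs the controller can support, derived from
 * its BR/EDR packet-type features and LE PHY features.
 */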
702 static u32 get_supported_phys(struct hci_dev *hdev)
703 {
704         u32 supported_phys = 0;
705
706         if (lmp_bredr_capable(hdev)) {
707                 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
708
709                 if (hdev->features[0][0] & LMP_3SLOT)
710                         supported_phys |= MGMT_PHY_BR_1M_3SLOT;
711
712                 if (hdev->features[0][0] & LMP_5SLOT)
713                         supported_phys |= MGMT_PHY_BR_1M_5SLOT;
714
715                 if (lmp_edr_2m_capable(hdev)) {
716                         supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
717
718                         if (lmp_edr_3slot_capable(hdev))
719                                 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
720
721                         if (lmp_edr_5slot_capable(hdev))
722                                 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
723
724                         if (lmp_edr_3m_capable(hdev)) {
725                                 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
726
727                                 if (lmp_edr_3slot_capable(hdev))
728                                         supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
729
730                                 if (lmp_edr_5slot_capable(hdev))
731                                         supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
732                         }
733                 }
734         }
735
736         if (lmp_le_capable(hdev)) {
737                 supported_phys |= MGMT_PHY_LE_1M_TX;
738                 supported_phys |= MGMT_PHY_LE_1M_RX;
739
740                 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
741                         supported_phys |= MGMT_PHY_LE_2M_TX;
742                         supported_phys |= MGMT_PHY_LE_2M_RX;
743                 }
744
745                 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
746                         supported_phys |= MGMT_PHY_LE_CODED_TX;
747                         supported_phys |= MGMT_PHY_LE_CODED_RX;
748                 }
749         }
750
751         return supported_phys;
752 }
753
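/* Build the bitmask of PHYs currently selected, based on the enabled
 * BR/EDR packet types and the default LE TX/RX PHYs.
 */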
754 static u32 get_selected_phys(struct hci_dev *hdev)
755 {
756         u32 selected_phys = 0;
757
758         if (lmp_bredr_capable(hdev)) {
759                 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
760
761                 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
762                         selected_phys |= MGMT_PHY_BR_1M_3SLOT;
763
764                 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
765                         selected_phys |= MGMT_PHY_BR_1M_5SLOT;
766
767                 if (lmp_edr_2m_capable(hdev)) {
768                         if (!(hdev->pkt_type & HCI_2DH1))
769                                 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
770
771                         if (lmp_edr_3slot_capable(hdev) &&
772                             !(hdev->pkt_type & HCI_2DH3))
773                                 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
774
775                         if (lmp_edr_5slot_capable(hdev) &&
776                             !(hdev->pkt_type & HCI_2DH5))
777                                 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
778
779                         if (lmp_edr_3m_capable(hdev)) {
780                                 if (!(hdev->pkt_type & HCI_3DH1))
781                                         selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
782
783                                 if (lmp_edr_3slot_capable(hdev) &&
784                                     !(hdev->pkt_type & HCI_3DH3))
785                                         selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
786
787                                 if (lmp_edr_5slot_capable(hdev) &&
788                                     !(hdev->pkt_type & HCI_3DH5))
789                                         selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
790                         }
791                 }
792         }
793
794         if (lmp_le_capable(hdev)) {
795                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
796                         selected_phys |= MGMT_PHY_LE_1M_TX;
797
798                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
799                         selected_phys |= MGMT_PHY_LE_1M_RX;
800
801                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
802                         selected_phys |= MGMT_PHY_LE_2M_TX;
803
804                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
805                         selected_phys |= MGMT_PHY_LE_2M_RX;
806
807                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
808                         selected_phys |= MGMT_PHY_LE_CODED_TX;
809
810                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
811                         selected_phys |= MGMT_PHY_LE_CODED_RX;
812         }
813
814         return selected_phys;
815 }
816
817 static u32 get_configurable_phys(struct hci_dev *hdev)
818 {
819         return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
820                 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
821 }
822
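/* Settings the controller is capable of supporting, as reported in the
 * Read Controller Information replies.
 */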
823 static u32 get_supported_settings(struct hci_dev *hdev)
824 {
825         u32 settings = 0;
826
827         settings |= MGMT_SETTING_POWERED;
828         settings |= MGMT_SETTING_BONDABLE;
829         settings |= MGMT_SETTING_DEBUG_KEYS;
830         settings |= MGMT_SETTING_CONNECTABLE;
831         settings |= MGMT_SETTING_DISCOVERABLE;
832
833         if (lmp_bredr_capable(hdev)) {
834                 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
835                         settings |= MGMT_SETTING_FAST_CONNECTABLE;
836                 settings |= MGMT_SETTING_BREDR;
837                 settings |= MGMT_SETTING_LINK_SECURITY;
838
839                 if (lmp_ssp_capable(hdev)) {
840                         settings |= MGMT_SETTING_SSP;
841                         if (IS_ENABLED(CONFIG_BT_HS))
842                                 settings |= MGMT_SETTING_HS;
843                 }
844
845                 if (lmp_sc_capable(hdev))
846                         settings |= MGMT_SETTING_SECURE_CONN;
847
848                 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
849                              &hdev->quirks))
850                         settings |= MGMT_SETTING_WIDEBAND_SPEECH;
851         }
852
853         if (lmp_le_capable(hdev)) {
854                 settings |= MGMT_SETTING_LE;
855                 settings |= MGMT_SETTING_SECURE_CONN;
856                 settings |= MGMT_SETTING_PRIVACY;
857                 settings |= MGMT_SETTING_STATIC_ADDRESS;
858                 settings |= MGMT_SETTING_ADVERTISING;
859         }
860
861         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
862             hdev->set_bdaddr)
863                 settings |= MGMT_SETTING_CONFIGURATION;
864
865         if (cis_central_capable(hdev))
866                 settings |= MGMT_SETTING_CIS_CENTRAL;
867
868         if (cis_peripheral_capable(hdev))
869                 settings |= MGMT_SETTING_CIS_PERIPHERAL;
870
871         settings |= MGMT_SETTING_PHY_CONFIGURATION;
872
873         return settings;
874 }
875
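/* Settings that are currently active, derived from the hdev flags. */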
876 static u32 get_current_settings(struct hci_dev *hdev)
877 {
878         u32 settings = 0;
879
880         if (hdev_is_powered(hdev))
881                 settings |= MGMT_SETTING_POWERED;
882
883         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
884                 settings |= MGMT_SETTING_CONNECTABLE;
885
886         if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
887                 settings |= MGMT_SETTING_FAST_CONNECTABLE;
888
889         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
890                 settings |= MGMT_SETTING_DISCOVERABLE;
891
892         if (hci_dev_test_flag(hdev, HCI_BONDABLE))
893                 settings |= MGMT_SETTING_BONDABLE;
894
895         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
896                 settings |= MGMT_SETTING_BREDR;
897
898         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
899                 settings |= MGMT_SETTING_LE;
900
901         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
902                 settings |= MGMT_SETTING_LINK_SECURITY;
903
904         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
905                 settings |= MGMT_SETTING_SSP;
906
907         if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
908                 settings |= MGMT_SETTING_HS;
909
910         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
911                 settings |= MGMT_SETTING_ADVERTISING;
912
913         if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
914                 settings |= MGMT_SETTING_SECURE_CONN;
915
916         if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
917                 settings |= MGMT_SETTING_DEBUG_KEYS;
918
919         if (hci_dev_test_flag(hdev, HCI_PRIVACY))
920                 settings |= MGMT_SETTING_PRIVACY;
921
922         /* The current setting for static address has two purposes. The
923          * first is to indicate if the static address will be used and
924          * the second is to indicate if it is actually set.
925          *
926          * This means if the static address is not configured, this flag
927          * will never be set. If the address is configured, then whether
928          * the address is actually used decides if the flag is set or not.
929          *
930          * For single-mode LE-only controllers and dual-mode controllers
931          * with BR/EDR disabled, the existence of the static address will
932          * be evaluated.
933          */
934         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
935             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
936             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
937                 if (bacmp(&hdev->static_addr, BDADDR_ANY))
938                         settings |= MGMT_SETTING_STATIC_ADDRESS;
939         }
940
941         if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
942                 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
943
944         if (cis_central_capable(hdev))
945                 settings |= MGMT_SETTING_CIS_CENTRAL;
946
947         if (cis_peripheral_capable(hdev))
948                 settings |= MGMT_SETTING_CIS_PERIPHERAL;
949
950         return settings;
951 }
952
953 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
954 {
955         return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
956 }
957
958 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
959 {
960         struct mgmt_pending_cmd *cmd;
961
962         /* If there's a pending mgmt command the flags will not yet have
963          * their final values, so check for this first.
964          */
965         cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
966         if (cmd) {
967                 struct mgmt_mode *cp = cmd->param;
968                 if (cp->val == 0x01)
969                         return LE_AD_GENERAL;
970                 else if (cp->val == 0x02)
971                         return LE_AD_LIMITED;
972         } else {
973                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
974                         return LE_AD_LIMITED;
975                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
976                         return LE_AD_GENERAL;
977         }
978
979         return 0;
980 }
981
982 bool mgmt_get_connectable(struct hci_dev *hdev)
983 {
984         struct mgmt_pending_cmd *cmd;
985
986         /* If there's a pending mgmt command the flag will not yet have
987          * its final value, so check for this first.
988          */
989         cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
990         if (cmd) {
991                 struct mgmt_mode *cp = cmd->param;
992
993                 return cp->val;
994         }
995
996         return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
997 }
998
999 static int service_cache_sync(struct hci_dev *hdev, void *data)
1000 {
1001         hci_update_eir_sync(hdev);
1002         hci_update_class_sync(hdev);
1003
1004         return 0;
1005 }
1006
1007 static void service_cache_off(struct work_struct *work)
1008 {
1009         struct hci_dev *hdev = container_of(work, struct hci_dev,
1010                                             service_cache.work);
1011
1012         if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1013                 return;
1014
1015         hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1016 }
1017
1018 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1019 {
1020         /* The generation of a new RPA and programming it into the
1021          * controller happens in hci_enable_advertising_sync() (or
1022          * hci_start_ext_adv_sync() when extended advertising is used).
1023          */
1024         if (ext_adv_capable(hdev))
1025                 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1026         else
1027                 return hci_enable_advertising_sync(hdev);
1028 }
1029
1030 static void rpa_expired(struct work_struct *work)
1031 {
1032         struct hci_dev *hdev = container_of(work, struct hci_dev,
1033                                             rpa_expired.work);
1034
1035         bt_dev_dbg(hdev, "");
1036
1037         hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1038
1039         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1040                 return;
1041
1042         hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1043 }
1044
1045 static void discov_off(struct work_struct *work)
1046 {
1047         struct hci_dev *hdev = container_of(work, struct hci_dev,
1048                                             discov_off.work);
1049
1050         bt_dev_dbg(hdev, "");
1051
1052         hci_dev_lock(hdev);
1053
1054         /* When the discoverable timeout triggers, just make sure
1055          * the limited discoverable flag is cleared. Even in the case
1056          * of a timeout triggered from general discoverable, it is
1057          * safe to unconditionally clear the flag.
1058          */
1059         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1060         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1061         hdev->discov_timeout = 0;
1062
1063         hci_update_discoverable(hdev);
1064
1065         mgmt_new_settings(hdev);
1066
1067         hci_dev_unlock(hdev);
1068 }
1069
1070 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1071
1072 static void mesh_send_complete(struct hci_dev *hdev,
1073                                struct mgmt_mesh_tx *mesh_tx, bool silent)
1074 {
1075         u8 handle = mesh_tx->handle;
1076
1077         if (!silent)
1078                 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1079                            sizeof(handle), NULL);
1080
1081         mgmt_mesh_remove(mesh_tx);
1082 }
1083
1084 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1085 {
1086         struct mgmt_mesh_tx *mesh_tx;
1087
1088         hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1089         hci_disable_advertising_sync(hdev);
1090         mesh_tx = mgmt_mesh_next(hdev, NULL);
1091
1092         if (mesh_tx)
1093                 mesh_send_complete(hdev, mesh_tx, false);
1094
1095         return 0;
1096 }
1097
1098 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1099 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1100 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1101 {
1102         struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1103
1104         if (!mesh_tx)
1105                 return;
1106
1107         err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1108                                  mesh_send_start_complete);
1109
1110         if (err < 0)
1111                 mesh_send_complete(hdev, mesh_tx, false);
1112         else
1113                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1114 }
1115
1116 static void mesh_send_done(struct work_struct *work)
1117 {
1118         struct hci_dev *hdev = container_of(work, struct hci_dev,
1119                                             mesh_send_done.work);
1120
1121         if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1122                 return;
1123
1124         hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1125 }
1126
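/* One-time switch of a controller to mgmt control: initialize the
 * delayed work items and clear HCI_BONDABLE so that user space has to
 * enable bonding explicitly.
 */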
1127 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1128 {
1129         if (hci_dev_test_flag(hdev, HCI_MGMT))
1130                 return;
1131
1132         BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1133
1134         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1135         INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1136         INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1137         INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1138
1139         /* Non-mgmt controlled devices get this bit set
1140          * implicitly so that pairing works for them; for mgmt,
1141          * however, we require user space to explicitly
1142          * enable it.
1143          */
1144         hci_dev_clear_flag(hdev, HCI_BONDABLE);
1145
1146         hci_dev_set_flag(hdev, HCI_MGMT);
1147 }
1148
1149 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1150                                 void *data, u16 data_len)
1151 {
1152         struct mgmt_rp_read_info rp;
1153
1154         bt_dev_dbg(hdev, "sock %p", sk);
1155
1156         hci_dev_lock(hdev);
1157
1158         memset(&rp, 0, sizeof(rp));
1159
1160         bacpy(&rp.bdaddr, &hdev->bdaddr);
1161
1162         rp.version = hdev->hci_ver;
1163         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1164
1165         rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1166         rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1167
1168         memcpy(rp.dev_class, hdev->dev_class, 3);
1169
1170         memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1171         memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1172
1173         hci_dev_unlock(hdev);
1174
1175         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1176                                  sizeof(rp));
1177 }
1178
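/* Append class of device, appearance and the complete and short local
 * names to an EIR buffer; returns the number of bytes written.
 */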
1179 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1180 {
1181         u16 eir_len = 0;
1182         size_t name_len;
1183
1184         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1185                 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1186                                           hdev->dev_class, 3);
1187
1188         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1189                 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1190                                           hdev->appearance);
1191
1192         name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1193         eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1194                                   hdev->dev_name, name_len);
1195
1196         name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1197         eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1198                                   hdev->short_name, name_len);
1199
1200         return eir_len;
1201 }
1202
1203 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1204                                     void *data, u16 data_len)
1205 {
1206         char buf[512];
1207         struct mgmt_rp_read_ext_info *rp = (void *)buf;
1208         u16 eir_len;
1209
1210         bt_dev_dbg(hdev, "sock %p", sk);
1211
1212         memset(&buf, 0, sizeof(buf));
1213
1214         hci_dev_lock(hdev);
1215
1216         bacpy(&rp->bdaddr, &hdev->bdaddr);
1217
1218         rp->version = hdev->hci_ver;
1219         rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1220
1221         rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1222         rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1223
1224
1225         eir_len = append_eir_data_to_buf(hdev, rp->eir);
1226         rp->eir_len = cpu_to_le16(eir_len);
1227
1228         hci_dev_unlock(hdev);
1229
1230         /* If this command is called at least once, then the events
1231          * for class of device and local name changes are disabled
1232          * and only the new extended controller information event
1233          * is used.
1234          */
1235         hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1236         hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1237         hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1238
1239         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1240                                  sizeof(*rp) + eir_len);
1241 }
1242
1243 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1244 {
1245         char buf[512];
1246         struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1247         u16 eir_len;
1248
1249         memset(buf, 0, sizeof(buf));
1250
1251         eir_len = append_eir_data_to_buf(hdev, ev->eir);
1252         ev->eir_len = cpu_to_le16(eir_len);
1253
1254         return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1255                                   sizeof(*ev) + eir_len,
1256                                   HCI_MGMT_EXT_INFO_EVENTS, skip);
1257 }
1258
1259 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1260 {
1261         __le32 settings = cpu_to_le32(get_current_settings(hdev));
1262
1263         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1264                                  sizeof(settings));
1265 }
1266
1267 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1268 {
1269         struct mgmt_ev_advertising_added ev;
1270
1271         ev.instance = instance;
1272
1273         mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1274 }
1275
1276 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1277                               u8 instance)
1278 {
1279         struct mgmt_ev_advertising_removed ev;
1280
1281         ev.instance = instance;
1282
1283         mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1284 }
1285
1286 static void cancel_adv_timeout(struct hci_dev *hdev)
1287 {
1288         if (hdev->adv_instance_timeout) {
1289                 hdev->adv_instance_timeout = 0;
1290                 cancel_delayed_work(&hdev->adv_instance_expire);
1291         }
1292 }
1293
1294 /* This function requires the caller holds hdev->lock */
1295 static void restart_le_actions(struct hci_dev *hdev)
1296 {
1297         struct hci_conn_params *p;
1298
1299         list_for_each_entry(p, &hdev->le_conn_params, list) {
1300                 /* Needed for the AUTO_OFF case, where the controller
1301                  * might not "really" have been powered off.
1302                  */
1303                 list_del_init(&p->action);
1304
1305                 switch (p->auto_connect) {
1306                 case HCI_AUTO_CONN_DIRECT:
1307                 case HCI_AUTO_CONN_ALWAYS:
1308                         list_add(&p->action, &hdev->pend_le_conns);
1309                         break;
1310                 case HCI_AUTO_CONN_REPORT:
1311                         list_add(&p->action, &hdev->pend_le_reports);
1312                         break;
1313                 default:
1314                         break;
1315                 }
1316         }
1317 }
1318
1319 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1320 {
1321         __le32 ev = cpu_to_le32(get_current_settings(hdev));
1322
1323         return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1324                                   sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1325 }
1326
1327 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1328 {
1329         struct mgmt_pending_cmd *cmd = data;
1330         struct mgmt_mode *cp;
1331
1332         /* Make sure cmd still outstanding. */
1333         if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1334                 return;
1335
1336         cp = cmd->param;
1337
1338         bt_dev_dbg(hdev, "err %d", err);
1339
1340         if (!err) {
1341                 if (cp->val) {
1342                         hci_dev_lock(hdev);
1343                         restart_le_actions(hdev);
1344                         hci_update_passive_scan(hdev);
1345                         hci_dev_unlock(hdev);
1346                 }
1347
1348                 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1349
1350                 /* Only call new_settings() for power on, as power off is deferred
1351                  * to the hdev->power_off work, which does call hci_dev_do_close.
1352                  */
1353                 if (cp->val)
1354                         new_settings(hdev, cmd->sk);
1355         } else {
1356                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1357                                 mgmt_status(err));
1358         }
1359
1360         mgmt_pending_remove(cmd);
1361 }
1362
1363 static int set_powered_sync(struct hci_dev *hdev, void *data)
1364 {
1365         struct mgmt_pending_cmd *cmd = data;
1366         struct mgmt_mode *cp = cmd->param;
1367
1368         BT_DBG("%s", hdev->name);
1369
1370         return hci_set_powered_sync(hdev, cp->val);
1371 }
1372
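/* Handler for MGMT_OP_SET_POWERED: queues set_powered_sync() and
 * completes the command from mgmt_set_powered_complete().
 */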
1373 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1374                        u16 len)
1375 {
1376         struct mgmt_mode *cp = data;
1377         struct mgmt_pending_cmd *cmd;
1378         int err;
1379
1380         bt_dev_dbg(hdev, "sock %p", sk);
1381
1382         if (cp->val != 0x00 && cp->val != 0x01)
1383                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1384                                        MGMT_STATUS_INVALID_PARAMS);
1385
1386         hci_dev_lock(hdev);
1387
1388         if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1389                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1390                                       MGMT_STATUS_BUSY);
1391                 goto failed;
1392         }
1393
1394         if (!!cp->val == hdev_is_powered(hdev)) {
1395                 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1396                 goto failed;
1397         }
1398
1399         cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1400         if (!cmd) {
1401                 err = -ENOMEM;
1402                 goto failed;
1403         }
1404
1405         err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1406                                  mgmt_set_powered_complete);
1407
1408         if (err < 0)
1409                 mgmt_pending_remove(cmd);
1410
1411 failed:
1412         hci_dev_unlock(hdev);
1413         return err;
1414 }
1415
1416 int mgmt_new_settings(struct hci_dev *hdev)
1417 {
1418         return new_settings(hdev, NULL);
1419 }
1420
1421 struct cmd_lookup {
1422         struct sock *sk;
1423         struct hci_dev *hdev;
1424         u8 mgmt_status;
1425 };
1426
1427 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1428 {
1429         struct cmd_lookup *match = data;
1430
1431         send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1432
1433         list_del(&cmd->list);
1434
1435         if (match->sk == NULL) {
1436                 match->sk = cmd->sk;
1437                 sock_hold(match->sk);
1438         }
1439
1440         mgmt_pending_free(cmd);
1441 }
1442
1443 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1444 {
1445         u8 *status = data;
1446
1447         mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1448         mgmt_pending_remove(cmd);
1449 }
1450
1451 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1452 {
1453         if (cmd->cmd_complete) {
1454                 u8 *status = data;
1455
1456                 cmd->cmd_complete(cmd, *status);
1457                 mgmt_pending_remove(cmd);
1458
1459                 return;
1460         }
1461
1462         cmd_status_rsp(cmd, data);
1463 }
1464
1465 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1466 {
1467         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1468                                  cmd->param, cmd->param_len);
1469 }
1470
1471 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1472 {
1473         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1474                                  cmd->param, sizeof(struct mgmt_addr_info));
1475 }
1476
1477 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1478 {
1479         if (!lmp_bredr_capable(hdev))
1480                 return MGMT_STATUS_NOT_SUPPORTED;
1481         else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1482                 return MGMT_STATUS_REJECTED;
1483         else
1484                 return MGMT_STATUS_SUCCESS;
1485 }
1486
1487 static u8 mgmt_le_support(struct hci_dev *hdev)
1488 {
1489         if (!lmp_le_capable(hdev))
1490                 return MGMT_STATUS_NOT_SUPPORTED;
1491         else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1492                 return MGMT_STATUS_REJECTED;
1493         else
1494                 return MGMT_STATUS_SUCCESS;
1495 }
1496
1497 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1498                                            int err)
1499 {
1500         struct mgmt_pending_cmd *cmd = data;
1501
1502         bt_dev_dbg(hdev, "err %d", err);
1503
1504         /* Make sure cmd still outstanding. */
1505         if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1506                 return;
1507
1508         hci_dev_lock(hdev);
1509
1510         if (err) {
1511                 u8 mgmt_err = mgmt_status(err);
1512                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1513                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1514                 goto done;
1515         }
1516
1517         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1518             hdev->discov_timeout > 0) {
1519                 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1520                 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1521         }
1522
1523         send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1524         new_settings(hdev, cmd->sk);
1525
1526 done:
1527         mgmt_pending_remove(cmd);
1528         hci_dev_unlock(hdev);
1529 }
1530
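     /* hci_cmd_sync callback: push the updated discoverable state down to
      * the controller.
      */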
1531 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1532 {
1533         BT_DBG("%s", hdev->name);
1534
1535         return hci_update_discoverable_sync(hdev);
1536 }
1537
1538 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1539                             u16 len)
1540 {
1541         struct mgmt_cp_set_discoverable *cp = data;
1542         struct mgmt_pending_cmd *cmd;
1543         u16 timeout;
1544         int err;
1545
1546         bt_dev_dbg(hdev, "sock %p", sk);
1547
1548         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1549             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1550                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1551                                        MGMT_STATUS_REJECTED);
1552
1553         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1554                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1555                                        MGMT_STATUS_INVALID_PARAMS);
1556
1557         timeout = __le16_to_cpu(cp->timeout);
1558
1559         /* Disabling discoverable requires that no timeout is set,
1560          * and enabling limited discoverable requires a timeout.
1561          */
1562         if ((cp->val == 0x00 && timeout > 0) ||
1563             (cp->val == 0x02 && timeout == 0))
1564                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1565                                        MGMT_STATUS_INVALID_PARAMS);
1566
1567         hci_dev_lock(hdev);
1568
1569         if (!hdev_is_powered(hdev) && timeout > 0) {
1570                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1571                                       MGMT_STATUS_NOT_POWERED);
1572                 goto failed;
1573         }
1574
1575         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1576             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1577                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1578                                       MGMT_STATUS_BUSY);
1579                 goto failed;
1580         }
1581
1582         if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1583                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1584                                       MGMT_STATUS_REJECTED);
1585                 goto failed;
1586         }
1587
1588         if (hdev->advertising_paused) {
1589                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1590                                       MGMT_STATUS_BUSY);
1591                 goto failed;
1592         }
1593
1594         if (!hdev_is_powered(hdev)) {
1595                 bool changed = false;
1596
1597                 /* Setting limited discoverable when powered off is
1598                  * not a valid operation, since it requires a timeout,
1599                  * so there is no need to check HCI_LIMITED_DISCOVERABLE.
1600                  */
1601                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1602                         hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1603                         changed = true;
1604                 }
1605
1606                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1607                 if (err < 0)
1608                         goto failed;
1609
1610                 if (changed)
1611                         err = new_settings(hdev, sk);
1612
1613                 goto failed;
1614         }
1615
1616         /* If the current mode is the same, just update the timeout
1617          * value with the new one. When only the timeout changes,
1618          * no HCI transactions are needed.
1619          */
1620         if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1621             (cp->val == 0x02) == hci_dev_test_flag(hdev,
1622                                                    HCI_LIMITED_DISCOVERABLE)) {
1623                 cancel_delayed_work(&hdev->discov_off);
1624                 hdev->discov_timeout = timeout;
1625
1626                 if (cp->val && hdev->discov_timeout > 0) {
1627                         int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1628                         queue_delayed_work(hdev->req_workqueue,
1629                                            &hdev->discov_off, to);
1630                 }
1631
1632                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1633                 goto failed;
1634         }
1635
1636         cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1637         if (!cmd) {
1638                 err = -ENOMEM;
1639                 goto failed;
1640         }
1641
1642         /* Cancel any discoverable timeout that might still be
1643          * active and store the new timeout value. The arming of
1644          * the timeout happens in the complete handler.
1645          */
1646         cancel_delayed_work(&hdev->discov_off);
1647         hdev->discov_timeout = timeout;
1648
1649         if (cp->val)
1650                 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1651         else
1652                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1653
1654         /* Limited discoverable mode */
1655         if (cp->val == 0x02)
1656                 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1657         else
1658                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1659
1660         err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1661                                  mgmt_set_discoverable_complete);
1662
1663         if (err < 0)
1664                 mgmt_pending_remove(cmd);
1665
1666 failed:
1667         hci_dev_unlock(hdev);
1668         return err;
1669 }
1670
1671 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1672                                           int err)
1673 {
1674         struct mgmt_pending_cmd *cmd = data;
1675
1676         bt_dev_dbg(hdev, "err %d", err);
1677
1678         /* Make sure cmd still outstanding. */
1679         if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1680                 return;
1681
1682         hci_dev_lock(hdev);
1683
1684         if (err) {
1685                 u8 mgmt_err = mgmt_status(err);
1686                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1687                 goto done;
1688         }
1689
1690         send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1691         new_settings(hdev, cmd->sk);
1692
1693 done:
1694         if (cmd)
1695                 mgmt_pending_remove(cmd);
1696
1697         hci_dev_unlock(hdev);
1698 }
1699
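     /* Update the connectable setting purely at the flag level; used by
      * set_connectable() while the controller is powered off. Clearing
      * connectable also clears discoverable, and the scan state plus New
      * Settings are refreshed only when the value actually changed.
      */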
1700 static int set_connectable_update_settings(struct hci_dev *hdev,
1701                                            struct sock *sk, u8 val)
1702 {
1703         bool changed = false;
1704         int err;
1705
1706         if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1707                 changed = true;
1708
1709         if (val) {
1710                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1711         } else {
1712                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1713                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1714         }
1715
1716         err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1717         if (err < 0)
1718                 return err;
1719
1720         if (changed) {
1721                 hci_update_scan(hdev);
1722                 hci_update_passive_scan(hdev);
1723                 return new_settings(hdev, sk);
1724         }
1725
1726         return 0;
1727 }
1728
1729 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1730 {
1731         BT_DBG("%s", hdev->name);
1732
1733         return hci_update_connectable_sync(hdev);
1734 }
1735
1736 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1737                            u16 len)
1738 {
1739         struct mgmt_mode *cp = data;
1740         struct mgmt_pending_cmd *cmd;
1741         int err;
1742
1743         bt_dev_dbg(hdev, "sock %p", sk);
1744
1745         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1746             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1747                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1748                                        MGMT_STATUS_REJECTED);
1749
1750         if (cp->val != 0x00 && cp->val != 0x01)
1751                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1752                                        MGMT_STATUS_INVALID_PARAMS);
1753
1754         hci_dev_lock(hdev);
1755
1756         if (!hdev_is_powered(hdev)) {
1757                 err = set_connectable_update_settings(hdev, sk, cp->val);
1758                 goto failed;
1759         }
1760
1761         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1762             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1763                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1764                                       MGMT_STATUS_BUSY);
1765                 goto failed;
1766         }
1767
1768         cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1769         if (!cmd) {
1770                 err = -ENOMEM;
1771                 goto failed;
1772         }
1773
1774         if (cp->val) {
1775                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1776         } else {
1777                 if (hdev->discov_timeout > 0)
1778                         cancel_delayed_work(&hdev->discov_off);
1779
1780                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1781                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1782                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1783         }
1784
1785         err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1786                                  mgmt_set_connectable_complete);
1787
1788         if (err < 0)
1789                 mgmt_pending_remove(cmd);
1790
1791 failed:
1792         hci_dev_unlock(hdev);
1793         return err;
1794 }
1795
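     /* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag. No
      * HCI command is needed, but since a change can affect the local
      * advertising address in limited privacy mode, discoverability is
      * refreshed before New Settings is sent.
      */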
1796 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1797                         u16 len)
1798 {
1799         struct mgmt_mode *cp = data;
1800         bool changed;
1801         int err;
1802
1803         bt_dev_dbg(hdev, "sock %p", sk);
1804
1805         if (cp->val != 0x00 && cp->val != 0x01)
1806                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1807                                        MGMT_STATUS_INVALID_PARAMS);
1808
1809         hci_dev_lock(hdev);
1810
1811         if (cp->val)
1812                 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1813         else
1814                 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1815
1816         err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1817         if (err < 0)
1818                 goto unlock;
1819
1820         if (changed) {
1821                 /* In limited privacy mode the change of bondable mode
1822                  * may affect the local advertising address.
1823                  */
1824                 hci_update_discoverable(hdev);
1825
1826                 err = new_settings(hdev, sk);
1827         }
1828
1829 unlock:
1830         hci_dev_unlock(hdev);
1831         return err;
1832 }
1833
1834 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1835                              u16 len)
1836 {
1837         struct mgmt_mode *cp = data;
1838         struct mgmt_pending_cmd *cmd;
1839         u8 val, status;
1840         int err;
1841
1842         bt_dev_dbg(hdev, "sock %p", sk);
1843
1844         status = mgmt_bredr_support(hdev);
1845         if (status)
1846                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1847                                        status);
1848
1849         if (cp->val != 0x00 && cp->val != 0x01)
1850                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1851                                        MGMT_STATUS_INVALID_PARAMS);
1852
1853         hci_dev_lock(hdev);
1854
1855         if (!hdev_is_powered(hdev)) {
1856                 bool changed = false;
1857
1858                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1859                         hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1860                         changed = true;
1861                 }
1862
1863                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1864                 if (err < 0)
1865                         goto failed;
1866
1867                 if (changed)
1868                         err = new_settings(hdev, sk);
1869
1870                 goto failed;
1871         }
1872
1873         if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1874                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1875                                       MGMT_STATUS_BUSY);
1876                 goto failed;
1877         }
1878
1879         val = !!cp->val;
1880
1881         if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1882                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1883                 goto failed;
1884         }
1885
1886         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1887         if (!cmd) {
1888                 err = -ENOMEM;
1889                 goto failed;
1890         }
1891
1892         err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1893         if (err < 0) {
1894                 mgmt_pending_remove(cmd);
1895                 goto failed;
1896         }
1897
1898 failed:
1899         hci_dev_unlock(hdev);
1900         return err;
1901 }
1902
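     /* Completion handler for Set SSP: on failure revert the optimistically
      * set SSP flag (clearing HS along with it) and fail all pending
      * SET_SSP commands; on success update the flags, answer the pending
      * commands, send New Settings if anything changed and refresh the EIR.
      */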
1903 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1904 {
1905         struct cmd_lookup match = { NULL, hdev };
1906         struct mgmt_pending_cmd *cmd = data;
1907         struct mgmt_mode *cp = cmd->param;
1908         u8 enable = cp->val;
1909         bool changed;
1910
1911         /* Make sure cmd still outstanding. */
1912         if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1913                 return;
1914
1915         if (err) {
1916                 u8 mgmt_err = mgmt_status(err);
1917
1918                 if (enable && hci_dev_test_and_clear_flag(hdev,
1919                                                           HCI_SSP_ENABLED)) {
1920                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1921                         new_settings(hdev, NULL);
1922                 }
1923
1924                 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1925                                      &mgmt_err);
1926                 return;
1927         }
1928
1929         if (enable) {
1930                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1931         } else {
1932                 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1933
1934                 if (!changed)
1935                         changed = hci_dev_test_and_clear_flag(hdev,
1936                                                               HCI_HS_ENABLED);
1937                 else
1938                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1939         }
1940
1941         mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1942
1943         if (changed)
1944                 new_settings(hdev, match.sk);
1945
1946         if (match.sk)
1947                 sock_put(match.sk);
1948
1949         hci_update_eir_sync(hdev);
1950 }
1951
1952 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1953 {
1954         struct mgmt_pending_cmd *cmd = data;
1955         struct mgmt_mode *cp = cmd->param;
1956         bool changed = false;
1957         int err;
1958
1959         if (cp->val)
1960                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1961
1962         err = hci_write_ssp_mode_sync(hdev, cp->val);
1963
1964         if (!err && changed)
1965                 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1966
1967         return err;
1968 }
1969
1970 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1971 {
1972         struct mgmt_mode *cp = data;
1973         struct mgmt_pending_cmd *cmd;
1974         u8 status;
1975         int err;
1976
1977         bt_dev_dbg(hdev, "sock %p", sk);
1978
1979         status = mgmt_bredr_support(hdev);
1980         if (status)
1981                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1982
1983         if (!lmp_ssp_capable(hdev))
1984                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1985                                        MGMT_STATUS_NOT_SUPPORTED);
1986
1987         if (cp->val != 0x00 && cp->val != 0x01)
1988                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1989                                        MGMT_STATUS_INVALID_PARAMS);
1990
1991         hci_dev_lock(hdev);
1992
1993         if (!hdev_is_powered(hdev)) {
1994                 bool changed;
1995
1996                 if (cp->val) {
1997                         changed = !hci_dev_test_and_set_flag(hdev,
1998                                                              HCI_SSP_ENABLED);
1999                 } else {
2000                         changed = hci_dev_test_and_clear_flag(hdev,
2001                                                               HCI_SSP_ENABLED);
2002                         if (!changed)
2003                                 changed = hci_dev_test_and_clear_flag(hdev,
2004                                                                       HCI_HS_ENABLED);
2005                         else
2006                                 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2007                 }
2008
2009                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2010                 if (err < 0)
2011                         goto failed;
2012
2013                 if (changed)
2014                         err = new_settings(hdev, sk);
2015
2016                 goto failed;
2017         }
2018
2019         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2020                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2021                                       MGMT_STATUS_BUSY);
2022                 goto failed;
2023         }
2024
2025         if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2026                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2027                 goto failed;
2028         }
2029
2030         cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2031         if (!cmd)
2032                 err = -ENOMEM;
2033         else
2034                 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2035                                          set_ssp_complete);
2036
2037         if (err < 0) {
2038                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2039                                       MGMT_STATUS_FAILED);
2040
2041                 if (cmd)
2042                         mgmt_pending_remove(cmd);
2043         }
2044
2045 failed:
2046         hci_dev_unlock(hdev);
2047         return err;
2048 }
2049
2050 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2051 {
2052         struct mgmt_mode *cp = data;
2053         bool changed;
2054         u8 status;
2055         int err;
2056
2057         bt_dev_dbg(hdev, "sock %p", sk);
2058
2059         if (!IS_ENABLED(CONFIG_BT_HS))
2060                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2061                                        MGMT_STATUS_NOT_SUPPORTED);
2062
2063         status = mgmt_bredr_support(hdev);
2064         if (status)
2065                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2066
2067         if (!lmp_ssp_capable(hdev))
2068                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2069                                        MGMT_STATUS_NOT_SUPPORTED);
2070
2071         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2072                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2073                                        MGMT_STATUS_REJECTED);
2074
2075         if (cp->val != 0x00 && cp->val != 0x01)
2076                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2077                                        MGMT_STATUS_INVALID_PARAMS);
2078
2079         hci_dev_lock(hdev);
2080
2081         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2082                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2083                                       MGMT_STATUS_BUSY);
2084                 goto unlock;
2085         }
2086
2087         if (cp->val) {
2088                 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2089         } else {
2090                 if (hdev_is_powered(hdev)) {
2091                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2092                                               MGMT_STATUS_REJECTED);
2093                         goto unlock;
2094                 }
2095
2096                 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2097         }
2098
2099         err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2100         if (err < 0)
2101                 goto unlock;
2102
2103         if (changed)
2104                 err = new_settings(hdev, sk);
2105
2106 unlock:
2107         hci_dev_unlock(hdev);
2108         return err;
2109 }
2110
2111 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2112 {
2113         struct cmd_lookup match = { NULL, hdev };
2114         u8 status = mgmt_status(err);
2115
2116         bt_dev_dbg(hdev, "err %d", err);
2117
2118         if (status) {
2119                 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2120                                                         &status);
2121                 return;
2122         }
2123
2124         mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2125
2126         new_settings(hdev, match.sk);
2127
2128         if (match.sk)
2129                 sock_put(match.sk);
2130 }
2131
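     /* hci_cmd_sync callback for Set LE: when disabling, remove advertising
      * instances and stop any running advertising; when enabling, set the
      * LE flag. Then write the LE host support setting and, if LE ends up
      * enabled, refresh advertising data, scan response data and passive
      * scanning.
      */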
2132 static int set_le_sync(struct hci_dev *hdev, void *data)
2133 {
2134         struct mgmt_pending_cmd *cmd = data;
2135         struct mgmt_mode *cp = cmd->param;
2136         u8 val = !!cp->val;
2137         int err;
2138
2139         if (!val) {
2140                 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2141
2142                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2143                         hci_disable_advertising_sync(hdev);
2144
2145                 if (ext_adv_capable(hdev))
2146                         hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2147         } else {
2148                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2149         }
2150
2151         err = hci_write_le_host_supported_sync(hdev, val, 0);
2152
2153         /* Make sure the controller has a good default for
2154          * advertising data. Restrict the update to when LE
2155          * has actually been enabled. During power on, the
2156          * update in powered_update_hci will take care of it.
2157          */
2158         if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2159                 if (ext_adv_capable(hdev)) {
2160                         int status;
2161
2162                         status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2163                         if (!status)
2164                                 hci_update_scan_rsp_data_sync(hdev, 0x00);
2165                 } else {
2166                         hci_update_adv_data_sync(hdev, 0x00);
2167                         hci_update_scan_rsp_data_sync(hdev, 0x00);
2168                 }
2169
2170                 hci_update_passive_scan(hdev);
2171         }
2172
2173         return err;
2174 }
2175
2176 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2177 {
2178         struct mgmt_pending_cmd *cmd = data;
2179         u8 status = mgmt_status(err);
2180         struct sock *sk = cmd->sk;
2181
2182         if (status) {
2183                 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2184                                      cmd_status_rsp, &status);
2185                 return;
2186         }
2187
2188         mgmt_pending_remove(cmd);
2189         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2190 }
2191
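     /* hci_cmd_sync callback for Set Mesh Receiver: toggle the HCI_MESH
      * flag, record the requested AD type filter when it fits (otherwise
      * all advertising packets are forwarded) and re-program passive
      * scanning.
      */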
2192 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2193 {
2194         struct mgmt_pending_cmd *cmd = data;
2195         struct mgmt_cp_set_mesh *cp = cmd->param;
2196         size_t len = cmd->param_len;
2197
2198         memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2199
2200         if (cp->enable)
2201                 hci_dev_set_flag(hdev, HCI_MESH);
2202         else
2203                 hci_dev_clear_flag(hdev, HCI_MESH);
2204
2205         len -= sizeof(*cp);
2206
2207         /* If filters don't fit, forward all adv pkts */
2208         if (len <= sizeof(hdev->mesh_ad_types))
2209                 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2210
2211         hci_update_passive_scan_sync(hdev);
2212         return 0;
2213 }
2214
2215 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2216 {
2217         struct mgmt_cp_set_mesh *cp = data;
2218         struct mgmt_pending_cmd *cmd;
2219         int err = 0;
2220
2221         bt_dev_dbg(hdev, "sock %p", sk);
2222
2223         if (!lmp_le_capable(hdev) ||
2224             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2225                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2226                                        MGMT_STATUS_NOT_SUPPORTED);
2227
2228         if (cp->enable != 0x00 && cp->enable != 0x01)
2229                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2230                                        MGMT_STATUS_INVALID_PARAMS);
2231
2232         hci_dev_lock(hdev);
2233
2234         cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2235         if (!cmd)
2236                 err = -ENOMEM;
2237         else
2238                 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2239                                          set_mesh_complete);
2240
2241         if (err < 0) {
2242                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2243                                       MGMT_STATUS_FAILED);
2244
2245                 if (cmd)
2246                         mgmt_pending_remove(cmd);
2247         }
2248
2249         hci_dev_unlock(hdev);
2250         return err;
2251 }
2252
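     /* Runs once the mesh advertising instance has been queued: on error,
      * clear the mesh-sending flag and complete the transmission as failed;
      * otherwise arm the mesh-send-done work, allowing roughly 25 ms per
      * requested transmission.
      */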
2253 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2254 {
2255         struct mgmt_mesh_tx *mesh_tx = data;
2256         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2257         unsigned long mesh_send_interval;
2258         u8 mgmt_err = mgmt_status(err);
2259
2260         /* Report any errors here, but don't report completion */
2261
2262         if (mgmt_err) {
2263                 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2264                 /* Send Complete Error Code for handle */
2265                 mesh_send_complete(hdev, mesh_tx, false);
2266                 return;
2267         }
2268
2269         mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2270         queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2271                            mesh_send_interval);
2272 }
2273
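     /* hci_cmd_sync callback for Mesh Send: create a dedicated advertising
      * instance carrying the mesh payload (reporting busy when no
      * advertising set is free) and schedule it according to the current
      * advertising state.
      */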
2274 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2275 {
2276         struct mgmt_mesh_tx *mesh_tx = data;
2277         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2278         struct adv_info *adv, *next_instance;
2279         u8 instance = hdev->le_num_of_adv_sets + 1;
2280         u16 timeout, duration;
2281         int err = 0;
2282
2283         if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2284                 return MGMT_STATUS_BUSY;
2285
2286         timeout = 1000;
2287         duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2288         adv = hci_add_adv_instance(hdev, instance, 0,
2289                                    send->adv_data_len, send->adv_data,
2290                                    0, NULL,
2291                                    timeout, duration,
2292                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
2293                                    hdev->le_adv_min_interval,
2294                                    hdev->le_adv_max_interval,
2295                                    mesh_tx->handle);
2296
2297         if (!IS_ERR(adv))
2298                 mesh_tx->instance = instance;
2299         else
2300                 err = PTR_ERR(adv);
2301
2302         if (hdev->cur_adv_instance == instance) {
2303                 /* If the currently advertised instance is being changed then
2304                  * cancel the current advertising and schedule the next
2305                  * instance. If there is only one instance then the overridden
2306                  * advertising data will be visible right away.
2307                  */
2308                 cancel_adv_timeout(hdev);
2309
2310                 next_instance = hci_get_next_instance(hdev, instance);
2311                 if (next_instance)
2312                         instance = next_instance->instance;
2313                 else
2314                         instance = 0;
2315         } else if (hdev->adv_instance_timeout) {
2316                 /* Immediately advertise the new instance if no other is active, or
2317                  * let it go out naturally from the queue if ADV is already running
2318                  */
2319                 instance = 0;
2320         }
2321
2322         if (instance)
2323                 return hci_schedule_adv_instance_sync(hdev, instance, true);
2324
2325         return err;
2326 }
2327
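     /* mgmt_mesh_foreach() callback: record the handle of each outstanding
      * mesh transmission in the Read Mesh Features reply, up to the
      * advertised maximum.
      */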
2328 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2329 {
2330         struct mgmt_rp_mesh_read_features *rp = data;
2331
2332         if (rp->used_handles >= rp->max_handles)
2333                 return;
2334
2335         rp->handles[rp->used_handles++] = mesh_tx->handle;
2336 }
2337
2338 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2339                          void *data, u16 len)
2340 {
2341         struct mgmt_rp_mesh_read_features rp;
2342
2343         if (!lmp_le_capable(hdev) ||
2344             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2345                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2346                                        MGMT_STATUS_NOT_SUPPORTED);
2347
2348         memset(&rp, 0, sizeof(rp));
2349         rp.index = cpu_to_le16(hdev->id);
2350         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2351                 rp.max_handles = MESH_HANDLES_MAX;
2352
2353         hci_dev_lock(hdev);
2354
2355         if (rp.max_handles)
2356                 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2357
2358         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2359                           rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2360
2361         hci_dev_unlock(hdev);
2362         return 0;
2363 }
2364
2365 static int send_cancel(struct hci_dev *hdev, void *data)
2366 {
2367         struct mgmt_pending_cmd *cmd = data;
2368         struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2369         struct mgmt_mesh_tx *mesh_tx;
2370
2371         if (!cancel->handle) {
2372                 do {
2373                         mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2374
2375                         if (mesh_tx)
2376                                 mesh_send_complete(hdev, mesh_tx, false);
2377                 } while (mesh_tx);
2378         } else {
2379                 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2380
2381                 if (mesh_tx && mesh_tx->sk == cmd->sk)
2382                         mesh_send_complete(hdev, mesh_tx, false);
2383         }
2384
2385         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2386                           0, NULL, 0);
2387         mgmt_pending_free(cmd);
2388
2389         return 0;
2390 }
2391
2392 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2393                             void *data, u16 len)
2394 {
2395         struct mgmt_pending_cmd *cmd;
2396         int err;
2397
2398         if (!lmp_le_capable(hdev) ||
2399             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2400                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2401                                        MGMT_STATUS_NOT_SUPPORTED);
2402
2403         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2404                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2405                                        MGMT_STATUS_REJECTED);
2406
2407         hci_dev_lock(hdev);
2408         cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2409         if (!cmd)
2410                 err = -ENOMEM;
2411         else
2412                 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2413
2414         if (err < 0) {
2415                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2416                                       MGMT_STATUS_FAILED);
2417
2418                 if (cmd)
2419                         mgmt_pending_free(cmd);
2420         }
2421
2422         hci_dev_unlock(hdev);
2423         return err;
2424 }
2425
2426 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2427 {
2428         struct mgmt_mesh_tx *mesh_tx;
2429         struct mgmt_cp_mesh_send *send = data;
2430         struct mgmt_rp_mesh_read_features rp;
2431         bool sending;
2432         int err = 0;
2433
2434         if (!lmp_le_capable(hdev) ||
2435             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2436                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2437                                        MGMT_STATUS_NOT_SUPPORTED);
2438         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2439             len <= MGMT_MESH_SEND_SIZE ||
2440             len > (MGMT_MESH_SEND_SIZE + 31))
2441                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2442                                        MGMT_STATUS_REJECTED);
2443
2444         hci_dev_lock(hdev);
2445
2446         memset(&rp, 0, sizeof(rp));
2447         rp.max_handles = MESH_HANDLES_MAX;
2448
2449         mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2450
2451         if (rp.max_handles <= rp.used_handles) {
2452                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2453                                       MGMT_STATUS_BUSY);
2454                 goto done;
2455         }
2456
2457         sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2458         mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2459
2460         if (!mesh_tx)
2461                 err = -ENOMEM;
2462         else if (!sending)
2463                 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2464                                          mesh_send_start_complete);
2465
2466         if (err < 0) {
2467                 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2468                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2469                                       MGMT_STATUS_FAILED);
2470
2471                 if (mesh_tx) {
2472                         if (sending)
2473                                 mgmt_mesh_remove(mesh_tx);
2474                 }
2475         } else {
2476                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2477
2478                 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2479                                   &mesh_tx->handle, 1);
2480         }
2481
2482 done:
2483         hci_dev_unlock(hdev);
2484         return err;
2485 }
2486
2487 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2488 {
2489         struct mgmt_mode *cp = data;
2490         struct mgmt_pending_cmd *cmd;
2491         int err;
2492         u8 val, enabled;
2493
2494         bt_dev_dbg(hdev, "sock %p", sk);
2495
2496         if (!lmp_le_capable(hdev))
2497                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2498                                        MGMT_STATUS_NOT_SUPPORTED);
2499
2500         if (cp->val != 0x00 && cp->val != 0x01)
2501                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2502                                        MGMT_STATUS_INVALID_PARAMS);
2503
2504         /* Bluetooth single-mode LE-only controllers, or dual-mode
2505          * controllers configured as LE-only devices, do not allow
2506          * switching LE off. These either have LE enabled explicitly
2507          * or have had BR/EDR switched off previously.
2508          *
2509          * When trying to enable LE while it is already enabled,
2510          * gracefully send a positive response. Trying to disable it,
2511          * however, will result in rejection.
2512          */
2513         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2514                 if (cp->val == 0x01)
2515                         return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2516
2517                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2518                                        MGMT_STATUS_REJECTED);
2519         }
2520
2521         hci_dev_lock(hdev);
2522
2523         val = !!cp->val;
2524         enabled = lmp_host_le_capable(hdev);
2525
2526         if (!hdev_is_powered(hdev) || val == enabled) {
2527                 bool changed = false;
2528
2529                 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2530                         hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2531                         changed = true;
2532                 }
2533
2534                 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2535                         hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2536                         changed = true;
2537                 }
2538
2539                 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2540                 if (err < 0)
2541                         goto unlock;
2542
2543                 if (changed)
2544                         err = new_settings(hdev, sk);
2545
2546                 goto unlock;
2547         }
2548
2549         if (pending_find(MGMT_OP_SET_LE, hdev) ||
2550             pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2551                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2552                                       MGMT_STATUS_BUSY);
2553                 goto unlock;
2554         }
2555
2556         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2557         if (!cmd)
2558                 err = -ENOMEM;
2559         else
2560                 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2561                                          set_le_complete);
2562
2563         if (err < 0) {
2564                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2565                                       MGMT_STATUS_FAILED);
2566
2567                 if (cmd)
2568                         mgmt_pending_remove(cmd);
2569         }
2570
2571 unlock:
2572         hci_dev_unlock(hdev);
2573         return err;
2574 }
2575
2576 /* This is a helper function to test for pending mgmt commands that can
2577  * cause CoD or EIR HCI commands. Only one such pending mgmt command is
2578  * allowed at a time, since otherwise we cannot easily track what the
2579  * current values are or will be, and therefore cannot calculate whether
2580  * a new HCI command needs to be sent and, if so, with what value.
2581  */
2582 static bool pending_eir_or_class(struct hci_dev *hdev)
2583 {
2584         struct mgmt_pending_cmd *cmd;
2585
2586         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2587                 switch (cmd->opcode) {
2588                 case MGMT_OP_ADD_UUID:
2589                 case MGMT_OP_REMOVE_UUID:
2590                 case MGMT_OP_SET_DEV_CLASS:
2591                 case MGMT_OP_SET_POWERED:
2592                         return true;
2593                 }
2594         }
2595
2596         return false;
2597 }
2598
2599 static const u8 bluetooth_base_uuid[] = {
2600                         0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2601                         0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2602 };
2603
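     /* Return the UUID size in bits: 128 unless the UUID is an alias of the
      * Bluetooth base UUID, in which case it is 32 or 16 depending on
      * whether the value in its last four bytes exceeds 0xffff.
      */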
2604 static u8 get_uuid_size(const u8 *uuid)
2605 {
2606         u32 val;
2607
2608         if (memcmp(uuid, bluetooth_base_uuid, 12))
2609                 return 128;
2610
2611         val = get_unaligned_le32(&uuid[12]);
2612         if (val > 0xffff)
2613                 return 32;
2614
2615         return 16;
2616 }
2617
2618 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2619 {
2620         struct mgmt_pending_cmd *cmd = data;
2621
2622         bt_dev_dbg(hdev, "err %d", err);
2623
2624         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2625                           mgmt_status(err), hdev->dev_class, 3);
2626
2627         mgmt_pending_free(cmd);
2628 }
2629
2630 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2631 {
2632         int err;
2633
2634         err = hci_update_class_sync(hdev);
2635         if (err)
2636                 return err;
2637
2638         return hci_update_eir_sync(hdev);
2639 }
2640
2641 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2642 {
2643         struct mgmt_cp_add_uuid *cp = data;
2644         struct mgmt_pending_cmd *cmd;
2645         struct bt_uuid *uuid;
2646         int err;
2647
2648         bt_dev_dbg(hdev, "sock %p", sk);
2649
2650         hci_dev_lock(hdev);
2651
2652         if (pending_eir_or_class(hdev)) {
2653                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2654                                       MGMT_STATUS_BUSY);
2655                 goto failed;
2656         }
2657
2658         uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2659         if (!uuid) {
2660                 err = -ENOMEM;
2661                 goto failed;
2662         }
2663
2664         memcpy(uuid->uuid, cp->uuid, 16);
2665         uuid->svc_hint = cp->svc_hint;
2666         uuid->size = get_uuid_size(cp->uuid);
2667
2668         list_add_tail(&uuid->list, &hdev->uuids);
2669
2670         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2671         if (!cmd) {
2672                 err = -ENOMEM;
2673                 goto failed;
2674         }
2675
2676         err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2677         if (err < 0) {
2678                 mgmt_pending_free(cmd);
2679                 goto failed;
2680         }
2681
2682 failed:
2683         hci_dev_unlock(hdev);
2684         return err;
2685 }
2686
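     /* If the controller is powered and the service cache is not already
      * active, arm the cache timer and return true; the caller can then
      * skip the immediate class/EIR update.
      */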
2687 static bool enable_service_cache(struct hci_dev *hdev)
2688 {
2689         if (!hdev_is_powered(hdev))
2690                 return false;
2691
2692         if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2693                 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2694                                    CACHE_TIMEOUT);
2695                 return true;
2696         }
2697
2698         return false;
2699 }
2700
2701 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2702 {
2703         int err;
2704
2705         err = hci_update_class_sync(hdev);
2706         if (err)
2707                 return err;
2708
2709         return hci_update_eir_sync(hdev);
2710 }
2711
2712 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2713                        u16 len)
2714 {
2715         struct mgmt_cp_remove_uuid *cp = data;
2716         struct mgmt_pending_cmd *cmd;
2717         struct bt_uuid *match, *tmp;
2718         static const u8 bt_uuid_any[] = {
2719                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2720         };
2721         int err, found;
2722
2723         bt_dev_dbg(hdev, "sock %p", sk);
2724
2725         hci_dev_lock(hdev);
2726
2727         if (pending_eir_or_class(hdev)) {
2728                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2729                                       MGMT_STATUS_BUSY);
2730                 goto unlock;
2731         }
2732
2733         if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2734                 hci_uuids_clear(hdev);
2735
2736                 if (enable_service_cache(hdev)) {
2737                         err = mgmt_cmd_complete(sk, hdev->id,
2738                                                 MGMT_OP_REMOVE_UUID,
2739                                                 0, hdev->dev_class, 3);
2740                         goto unlock;
2741                 }
2742
2743                 goto update_class;
2744         }
2745
2746         found = 0;
2747
2748         list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2749                 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2750                         continue;
2751
2752                 list_del(&match->list);
2753                 kfree(match);
2754                 found++;
2755         }
2756
2757         if (found == 0) {
2758                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2759                                       MGMT_STATUS_INVALID_PARAMS);
2760                 goto unlock;
2761         }
2762
2763 update_class:
2764         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2765         if (!cmd) {
2766                 err = -ENOMEM;
2767                 goto unlock;
2768         }
2769
2770         err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2771                                  mgmt_class_complete);
2772         if (err < 0)
2773                 mgmt_pending_free(cmd);
2774
2775 unlock:
2776         hci_dev_unlock(hdev);
2777         return err;
2778 }
2779
2780 static int set_class_sync(struct hci_dev *hdev, void *data)
2781 {
2782         int err = 0;
2783
2784         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2785                 cancel_delayed_work_sync(&hdev->service_cache);
2786                 err = hci_update_eir_sync(hdev);
2787         }
2788
2789         if (err)
2790                 return err;
2791
2792         return hci_update_class_sync(hdev);
2793 }
2794
2795 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2796                          u16 len)
2797 {
2798         struct mgmt_cp_set_dev_class *cp = data;
2799         struct mgmt_pending_cmd *cmd;
2800         int err;
2801
2802         bt_dev_dbg(hdev, "sock %p", sk);
2803
2804         if (!lmp_bredr_capable(hdev))
2805                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2806                                        MGMT_STATUS_NOT_SUPPORTED);
2807
2808         hci_dev_lock(hdev);
2809
2810         if (pending_eir_or_class(hdev)) {
2811                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2812                                       MGMT_STATUS_BUSY);
2813                 goto unlock;
2814         }
2815
2816         if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2817                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2818                                       MGMT_STATUS_INVALID_PARAMS);
2819                 goto unlock;
2820         }
2821
2822         hdev->major_class = cp->major;
2823         hdev->minor_class = cp->minor;
2824
2825         if (!hdev_is_powered(hdev)) {
2826                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2827                                         hdev->dev_class, 3);
2828                 goto unlock;
2829         }
2830
2831         cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2832         if (!cmd) {
2833                 err = -ENOMEM;
2834                 goto unlock;
2835         }
2836
2837         err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2838                                  mgmt_class_complete);
2839         if (err < 0)
2840                 mgmt_pending_free(cmd);
2841
2842 unlock:
2843         hci_dev_unlock(hdev);
2844         return err;
2845 }
2846
2847 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2848                           u16 len)
2849 {
2850         struct mgmt_cp_load_link_keys *cp = data;
2851         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2852                                    sizeof(struct mgmt_link_key_info));
2853         u16 key_count, expected_len;
2854         bool changed;
2855         int i;
2856
2857         bt_dev_dbg(hdev, "sock %p", sk);
2858
2859         if (!lmp_bredr_capable(hdev))
2860                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2861                                        MGMT_STATUS_NOT_SUPPORTED);
2862
2863         key_count = __le16_to_cpu(cp->key_count);
2864         if (key_count > max_key_count) {
2865                 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2866                            key_count);
2867                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2868                                        MGMT_STATUS_INVALID_PARAMS);
2869         }
2870
2871         expected_len = struct_size(cp, keys, key_count);
2872         if (expected_len != len) {
2873                 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2874                            expected_len, len);
2875                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2876                                        MGMT_STATUS_INVALID_PARAMS);
2877         }
2878
2879         if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2880                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2881                                        MGMT_STATUS_INVALID_PARAMS);
2882
2883         bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2884                    key_count);
2885
2886         for (i = 0; i < key_count; i++) {
2887                 struct mgmt_link_key_info *key = &cp->keys[i];
2888
2889                 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2890                         return mgmt_cmd_status(sk, hdev->id,
2891                                                MGMT_OP_LOAD_LINK_KEYS,
2892                                                MGMT_STATUS_INVALID_PARAMS);
2893         }
2894
2895         hci_dev_lock(hdev);
2896
2897         hci_link_keys_clear(hdev);
2898
2899         if (cp->debug_keys)
2900                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2901         else
2902                 changed = hci_dev_test_and_clear_flag(hdev,
2903                                                       HCI_KEEP_DEBUG_KEYS);
2904
2905         if (changed)
2906                 new_settings(hdev, NULL);
2907
2908         for (i = 0; i < key_count; i++) {
2909                 struct mgmt_link_key_info *key = &cp->keys[i];
2910
2911                 if (hci_is_blocked_key(hdev,
2912                                        HCI_BLOCKED_KEY_TYPE_LINKKEY,
2913                                        key->val)) {
2914                         bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2915                                     &key->addr.bdaddr);
2916                         continue;
2917                 }
2918
2919                 /* Always ignore debug keys and require a new pairing if
2920                  * the user wants to use them.
2921                  */
2922                 if (key->type == HCI_LK_DEBUG_COMBINATION)
2923                         continue;
2924
2925                 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2926                                  key->type, key->pin_len, NULL);
2927         }
2928
2929         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2930
2931         hci_dev_unlock(hdev);
2932
2933         return 0;
2934 }
2935
2936 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2937                            u8 addr_type, struct sock *skip_sk)
2938 {
2939         struct mgmt_ev_device_unpaired ev;
2940
2941         bacpy(&ev.addr.bdaddr, bdaddr);
2942         ev.addr.type = addr_type;
2943
2944         return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2945                           skip_sk);
2946 }
2947
2948 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2949 {
2950         struct mgmt_pending_cmd *cmd = data;
2951         struct mgmt_cp_unpair_device *cp = cmd->param;
2952
2953         if (!err)
2954                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2955
2956         cmd->cmd_complete(cmd, err);
2957         mgmt_pending_free(cmd);
2958 }
2959
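     /* hci_cmd_sync callback for Unpair Device: look up the BR/EDR or LE
      * connection to the address being unpaired and, if one exists, abort
      * it with the "remote user terminated connection" reason.
      */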
2960 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2961 {
2962         struct mgmt_pending_cmd *cmd = data;
2963         struct mgmt_cp_unpair_device *cp = cmd->param;
2964         struct hci_conn *conn;
2965
2966         if (cp->addr.type == BDADDR_BREDR)
2967                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2968                                                &cp->addr.bdaddr);
2969         else
2970                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2971                                                le_addr_type(cp->addr.type));
2972
2973         if (!conn)
2974                 return 0;
2975
2976         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2977 }
2978
2979 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2980                          u16 len)
2981 {
2982         struct mgmt_cp_unpair_device *cp = data;
2983         struct mgmt_rp_unpair_device rp;
2984         struct hci_conn_params *params;
2985         struct mgmt_pending_cmd *cmd;
2986         struct hci_conn *conn;
2987         u8 addr_type;
2988         int err;
2989
2990         memset(&rp, 0, sizeof(rp));
2991         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2992         rp.addr.type = cp->addr.type;
2993
2994         if (!bdaddr_type_is_valid(cp->addr.type))
2995                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2996                                          MGMT_STATUS_INVALID_PARAMS,
2997                                          &rp, sizeof(rp));
2998
2999         if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3000                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3001                                          MGMT_STATUS_INVALID_PARAMS,
3002                                          &rp, sizeof(rp));
3003
3004         hci_dev_lock(hdev);
3005
3006         if (!hdev_is_powered(hdev)) {
3007                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3008                                         MGMT_STATUS_NOT_POWERED, &rp,
3009                                         sizeof(rp));
3010                 goto unlock;
3011         }
3012
3013         if (cp->addr.type == BDADDR_BREDR) {
3014                 /* If disconnection is requested, then look up the
3015                  * connection. If the remote device is connected, the
3016                  * connection will later be used to terminate the link.
3017                  *
3018                  * Setting it to NULL explicitly means the link will
3019                  * not be terminated.
3020                  */
3021                 if (cp->disconnect)
3022                         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3023                                                        &cp->addr.bdaddr);
3024                 else
3025                         conn = NULL;
3026
3027                 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3028                 if (err < 0) {
3029                         err = mgmt_cmd_complete(sk, hdev->id,
3030                                                 MGMT_OP_UNPAIR_DEVICE,
3031                                                 MGMT_STATUS_NOT_PAIRED, &rp,
3032                                                 sizeof(rp));
3033                         goto unlock;
3034                 }
3035
3036                 goto done;
3037         }
3038
3039         /* LE address type */
3040         addr_type = le_addr_type(cp->addr.type);
3041
3042         /* Abort any ongoing SMP pairing. Removes the LTK and IRK if they exist. */
3043         err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3044         if (err < 0) {
3045                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3046                                         MGMT_STATUS_NOT_PAIRED, &rp,
3047                                         sizeof(rp));
3048                 goto unlock;
3049         }
3050
3051         conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3052         if (!conn) {
3053                 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3054                 goto done;
3055         }
3056
3058         /* Defer clearing the connection parameters until the connection
3059          * closes, to give a chance of keeping them if re-pairing happens.
3060          */
3061         set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3062
3063         /* Disable auto-connection parameters if present */
3064         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3065         if (params) {
3066                 if (params->explicit_connect)
3067                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3068                 else
3069                         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3070         }
3071
3072         /* If disconnection is not requested, then clear the connection
3073          * variable so that the link is not terminated.
3074          */
3075         if (!cp->disconnect)
3076                 conn = NULL;
3077
3078 done:
3079         /* If the connection variable is set, then termination of the
3080          * link is requested.
3081          */
3082         if (!conn) {
3083                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3084                                         &rp, sizeof(rp));
3085                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3086                 goto unlock;
3087         }
3088
3089         cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3090                                sizeof(*cp));
3091         if (!cmd) {
3092                 err = -ENOMEM;
3093                 goto unlock;
3094         }
3095
3096         cmd->cmd_complete = addr_cmd_complete;
3097
3098         err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3099                                  unpair_device_complete);
3100         if (err < 0)
3101                 mgmt_pending_free(cmd);
3102
3103 unlock:
3104         hci_dev_unlock(hdev);
3105         return err;
3106 }
3107
3108 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3109                       u16 len)
3110 {
3111         struct mgmt_cp_disconnect *cp = data;
3112         struct mgmt_rp_disconnect rp;
3113         struct mgmt_pending_cmd *cmd;
3114         struct hci_conn *conn;
3115         int err;
3116
3117         bt_dev_dbg(hdev, "sock %p", sk);
3118
3119         memset(&rp, 0, sizeof(rp));
3120         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3121         rp.addr.type = cp->addr.type;
3122
3123         if (!bdaddr_type_is_valid(cp->addr.type))
3124                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3125                                          MGMT_STATUS_INVALID_PARAMS,
3126                                          &rp, sizeof(rp));
3127
3128         hci_dev_lock(hdev);
3129
3130         if (!test_bit(HCI_UP, &hdev->flags)) {
3131                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3132                                         MGMT_STATUS_NOT_POWERED, &rp,
3133                                         sizeof(rp));
3134                 goto failed;
3135         }
3136
3137         if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3138                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3139                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3140                 goto failed;
3141         }
3142
3143         if (cp->addr.type == BDADDR_BREDR)
3144                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3145                                                &cp->addr.bdaddr);
3146         else
3147                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3148                                                le_addr_type(cp->addr.type));
3149
3150         if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3151                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3152                                         MGMT_STATUS_NOT_CONNECTED, &rp,
3153                                         sizeof(rp));
3154                 goto failed;
3155         }
3156
3157         cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3158         if (!cmd) {
3159                 err = -ENOMEM;
3160                 goto failed;
3161         }
3162
3163         cmd->cmd_complete = generic_cmd_complete;
3164
3165         err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3166         if (err < 0)
3167                 mgmt_pending_remove(cmd);
3168
3169 failed:
3170         hci_dev_unlock(hdev);
3171         return err;
3172 }
3173
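/* Map an HCI link type and address type to the corresponding BDADDR_*
 * address type used in management messages.
 */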
3174 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3175 {
3176         switch (link_type) {
3177         case LE_LINK:
3178                 switch (addr_type) {
3179                 case ADDR_LE_DEV_PUBLIC:
3180                         return BDADDR_LE_PUBLIC;
3181
3182                 default:
3183                         /* Fallback to LE Random address type */
3184                         return BDADDR_LE_RANDOM;
3185                 }
3186
3187         default:
3188                 /* Fallback to BR/EDR type */
3189                 return BDADDR_BREDR;
3190         }
3191 }
3192
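/* Build the Get Connections reply: count the connections that have been
 * reported to userspace (HCI_CONN_MGMT_CONNECTED) and copy their
 * addresses, skipping SCO/eSCO links which are never exposed here.
 */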
3193 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3194                            u16 data_len)
3195 {
3196         struct mgmt_rp_get_connections *rp;
3197         struct hci_conn *c;
3198         int err;
3199         u16 i;
3200
3201         bt_dev_dbg(hdev, "sock %p", sk);
3202
3203         hci_dev_lock(hdev);
3204
3205         if (!hdev_is_powered(hdev)) {
3206                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3207                                       MGMT_STATUS_NOT_POWERED);
3208                 goto unlock;
3209         }
3210
3211         i = 0;
3212         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3213                 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3214                         i++;
3215         }
3216
3217         rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3218         if (!rp) {
3219                 err = -ENOMEM;
3220                 goto unlock;
3221         }
3222
3223         i = 0;
3224         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3225                 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3226                         continue;
3227                 bacpy(&rp->addr[i].bdaddr, &c->dst);
3228                 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3229                 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3230                         continue;
3231                 i++;
3232         }
3233
3234         rp->conn_count = cpu_to_le16(i);
3235
3236         /* Recalculate the length in case of filtered SCO connections, etc. */
3237         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3238                                 struct_size(rp, addr, i));
3239
3240         kfree(rp);
3241
3242 unlock:
3243         hci_dev_unlock(hdev);
3244         return err;
3245 }
3246
3247 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3248                                    struct mgmt_cp_pin_code_neg_reply *cp)
3249 {
3250         struct mgmt_pending_cmd *cmd;
3251         int err;
3252
3253         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3254                                sizeof(*cp));
3255         if (!cmd)
3256                 return -ENOMEM;
3257
3258         cmd->cmd_complete = addr_cmd_complete;
3259
3260         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3261                            sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3262         if (err < 0)
3263                 mgmt_pending_remove(cmd);
3264
3265         return err;
3266 }
3267
3268 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3269                           u16 len)
3270 {
3271         struct hci_conn *conn;
3272         struct mgmt_cp_pin_code_reply *cp = data;
3273         struct hci_cp_pin_code_reply reply;
3274         struct mgmt_pending_cmd *cmd;
3275         int err;
3276
3277         bt_dev_dbg(hdev, "sock %p", sk);
3278
3279         hci_dev_lock(hdev);
3280
3281         if (!hdev_is_powered(hdev)) {
3282                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3283                                       MGMT_STATUS_NOT_POWERED);
3284                 goto failed;
3285         }
3286
3287         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3288         if (!conn) {
3289                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3290                                       MGMT_STATUS_NOT_CONNECTED);
3291                 goto failed;
3292         }
3293
3294         if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3295                 struct mgmt_cp_pin_code_neg_reply ncp;
3296
3297                 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3298
3299                 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3300
3301                 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3302                 if (err >= 0)
3303                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3304                                               MGMT_STATUS_INVALID_PARAMS);
3305
3306                 goto failed;
3307         }
3308
3309         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3310         if (!cmd) {
3311                 err = -ENOMEM;
3312                 goto failed;
3313         }
3314
3315         cmd->cmd_complete = addr_cmd_complete;
3316
3317         bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3318         reply.pin_len = cp->pin_len;
3319         memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3320
3321         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3322         if (err < 0)
3323                 mgmt_pending_remove(cmd);
3324
3325 failed:
3326         hci_dev_unlock(hdev);
3327         return err;
3328 }
3329
3330 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3331                              u16 len)
3332 {
3333         struct mgmt_cp_set_io_capability *cp = data;
3334
3335         bt_dev_dbg(hdev, "sock %p", sk);
3336
3337         if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3338                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3339                                        MGMT_STATUS_INVALID_PARAMS);
3340
3341         hci_dev_lock(hdev);
3342
3343         hdev->io_capability = cp->io_capability;
3344
3345         bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3346
3347         hci_dev_unlock(hdev);
3348
3349         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3350                                  NULL, 0);
3351 }
3352
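/* Find the pending MGMT_OP_PAIR_DEVICE command, if any, whose user_data
 * points at this connection.
 */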
3353 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3354 {
3355         struct hci_dev *hdev = conn->hdev;
3356         struct mgmt_pending_cmd *cmd;
3357
3358         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3359                 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3360                         continue;
3361
3362                 if (cmd->user_data != conn)
3363                         continue;
3364
3365                 return cmd;
3366         }
3367
3368         return NULL;
3369 }
3370
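/* Complete a pending Pair Device command: reply to userspace, detach
 * the pairing callbacks from the connection and drop the references
 * taken on it.
 */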
3371 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3372 {
3373         struct mgmt_rp_pair_device rp;
3374         struct hci_conn *conn = cmd->user_data;
3375         int err;
3376
3377         bacpy(&rp.addr.bdaddr, &conn->dst);
3378         rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3379
3380         err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3381                                 status, &rp, sizeof(rp));
3382
3383         /* So we don't get further callbacks for this connection */
3384         conn->connect_cfm_cb = NULL;
3385         conn->security_cfm_cb = NULL;
3386         conn->disconn_cfm_cb = NULL;
3387
3388         hci_conn_drop(conn);
3389
3390         /* The device is paired so there is no need to remove
3391          * its connection parameters anymore.
3392          */
3393         clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3394
3395         hci_conn_put(conn);
3396
3397         return err;
3398 }
3399
3400 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3401 {
3402         u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3403         struct mgmt_pending_cmd *cmd;
3404
3405         cmd = find_pairing(conn);
3406         if (cmd) {
3407                 cmd->cmd_complete(cmd, status);
3408                 mgmt_pending_remove(cmd);
3409         }
3410 }
3411
3412 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3413 {
3414         struct mgmt_pending_cmd *cmd;
3415
3416         BT_DBG("status %u", status);
3417
3418         cmd = find_pairing(conn);
3419         if (!cmd) {
3420                 BT_DBG("Unable to find a pending command");
3421                 return;
3422         }
3423
3424         cmd->cmd_complete(cmd, mgmt_status(status));
3425         mgmt_pending_remove(cmd);
3426 }
3427
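/* For LE only a failure is acted upon here; a successful connection by
 * itself is not proof that pairing finished, so success is reported
 * through mgmt_smp_complete() instead.
 */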
3428 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3429 {
3430         struct mgmt_pending_cmd *cmd;
3431
3432         BT_DBG("status %u", status);
3433
3434         if (!status)
3435                 return;
3436
3437         cmd = find_pairing(conn);
3438         if (!cmd) {
3439                 BT_DBG("Unable to find a pending command");
3440                 return;
3441         }
3442
3443         cmd->cmd_complete(cmd, mgmt_status(status));
3444         mgmt_pending_remove(cmd);
3445 }
3446
3447 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3448                        u16 len)
3449 {
3450         struct mgmt_cp_pair_device *cp = data;
3451         struct mgmt_rp_pair_device rp;
3452         struct mgmt_pending_cmd *cmd;
3453         u8 sec_level, auth_type;
3454         struct hci_conn *conn;
3455         int err;
3456
3457         bt_dev_dbg(hdev, "sock %p", sk);
3458
3459         memset(&rp, 0, sizeof(rp));
3460         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3461         rp.addr.type = cp->addr.type;
3462
3463         if (!bdaddr_type_is_valid(cp->addr.type))
3464                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3465                                          MGMT_STATUS_INVALID_PARAMS,
3466                                          &rp, sizeof(rp));
3467
3468         if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3469                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3470                                          MGMT_STATUS_INVALID_PARAMS,
3471                                          &rp, sizeof(rp));
3472
3473         hci_dev_lock(hdev);
3474
3475         if (!hdev_is_powered(hdev)) {
3476                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3477                                         MGMT_STATUS_NOT_POWERED, &rp,
3478                                         sizeof(rp));
3479                 goto unlock;
3480         }
3481
3482         if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3483                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3484                                         MGMT_STATUS_ALREADY_PAIRED, &rp,
3485                                         sizeof(rp));
3486                 goto unlock;
3487         }
3488
3489         sec_level = BT_SECURITY_MEDIUM;
3490         auth_type = HCI_AT_DEDICATED_BONDING;
3491
3492         if (cp->addr.type == BDADDR_BREDR) {
3493                 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3494                                        auth_type, CONN_REASON_PAIR_DEVICE);
3495         } else {
3496                 u8 addr_type = le_addr_type(cp->addr.type);
3497                 struct hci_conn_params *p;
3498
3499                 /* When pairing a new device, it is expected that this
3500                  * device will be remembered for future connections.
3501                  * Adding the connection parameter information ahead of
3502                  * time allows tracking of the peripheral's preferred
3503                  * values and speeds up further connection establishment.
3504                  *
3505                  * If connection parameters already exist, they are kept
3506                  * and hci_conn_params_add() does nothing.
3507                  */
3508                 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
                     /* hci_conn_params_add() can return NULL on allocation failure */
                     if (!p) {
                             err = -ENOMEM;
                             goto unlock;
                     }
3509
3510                 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3511                         p->auto_connect = HCI_AUTO_CONN_DISABLED;
3512
3513                 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3514                                            sec_level, HCI_LE_CONN_TIMEOUT,
3515                                            CONN_REASON_PAIR_DEVICE);
3516         }
3517
3518         if (IS_ERR(conn)) {
3519                 int status;
3520
3521                 if (PTR_ERR(conn) == -EBUSY)
3522                         status = MGMT_STATUS_BUSY;
3523                 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3524                         status = MGMT_STATUS_NOT_SUPPORTED;
3525                 else if (PTR_ERR(conn) == -ECONNREFUSED)
3526                         status = MGMT_STATUS_REJECTED;
3527                 else
3528                         status = MGMT_STATUS_CONNECT_FAILED;
3529
3530                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3531                                         status, &rp, sizeof(rp));
3532                 goto unlock;
3533         }
3534
3535         if (conn->connect_cfm_cb) {
3536                 hci_conn_drop(conn);
3537                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3538                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3539                 goto unlock;
3540         }
3541
3542         cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3543         if (!cmd) {
3544                 err = -ENOMEM;
3545                 hci_conn_drop(conn);
3546                 goto unlock;
3547         }
3548
3549         cmd->cmd_complete = pairing_complete;
3550
3551         /* For LE, just connecting isn't proof that the pairing finished */
3552         if (cp->addr.type == BDADDR_BREDR) {
3553                 conn->connect_cfm_cb = pairing_complete_cb;
3554                 conn->security_cfm_cb = pairing_complete_cb;
3555                 conn->disconn_cfm_cb = pairing_complete_cb;
3556         } else {
3557                 conn->connect_cfm_cb = le_pairing_complete_cb;
3558                 conn->security_cfm_cb = le_pairing_complete_cb;
3559                 conn->disconn_cfm_cb = le_pairing_complete_cb;
3560         }
3561
3562         conn->io_capability = cp->io_cap;
3563         cmd->user_data = hci_conn_get(conn);
3564
3565         if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3566             hci_conn_security(conn, sec_level, auth_type, true)) {
3567                 cmd->cmd_complete(cmd, 0);
3568                 mgmt_pending_remove(cmd);
3569         }
3570
3571         err = 0;
3572
3573 unlock:
3574         hci_dev_unlock(hdev);
3575         return err;
3576 }
3577
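/* The caller packs the connection handle into the data pointer with
 * ERR_PTR(); look the connection up again here since it may already be
 * gone by the time this runs on the hci_sync queue.
 */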
3578 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3579 {
3580         struct hci_conn *conn;
3581         u16 handle = PTR_ERR(data);
3582
3583         conn = hci_conn_hash_lookup_handle(hdev, handle);
3584         if (!conn)
3585                 return 0;
3586
3587         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
3588 }
3589
3590 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3591                               u16 len)
3592 {
3593         struct mgmt_addr_info *addr = data;
3594         struct mgmt_pending_cmd *cmd;
3595         struct hci_conn *conn;
3596         int err;
3597
3598         bt_dev_dbg(hdev, "sock %p", sk);
3599
3600         hci_dev_lock(hdev);
3601
3602         if (!hdev_is_powered(hdev)) {
3603                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3604                                       MGMT_STATUS_NOT_POWERED);
3605                 goto unlock;
3606         }
3607
3608         cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3609         if (!cmd) {
3610                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3611                                       MGMT_STATUS_INVALID_PARAMS);
3612                 goto unlock;
3613         }
3614
3615         conn = cmd->user_data;
3616
3617         if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3618                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3619                                       MGMT_STATUS_INVALID_PARAMS);
3620                 goto unlock;
3621         }
3622
3623         cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3624         mgmt_pending_remove(cmd);
3625
3626         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3627                                 addr, sizeof(*addr));
3628
3629         /* Since the user doesn't want to proceed with the connection,
3630          * abort any ongoing pairing and then terminate the link if it was
3631          * created because of the Pair Device action.
3632          */
3633         if (addr->type == BDADDR_BREDR)
3634                 hci_remove_link_key(hdev, &addr->bdaddr);
3635         else
3636                 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3637                                               le_addr_type(addr->type));
3638
3639         if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3640                 hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3641                                    NULL);
3642
3643 unlock:
3644         hci_dev_unlock(hdev);
3645         return err;
3646 }
3647
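/* Common handler for the user confirmation, passkey and PIN code
 * replies: LE responses are handed to SMP directly, while BR/EDR
 * responses are sent to the controller as HCI commands (passkey
 * replies also carry the passkey itself).
 */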
3648 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3649                              struct mgmt_addr_info *addr, u16 mgmt_op,
3650                              u16 hci_op, __le32 passkey)
3651 {
3652         struct mgmt_pending_cmd *cmd;
3653         struct hci_conn *conn;
3654         int err;
3655
3656         hci_dev_lock(hdev);
3657
3658         if (!hdev_is_powered(hdev)) {
3659                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3660                                         MGMT_STATUS_NOT_POWERED, addr,
3661                                         sizeof(*addr));
3662                 goto done;
3663         }
3664
3665         if (addr->type == BDADDR_BREDR)
3666                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3667         else
3668                 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3669                                                le_addr_type(addr->type));
3670
3671         if (!conn) {
3672                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3673                                         MGMT_STATUS_NOT_CONNECTED, addr,
3674                                         sizeof(*addr));
3675                 goto done;
3676         }
3677
3678         if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3679                 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3680                 if (!err)
3681                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3682                                                 MGMT_STATUS_SUCCESS, addr,
3683                                                 sizeof(*addr));
3684                 else
3685                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3686                                                 MGMT_STATUS_FAILED, addr,
3687                                                 sizeof(*addr));
3688
3689                 goto done;
3690         }
3691
3692         cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3693         if (!cmd) {
3694                 err = -ENOMEM;
3695                 goto done;
3696         }
3697
3698         cmd->cmd_complete = addr_cmd_complete;
3699
3700         /* Continue with pairing via HCI */
3701         if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3702                 struct hci_cp_user_passkey_reply cp;
3703
3704                 bacpy(&cp.bdaddr, &addr->bdaddr);
3705                 cp.passkey = passkey;
3706                 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3707         } else
3708                 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3709                                    &addr->bdaddr);
3710
3711         if (err < 0)
3712                 mgmt_pending_remove(cmd);
3713
3714 done:
3715         hci_dev_unlock(hdev);
3716         return err;
3717 }
3718
3719 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3720                               void *data, u16 len)
3721 {
3722         struct mgmt_cp_pin_code_neg_reply *cp = data;
3723
3724         bt_dev_dbg(hdev, "sock %p", sk);
3725
3726         return user_pairing_resp(sk, hdev, &cp->addr,
3727                                 MGMT_OP_PIN_CODE_NEG_REPLY,
3728                                 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3729 }
3730
3731 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3732                               u16 len)
3733 {
3734         struct mgmt_cp_user_confirm_reply *cp = data;
3735
3736         bt_dev_dbg(hdev, "sock %p", sk);
3737
3738         if (len != sizeof(*cp))
3739                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3740                                        MGMT_STATUS_INVALID_PARAMS);
3741
3742         return user_pairing_resp(sk, hdev, &cp->addr,
3743                                  MGMT_OP_USER_CONFIRM_REPLY,
3744                                  HCI_OP_USER_CONFIRM_REPLY, 0);
3745 }
3746
3747 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3748                                   void *data, u16 len)
3749 {
3750         struct mgmt_cp_user_confirm_neg_reply *cp = data;
3751
3752         bt_dev_dbg(hdev, "sock %p", sk);
3753
3754         return user_pairing_resp(sk, hdev, &cp->addr,
3755                                  MGMT_OP_USER_CONFIRM_NEG_REPLY,
3756                                  HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3757 }
3758
3759 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3760                               u16 len)
3761 {
3762         struct mgmt_cp_user_passkey_reply *cp = data;
3763
3764         bt_dev_dbg(hdev, "sock %p", sk);
3765
3766         return user_pairing_resp(sk, hdev, &cp->addr,
3767                                  MGMT_OP_USER_PASSKEY_REPLY,
3768                                  HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3769 }
3770
3771 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3772                                   void *data, u16 len)
3773 {
3774         struct mgmt_cp_user_passkey_neg_reply *cp = data;
3775
3776         bt_dev_dbg(hdev, "sock %p", sk);
3777
3778         return user_pairing_resp(sk, hdev, &cp->addr,
3779                                  MGMT_OP_USER_PASSKEY_NEG_REPLY,
3780                                  HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3781 }
3782
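/* If the current advertising instance advertises any of the given
 * flags, cancel its timeout and schedule the next instance so that the
 * advertised data is refreshed.
 */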
3783 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3784 {
3785         struct adv_info *adv_instance;
3786
3787         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3788         if (!adv_instance)
3789                 return 0;
3790
3791         /* Stop if the current instance doesn't need to be changed */
3792         if (!(adv_instance->flags & flags))
3793                 return 0;
3794
3795         cancel_adv_timeout(hdev);
3796
3797         adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3798         if (!adv_instance)
3799                 return 0;
3800
3801         hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3802
3803         return 0;
3804 }
3805
3806 static int name_changed_sync(struct hci_dev *hdev, void *data)
3807 {
3808         return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3809 }
3810
3811 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3812 {
3813         struct mgmt_pending_cmd *cmd = data;
3814         struct mgmt_cp_set_local_name *cp = cmd->param;
3815         u8 status = mgmt_status(err);
3816
3817         bt_dev_dbg(hdev, "err %d", err);
3818
3819         if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3820                 return;
3821
3822         if (status) {
3823                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3824                                 status);
3825         } else {
3826                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3827                                   cp, sizeof(*cp));
3828
3829                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3830                         hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3831         }
3832
3833         mgmt_pending_remove(cmd);
3834 }
3835
3836 static int set_name_sync(struct hci_dev *hdev, void *data)
3837 {
3838         if (lmp_bredr_capable(hdev)) {
3839                 hci_update_name_sync(hdev);
3840                 hci_update_eir_sync(hdev);
3841         }
3842
3843         /* The name is stored in the scan response data, so there is
3844          * no need to update the advertising data here.
3845          */
3846         if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3847                 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3848
3849         return 0;
3850 }
3851
3852 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3853                           u16 len)
3854 {
3855         struct mgmt_cp_set_local_name *cp = data;
3856         struct mgmt_pending_cmd *cmd;
3857         int err;
3858
3859         bt_dev_dbg(hdev, "sock %p", sk);
3860
3861         hci_dev_lock(hdev);
3862
3863         /* If the old values are the same as the new ones, just return a
3864          * direct command complete event.
3865          */
3866         if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3867             !memcmp(hdev->short_name, cp->short_name,
3868                     sizeof(hdev->short_name))) {
3869                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3870                                         data, len);
3871                 goto failed;
3872         }
3873
3874         memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3875
3876         if (!hdev_is_powered(hdev)) {
3877                 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3878
3879                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3880                                         data, len);
3881                 if (err < 0)
3882                         goto failed;
3883
3884                 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3885                                          len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3886                 ext_info_changed(hdev, sk);
3887
3888                 goto failed;
3889         }
3890
3891         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3892         if (!cmd)
3893                 err = -ENOMEM;
3894         else
3895                 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3896                                          set_name_complete);
3897
3898         if (err < 0) {
3899                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3900                                       MGMT_STATUS_FAILED);
3901
3902                 if (cmd)
3903                         mgmt_pending_remove(cmd);
3904
3905                 goto failed;
3906         }
3907
3908         memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3909
3910 failed:
3911         hci_dev_unlock(hdev);
3912         return err;
3913 }
3914
3915 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3916 {
3917         return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3918 }
3919
3920 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3921                           u16 len)
3922 {
3923         struct mgmt_cp_set_appearance *cp = data;
3924         u16 appearance;
3925         int err;
3926
3927         bt_dev_dbg(hdev, "sock %p", sk);
3928
3929         if (!lmp_le_capable(hdev))
3930                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3931                                        MGMT_STATUS_NOT_SUPPORTED);
3932
3933         appearance = le16_to_cpu(cp->appearance);
3934
3935         hci_dev_lock(hdev);
3936
3937         if (hdev->appearance != appearance) {
3938                 hdev->appearance = appearance;
3939
3940                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3941                         hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3942                                            NULL);
3943
3944                 ext_info_changed(hdev, sk);
3945         }
3946
3947         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3948                                 0);
3949
3950         hci_dev_unlock(hdev);
3951
3952         return err;
3953 }
3954
3955 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3956                                  void *data, u16 len)
3957 {
3958         struct mgmt_rp_get_phy_configuration rp;
3959
3960         bt_dev_dbg(hdev, "sock %p", sk);
3961
3962         hci_dev_lock(hdev);
3963
3964         memset(&rp, 0, sizeof(rp));
3965
3966         rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3967         rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3968         rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3969
3970         hci_dev_unlock(hdev);
3971
3972         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3973                                  &rp, sizeof(rp));
3974 }
3975
3976 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3977 {
3978         struct mgmt_ev_phy_configuration_changed ev;
3979
3980         memset(&ev, 0, sizeof(ev));
3981
3982         ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3983
3984         return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3985                           sizeof(ev), skip);
3986 }
3987
3988 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3989 {
3990         struct mgmt_pending_cmd *cmd = data;
3991         struct sk_buff *skb = cmd->skb;
3992         u8 status = mgmt_status(err);
3993
3994         if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3995                 return;
3996
3997         if (!status) {
3998                 if (!skb)
3999                         status = MGMT_STATUS_FAILED;
4000                 else if (IS_ERR(skb))
4001                         status = mgmt_status(PTR_ERR(skb));
4002                 else
4003                         status = mgmt_status(skb->data[0]);
4004         }
4005
4006         bt_dev_dbg(hdev, "status %d", status);
4007
4008         if (status) {
4009                 mgmt_cmd_status(cmd->sk, hdev->id,
4010                                 MGMT_OP_SET_PHY_CONFIGURATION, status);
4011         } else {
4012                 mgmt_cmd_complete(cmd->sk, hdev->id,
4013                                   MGMT_OP_SET_PHY_CONFIGURATION, 0,
4014                                   NULL, 0);
4015
4016                 mgmt_phy_configuration_changed(hdev, cmd->sk);
4017         }
4018
4019         if (skb && !IS_ERR(skb))
4020                 kfree_skb(skb);
4021
4022         mgmt_pending_remove(cmd);
4023 }
4024
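/* Translate the selected MGMT PHY bits into an HCI LE Set Default PHY
 * command; all_phys marks the TX/RX directions for which the host has
 * no preference.
 */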
4025 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4026 {
4027         struct mgmt_pending_cmd *cmd = data;
4028         struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4029         struct hci_cp_le_set_default_phy cp_phy;
4030         u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4031
4032         memset(&cp_phy, 0, sizeof(cp_phy));
4033
4034         if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4035                 cp_phy.all_phys |= 0x01;
4036
4037         if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4038                 cp_phy.all_phys |= 0x02;
4039
4040         if (selected_phys & MGMT_PHY_LE_1M_TX)
4041                 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4042
4043         if (selected_phys & MGMT_PHY_LE_2M_TX)
4044                 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4045
4046         if (selected_phys & MGMT_PHY_LE_CODED_TX)
4047                 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4048
4049         if (selected_phys & MGMT_PHY_LE_1M_RX)
4050                 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4051
4052         if (selected_phys & MGMT_PHY_LE_2M_RX)
4053                 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4054
4055         if (selected_phys & MGMT_PHY_LE_CODED_RX)
4056                 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4057
4058         cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4059                                   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4060
4061         return 0;
4062 }
4063
4064 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4065                                  void *data, u16 len)
4066 {
4067         struct mgmt_cp_set_phy_configuration *cp = data;
4068         struct mgmt_pending_cmd *cmd;
4069         u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4070         u16 pkt_type = (HCI_DH1 | HCI_DM1);
4071         bool changed = false;
4072         int err;
4073
4074         bt_dev_dbg(hdev, "sock %p", sk);
4075
4076         configurable_phys = get_configurable_phys(hdev);
4077         supported_phys = get_supported_phys(hdev);
4078         selected_phys = __le32_to_cpu(cp->selected_phys);
4079
4080         if (selected_phys & ~supported_phys)
4081                 return mgmt_cmd_status(sk, hdev->id,
4082                                        MGMT_OP_SET_PHY_CONFIGURATION,
4083                                        MGMT_STATUS_INVALID_PARAMS);
4084
4085         unconfigure_phys = supported_phys & ~configurable_phys;
4086
4087         if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4088                 return mgmt_cmd_status(sk, hdev->id,
4089                                        MGMT_OP_SET_PHY_CONFIGURATION,
4090                                        MGMT_STATUS_INVALID_PARAMS);
4091
4092         if (selected_phys == get_selected_phys(hdev))
4093                 return mgmt_cmd_complete(sk, hdev->id,
4094                                          MGMT_OP_SET_PHY_CONFIGURATION,
4095                                          0, NULL, 0);
4096
4097         hci_dev_lock(hdev);
4098
4099         if (!hdev_is_powered(hdev)) {
4100                 err = mgmt_cmd_status(sk, hdev->id,
4101                                       MGMT_OP_SET_PHY_CONFIGURATION,
4102                                       MGMT_STATUS_REJECTED);
4103                 goto unlock;
4104         }
4105
4106         if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4107                 err = mgmt_cmd_status(sk, hdev->id,
4108                                       MGMT_OP_SET_PHY_CONFIGURATION,
4109                                       MGMT_STATUS_BUSY);
4110                 goto unlock;
4111         }
4112
4113         if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4114                 pkt_type |= (HCI_DH3 | HCI_DM3);
4115         else
4116                 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4117
4118         if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4119                 pkt_type |= (HCI_DH5 | HCI_DM5);
4120         else
4121                 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4122
4123         if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4124                 pkt_type &= ~HCI_2DH1;
4125         else
4126                 pkt_type |= HCI_2DH1;
4127
4128         if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4129                 pkt_type &= ~HCI_2DH3;
4130         else
4131                 pkt_type |= HCI_2DH3;
4132
4133         if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4134                 pkt_type &= ~HCI_2DH5;
4135         else
4136                 pkt_type |= HCI_2DH5;
4137
4138         if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4139                 pkt_type &= ~HCI_3DH1;
4140         else
4141                 pkt_type |= HCI_3DH1;
4142
4143         if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4144                 pkt_type &= ~HCI_3DH3;
4145         else
4146                 pkt_type |= HCI_3DH3;
4147
4148         if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4149                 pkt_type &= ~HCI_3DH5;
4150         else
4151                 pkt_type |= HCI_3DH5;
4152
4153         if (pkt_type != hdev->pkt_type) {
4154                 hdev->pkt_type = pkt_type;
4155                 changed = true;
4156         }
4157
4158         if ((selected_phys & MGMT_PHY_LE_MASK) ==
4159             (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4160                 if (changed)
4161                         mgmt_phy_configuration_changed(hdev, sk);
4162
4163                 err = mgmt_cmd_complete(sk, hdev->id,
4164                                         MGMT_OP_SET_PHY_CONFIGURATION,
4165                                         0, NULL, 0);
4166
4167                 goto unlock;
4168         }
4169
4170         cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4171                                len);
4172         if (!cmd)
4173                 err = -ENOMEM;
4174         else
4175                 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4176                                          set_default_phy_complete);
4177
4178         if (err < 0) {
4179                 err = mgmt_cmd_status(sk, hdev->id,
4180                                       MGMT_OP_SET_PHY_CONFIGURATION,
4181                                       MGMT_STATUS_FAILED);
4182
4183                 if (cmd)
4184                         mgmt_pending_remove(cmd);
4185         }
4186
4187 unlock:
4188         hci_dev_unlock(hdev);
4189
4190         return err;
4191 }
4192
4193 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4194                             u16 len)
4195 {
4196         int err = MGMT_STATUS_SUCCESS;
4197         struct mgmt_cp_set_blocked_keys *keys = data;
4198         const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4199                                    sizeof(struct mgmt_blocked_key_info));
4200         u16 key_count, expected_len;
4201         int i;
4202
4203         bt_dev_dbg(hdev, "sock %p", sk);
4204
4205         key_count = __le16_to_cpu(keys->key_count);
4206         if (key_count > max_key_count) {
4207                 bt_dev_err(hdev, "too big key_count value %u", key_count);
4208                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4209                                        MGMT_STATUS_INVALID_PARAMS);
4210         }
4211
4212         expected_len = struct_size(keys, keys, key_count);
4213         if (expected_len != len) {
4214                 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4215                            expected_len, len);
4216                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4217                                        MGMT_STATUS_INVALID_PARAMS);
4218         }
4219
4220         hci_dev_lock(hdev);
4221
4222         hci_blocked_keys_clear(hdev);
4223
4224         for (i = 0; i < key_count; ++i) {
4225                 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4226
4227                 if (!b) {
4228                         err = MGMT_STATUS_NO_RESOURCES;
4229                         break;
4230                 }
4231
4232                 b->type = keys->keys[i].type;
4233                 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4234                 list_add_rcu(&b->list, &hdev->blocked_keys);
4235         }
4236         hci_dev_unlock(hdev);
4237
4238         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4239                                 err, NULL, 0);
4240 }
4241
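/* Wideband speech can only be toggled while the controller is powered
 * off; with the controller powered on, any change of the current value
 * is rejected.
 */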
4242 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4243                                void *data, u16 len)
4244 {
4245         struct mgmt_mode *cp = data;
4246         int err;
4247         bool changed = false;
4248
4249         bt_dev_dbg(hdev, "sock %p", sk);
4250
4251         if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4252                 return mgmt_cmd_status(sk, hdev->id,
4253                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4254                                        MGMT_STATUS_NOT_SUPPORTED);
4255
4256         if (cp->val != 0x00 && cp->val != 0x01)
4257                 return mgmt_cmd_status(sk, hdev->id,
4258                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4259                                        MGMT_STATUS_INVALID_PARAMS);
4260
4261         hci_dev_lock(hdev);
4262
4263         if (hdev_is_powered(hdev) &&
4264             !!cp->val != hci_dev_test_flag(hdev,
4265                                            HCI_WIDEBAND_SPEECH_ENABLED)) {
4266                 err = mgmt_cmd_status(sk, hdev->id,
4267                                       MGMT_OP_SET_WIDEBAND_SPEECH,
4268                                       MGMT_STATUS_REJECTED);
4269                 goto unlock;
4270         }
4271
4272         if (cp->val)
4273                 changed = !hci_dev_test_and_set_flag(hdev,
4274                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4275         else
4276                 changed = hci_dev_test_and_clear_flag(hdev,
4277                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4278
4279         err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4280         if (err < 0)
4281                 goto unlock;
4282
4283         if (changed)
4284                 err = new_settings(hdev, sk);
4285
4286 unlock:
4287         hci_dev_unlock(hdev);
4288         return err;
4289 }
4290
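/* Build the Read Controller Capabilities reply as a list of EIR-encoded
 * fields: the security flags, the supported encryption key sizes and,
 * when available, the LE TX power range.
 */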
4291 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4292                                void *data, u16 data_len)
4293 {
4294         char buf[20];
4295         struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4296         u16 cap_len = 0;
4297         u8 flags = 0;
4298         u8 tx_power_range[2];
4299
4300         bt_dev_dbg(hdev, "sock %p", sk);
4301
4302         memset(&buf, 0, sizeof(buf));
4303
4304         hci_dev_lock(hdev);
4305
4306         /* When the Read Simple Pairing Options command is supported,
4307          * remote public key validation is also supported.
4308          *
4309          * Alternatively, when the Microsoft extensions are available,
4310          * they can indicate support for public key validation as well.
4311          */
4312         if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4313                 flags |= 0x01;  /* Remote public key validation (BR/EDR) */
4314
4315         flags |= 0x02;          /* Remote public key validation (LE) */
4316
4317         /* When the Read Encryption Key Size command is supported, the
4318          * encryption key size is enforced.
4319          */
4320         if (hdev->commands[20] & 0x10)
4321                 flags |= 0x04;  /* Encryption key size enforcement (BR/EDR) */
4322
4323         flags |= 0x08;          /* Encryption key size enforcement (LE) */
4324
4325         cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4326                                   &flags, 1);
4327
4328         /* When the Read Simple Pairing Options command is supported, the
4329          * maximum encryption key size information is also provided.
4330          */
4331         if (hdev->commands[41] & 0x08)
4332                 cap_len = eir_append_le16(rp->cap, cap_len,
4333                                           MGMT_CAP_MAX_ENC_KEY_SIZE,
4334                                           hdev->max_enc_key_size);
4335
4336         cap_len = eir_append_le16(rp->cap, cap_len,
4337                                   MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4338                                   SMP_MAX_ENC_KEY_SIZE);
4339
4340         /* Append the min/max LE TX power parameters if we were able to
4341          * fetch them from the controller.
4342          */
4343         if (hdev->commands[38] & 0x80) {
4344                 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4345                 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4346                 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4347                                           tx_power_range, 2);
4348         }
4349
4350         rp->cap_len = cpu_to_le16(cap_len);
4351
4352         hci_dev_unlock(hdev);
4353
4354         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4355                                  rp, sizeof(*rp) + cap_len);
4356 }
4357
4358 #ifdef CONFIG_BT_FEATURE_DEBUG
4359 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4360 static const u8 debug_uuid[16] = {
4361         0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4362         0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4363 };
4364 #endif
4365
4366 /* 330859bc-7506-492d-9370-9a6f0614037f */
4367 static const u8 quality_report_uuid[16] = {
4368         0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4369         0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4370 };
4371
4372 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4373 static const u8 offload_codecs_uuid[16] = {
4374         0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4375         0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4376 };
4377
4378 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4379 static const u8 le_simultaneous_roles_uuid[16] = {
4380         0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4381         0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4382 };
4383
4384 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4385 static const u8 rpa_resolution_uuid[16] = {
4386         0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4387         0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4388 };
4389
4390 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4391 static const u8 iso_socket_uuid[16] = {
4392         0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4393         0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4394 };
4395
4396 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4397 static const u8 mgmt_mesh_uuid[16] = {
4398         0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4399         0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4400 };
4401
4402 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4403                                   void *data, u16 data_len)
4404 {
4405         struct mgmt_rp_read_exp_features_info *rp;
4406         size_t len;
4407         u16 idx = 0;
4408         u32 flags;
4409         int status;
4410
4411         bt_dev_dbg(hdev, "sock %p", sk);
4412
4413         /* Enough space for 7 features */
4414         len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4415         rp = kzalloc(len, GFP_KERNEL);
4416         if (!rp)
4417                 return -ENOMEM;
4418
4419 #ifdef CONFIG_BT_FEATURE_DEBUG
4420         if (!hdev) {
4421                 flags = bt_dbg_get() ? BIT(0) : 0;
4422
4423                 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4424                 rp->features[idx].flags = cpu_to_le32(flags);
4425                 idx++;
4426         }
4427 #endif
4428
4429         if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4430                 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4431                         flags = BIT(0);
4432                 else
4433                         flags = 0;
4434
4435                 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4436                 rp->features[idx].flags = cpu_to_le32(flags);
4437                 idx++;
4438         }
4439
4440         if (hdev && ll_privacy_capable(hdev)) {
4441                 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4442                         flags = BIT(0) | BIT(1);
4443                 else
4444                         flags = BIT(1);
4445
4446                 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4447                 rp->features[idx].flags = cpu_to_le32(flags);
4448                 idx++;
4449         }
4450
4451         if (hdev && (aosp_has_quality_report(hdev) ||
4452                      hdev->set_quality_report)) {
4453                 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4454                         flags = BIT(0);
4455                 else
4456                         flags = 0;
4457
4458                 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4459                 rp->features[idx].flags = cpu_to_le32(flags);
4460                 idx++;
4461         }
4462
4463         if (hdev && hdev->get_data_path_id) {
4464                 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4465                         flags = BIT(0);
4466                 else
4467                         flags = 0;
4468
4469                 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4470                 rp->features[idx].flags = cpu_to_le32(flags);
4471                 idx++;
4472         }
4473
4474         if (IS_ENABLED(CONFIG_BT_LE)) {
4475                 flags = iso_enabled() ? BIT(0) : 0;
4476                 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4477                 rp->features[idx].flags = cpu_to_le32(flags);
4478                 idx++;
4479         }
4480
4481         if (hdev && lmp_le_capable(hdev)) {
4482                 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4483                         flags = BIT(0);
4484                 else
4485                         flags = 0;
4486
4487                 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4488                 rp->features[idx].flags = cpu_to_le32(flags);
4489                 idx++;
4490         }
4491
4492         rp->feature_count = cpu_to_le16(idx);
4493
4494         /* After reading the experimental features information, enable
4495          * the events that update the client on any future change.
4496          */
4497         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4498
4499         status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4500                                    MGMT_OP_READ_EXP_FEATURES_INFO,
4501                                    0, rp, sizeof(*rp) + (20 * idx));
4502
4503         kfree(rp);
4504         return status;
4505 }
4506
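/* Emit the experimental feature changed event for the RPA resolution
 * UUID and mirror the new state into hdev->conn_flags so that device
 * privacy mode is only requested while the feature is enabled.
 */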
4507 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4508                                           struct sock *skip)
4509 {
4510         struct mgmt_ev_exp_feature_changed ev;
4511
4512         memset(&ev, 0, sizeof(ev));
4513         memcpy(ev.uuid, rpa_resolution_uuid, 16);
4514         ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4515
4516         /* TODO: Do we need to be atomic with the conn_flags? */
4517         if (enabled && privacy_mode_capable(hdev))
4518                 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4519         else
4520                 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4521
4522         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4523                                   &ev, sizeof(ev),
4524                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4525
4526 }
4527
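     /* Send MGMT_EV_EXP_FEATURE_CHANGED for @uuid to all sockets that
      * enabled HCI_MGMT_EXP_FEATURE_EVENTS, except @skip.
      */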
4528 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4529                                bool enabled, struct sock *skip)
4530 {
4531         struct mgmt_ev_exp_feature_changed ev;
4532
4533         memset(&ev, 0, sizeof(ev));
4534         memcpy(ev.uuid, uuid, 16);
4535         ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4536
4537         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4538                                   &ev, sizeof(ev),
4539                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4540 }
4541
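     /* Helper for declaring entries of the exp_features[] table below. */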
4542 #define EXP_FEAT(_uuid, _set_func)      \
4543 {                                       \
4544         .uuid = _uuid,                  \
4545         .set_func = _set_func,          \
4546 }
4547
4548 /* The zero key uuid is special: multiple exp features are disabled through it. */
4549 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4550                              struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4551 {
4552         struct mgmt_rp_set_exp_feature rp;
4553
4554         memset(rp.uuid, 0, 16);
4555         rp.flags = cpu_to_le32(0);
4556
4557 #ifdef CONFIG_BT_FEATURE_DEBUG
4558         if (!hdev) {
4559                 bool changed = bt_dbg_get();
4560
4561                 bt_dbg_set(false);
4562
4563                 if (changed)
4564                         exp_feature_changed(NULL, ZERO_KEY, false, sk);
4565         }
4566 #endif
4567
4568         if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4569                 bool changed;
4570
4571                 changed = hci_dev_test_and_clear_flag(hdev,
4572                                                       HCI_ENABLE_LL_PRIVACY);
4573                 if (changed)
4574                         exp_feature_changed(hdev, rpa_resolution_uuid, false,
4575                                             sk);
4576         }
4577
4578         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4579
4580         return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4581                                  MGMT_OP_SET_EXP_FEATURE, 0,
4582                                  &rp, sizeof(rp));
4583 }
4584
4585 #ifdef CONFIG_BT_FEATURE_DEBUG
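     /* Toggle the debug experimental feature (debug_uuid). Only valid on
      * the non-controller index and with a single boolean parameter.
      */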
4586 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4587                           struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4588 {
4589         struct mgmt_rp_set_exp_feature rp;
4590
4591         bool val, changed;
4592         int err;
4593
4594         /* Command requires the non-controller index */
4595         if (hdev)
4596                 return mgmt_cmd_status(sk, hdev->id,
4597                                        MGMT_OP_SET_EXP_FEATURE,
4598                                        MGMT_STATUS_INVALID_INDEX);
4599
4600         /* Parameters are limited to a single octet */
4601         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4602                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4603                                        MGMT_OP_SET_EXP_FEATURE,
4604                                        MGMT_STATUS_INVALID_PARAMS);
4605
4606         /* Only boolean on/off is supported */
4607         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4608                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4609                                        MGMT_OP_SET_EXP_FEATURE,
4610                                        MGMT_STATUS_INVALID_PARAMS);
4611
4612         val = !!cp->param[0];
4613         changed = val ? !bt_dbg_get() : bt_dbg_get();
4614         bt_dbg_set(val);
4615
4616         memcpy(rp.uuid, debug_uuid, 16);
4617         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4618
4619         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4620
4621         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4622                                 MGMT_OP_SET_EXP_FEATURE, 0,
4623                                 &rp, sizeof(rp));
4624
4625         if (changed)
4626                 exp_feature_changed(hdev, debug_uuid, val, sk);
4627
4628         return err;
4629 }
4630 #endif
4631
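     /* Toggle the experimental mesh feature (mgmt_mesh_uuid) by setting or
      * clearing HCI_MESH_EXPERIMENTAL; disabling also clears HCI_MESH.
      */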
4632 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4633                               struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4634 {
4635         struct mgmt_rp_set_exp_feature rp;
4636         bool val, changed;
4637         int err;
4638
4639         /* Command requires a controller index */
4640         if (!hdev)
4641                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4642                                        MGMT_OP_SET_EXP_FEATURE,
4643                                        MGMT_STATUS_INVALID_INDEX);
4644
4645         /* Parameters are limited to a single octet */
4646         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4647                 return mgmt_cmd_status(sk, hdev->id,
4648                                        MGMT_OP_SET_EXP_FEATURE,
4649                                        MGMT_STATUS_INVALID_PARAMS);
4650
4651         /* Only boolean on/off is supported */
4652         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4653                 return mgmt_cmd_status(sk, hdev->id,
4654                                        MGMT_OP_SET_EXP_FEATURE,
4655                                        MGMT_STATUS_INVALID_PARAMS);
4656
4657         val = !!cp->param[0];
4658
4659         if (val) {
4660                 changed = !hci_dev_test_and_set_flag(hdev,
4661                                                      HCI_MESH_EXPERIMENTAL);
4662         } else {
4663                 hci_dev_clear_flag(hdev, HCI_MESH);
4664                 changed = hci_dev_test_and_clear_flag(hdev,
4665                                                       HCI_MESH_EXPERIMENTAL);
4666         }
4667
4668         memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4669         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4670
4671         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4672
4673         err = mgmt_cmd_complete(sk, hdev->id,
4674                                 MGMT_OP_SET_EXP_FEATURE, 0,
4675                                 &rp, sizeof(rp));
4676
4677         if (changed)
4678                 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4679
4680         return err;
4681 }
4682
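     /* Toggle LL privacy / RPA resolution (rpa_resolution_uuid). Only
      * allowed while the controller is powered off, since toggling it
      * changes the supported settings.
      */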
4683 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4684                                    struct mgmt_cp_set_exp_feature *cp,
4685                                    u16 data_len)
4686 {
4687         struct mgmt_rp_set_exp_feature rp;
4688         bool val, changed;
4689         int err;
4690         u32 flags;
4691
4692         /* Command requires a controller index */
4693         if (!hdev)
4694                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4695                                        MGMT_OP_SET_EXP_FEATURE,
4696                                        MGMT_STATUS_INVALID_INDEX);
4697
4698         /* Changes can only be made when controller is powered down */
4699         if (hdev_is_powered(hdev))
4700                 return mgmt_cmd_status(sk, hdev->id,
4701                                        MGMT_OP_SET_EXP_FEATURE,
4702                                        MGMT_STATUS_REJECTED);
4703
4704         /* Parameters are limited to a single octet */
4705         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4706                 return mgmt_cmd_status(sk, hdev->id,
4707                                        MGMT_OP_SET_EXP_FEATURE,
4708                                        MGMT_STATUS_INVALID_PARAMS);
4709
4710         /* Only boolean on/off is supported */
4711         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4712                 return mgmt_cmd_status(sk, hdev->id,
4713                                        MGMT_OP_SET_EXP_FEATURE,
4714                                        MGMT_STATUS_INVALID_PARAMS);
4715
4716         val = !!cp->param[0];
4717
4718         if (val) {
4719                 changed = !hci_dev_test_and_set_flag(hdev,
4720                                                      HCI_ENABLE_LL_PRIVACY);
4721                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4722
4723                 /* Enable LL privacy + supported settings changed */
4724                 flags = BIT(0) | BIT(1);
4725         } else {
4726                 changed = hci_dev_test_and_clear_flag(hdev,
4727                                                       HCI_ENABLE_LL_PRIVACY);
4728
4729                 /* Disable LL privacy + supported settings changed */
4730                 flags = BIT(1);
4731         }
4732
4733         memcpy(rp.uuid, rpa_resolution_uuid, 16);
4734         rp.flags = cpu_to_le32(flags);
4735
4736         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4737
4738         err = mgmt_cmd_complete(sk, hdev->id,
4739                                 MGMT_OP_SET_EXP_FEATURE, 0,
4740                                 &rp, sizeof(rp));
4741
4742         if (changed)
4743                 exp_ll_privacy_feature_changed(val, hdev, sk);
4744
4745         return err;
4746 }
4747
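     /* Toggle the quality report feature, using either the driver's
      * set_quality_report callback or the AOSP vendor extension.
      */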
4748 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4749                                    struct mgmt_cp_set_exp_feature *cp,
4750                                    u16 data_len)
4751 {
4752         struct mgmt_rp_set_exp_feature rp;
4753         bool val, changed;
4754         int err;
4755
4756         /* Command requires a valid controller index */
4757         if (!hdev)
4758                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4759                                        MGMT_OP_SET_EXP_FEATURE,
4760                                        MGMT_STATUS_INVALID_INDEX);
4761
4762         /* Parameters are limited to a single octet */
4763         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4764                 return mgmt_cmd_status(sk, hdev->id,
4765                                        MGMT_OP_SET_EXP_FEATURE,
4766                                        MGMT_STATUS_INVALID_PARAMS);
4767
4768         /* Only boolean on/off is supported */
4769         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4770                 return mgmt_cmd_status(sk, hdev->id,
4771                                        MGMT_OP_SET_EXP_FEATURE,
4772                                        MGMT_STATUS_INVALID_PARAMS);
4773
4774         hci_req_sync_lock(hdev);
4775
4776         val = !!cp->param[0];
4777         changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4778
4779         if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4780                 err = mgmt_cmd_status(sk, hdev->id,
4781                                       MGMT_OP_SET_EXP_FEATURE,
4782                                       MGMT_STATUS_NOT_SUPPORTED);
4783                 goto unlock_quality_report;
4784         }
4785
4786         if (changed) {
4787                 if (hdev->set_quality_report)
4788                         err = hdev->set_quality_report(hdev, val);
4789                 else
4790                         err = aosp_set_quality_report(hdev, val);
4791
4792                 if (err) {
4793                         err = mgmt_cmd_status(sk, hdev->id,
4794                                               MGMT_OP_SET_EXP_FEATURE,
4795                                               MGMT_STATUS_FAILED);
4796                         goto unlock_quality_report;
4797                 }
4798
4799                 if (val)
4800                         hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4801                 else
4802                         hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4803         }
4804
4805         bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4806
4807         memcpy(rp.uuid, quality_report_uuid, 16);
4808         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4809         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4810
4811         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4812                                 &rp, sizeof(rp));
4813
4814         if (changed)
4815                 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4816
4817 unlock_quality_report:
4818         hci_req_sync_unlock(hdev);
4819         return err;
4820 }
4821
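     /* Toggle offloaded codecs (offload_codecs_uuid); requires the driver
      * to provide a get_data_path_id callback.
      */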
4822 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4823                                   struct mgmt_cp_set_exp_feature *cp,
4824                                   u16 data_len)
4825 {
4826         bool val, changed;
4827         int err;
4828         struct mgmt_rp_set_exp_feature rp;
4829
4830         /* Command requires a valid controller index */
4831         if (!hdev)
4832                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4833                                        MGMT_OP_SET_EXP_FEATURE,
4834                                        MGMT_STATUS_INVALID_INDEX);
4835
4836         /* Parameters are limited to a single octet */
4837         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4838                 return mgmt_cmd_status(sk, hdev->id,
4839                                        MGMT_OP_SET_EXP_FEATURE,
4840                                        MGMT_STATUS_INVALID_PARAMS);
4841
4842         /* Only boolean on/off is supported */
4843         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4844                 return mgmt_cmd_status(sk, hdev->id,
4845                                        MGMT_OP_SET_EXP_FEATURE,
4846                                        MGMT_STATUS_INVALID_PARAMS);
4847
4848         val = !!cp->param[0];
4849         changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4850
4851         if (!hdev->get_data_path_id) {
4852                 return mgmt_cmd_status(sk, hdev->id,
4853                                        MGMT_OP_SET_EXP_FEATURE,
4854                                        MGMT_STATUS_NOT_SUPPORTED);
4855         }
4856
4857         if (changed) {
4858                 if (val)
4859                         hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4860                 else
4861                         hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4862         }
4863
4864         bt_dev_info(hdev, "offload codecs enable %d changed %d",
4865                     val, changed);
4866
4867         memcpy(rp.uuid, offload_codecs_uuid, 16);
4868         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4869         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4870         err = mgmt_cmd_complete(sk, hdev->id,
4871                                 MGMT_OP_SET_EXP_FEATURE, 0,
4872                                 &rp, sizeof(rp));
4873
4874         if (changed)
4875                 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4876
4877         return err;
4878 }
4879
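     /* Toggle LE simultaneous central/peripheral roles; only supported
      * when hci_dev_le_state_simultaneous() reports it.
      */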
4880 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4881                                           struct mgmt_cp_set_exp_feature *cp,
4882                                           u16 data_len)
4883 {
4884         bool val, changed;
4885         int err;
4886         struct mgmt_rp_set_exp_feature rp;
4887
4888         /* Command requires a valid controller index */
4889         if (!hdev)
4890                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4891                                        MGMT_OP_SET_EXP_FEATURE,
4892                                        MGMT_STATUS_INVALID_INDEX);
4893
4894         /* Parameters are limited to a single octet */
4895         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4896                 return mgmt_cmd_status(sk, hdev->id,
4897                                        MGMT_OP_SET_EXP_FEATURE,
4898                                        MGMT_STATUS_INVALID_PARAMS);
4899
4900         /* Only boolean on/off is supported */
4901         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4902                 return mgmt_cmd_status(sk, hdev->id,
4903                                        MGMT_OP_SET_EXP_FEATURE,
4904                                        MGMT_STATUS_INVALID_PARAMS);
4905
4906         val = !!cp->param[0];
4907         changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4908
4909         if (!hci_dev_le_state_simultaneous(hdev)) {
4910                 return mgmt_cmd_status(sk, hdev->id,
4911                                        MGMT_OP_SET_EXP_FEATURE,
4912                                        MGMT_STATUS_NOT_SUPPORTED);
4913         }
4914
4915         if (changed) {
4916                 if (val)
4917                         hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4918                 else
4919                         hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4920         }
4921
4922         bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4923                     val, changed);
4924
4925         memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4926         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4927         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4928         err = mgmt_cmd_complete(sk, hdev->id,
4929                                 MGMT_OP_SET_EXP_FEATURE, 0,
4930                                 &rp, sizeof(rp));
4931
4932         if (changed)
4933                 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4934
4935         return err;
4936 }
4937
4938 #ifdef CONFIG_BT_LE
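     /* Toggle the ISO socket experimental feature by registering or
      * unregistering the ISO socket protocol (iso_init()/iso_exit()).
      */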
4939 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4940                                struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4941 {
4942         struct mgmt_rp_set_exp_feature rp;
4943         bool val, changed = false;
4944         int err;
4945
4946         /* Command requires the non-controller index */
4947         if (hdev)
4948                 return mgmt_cmd_status(sk, hdev->id,
4949                                        MGMT_OP_SET_EXP_FEATURE,
4950                                        MGMT_STATUS_INVALID_INDEX);
4951
4952         /* Parameters are limited to a single octet */
4953         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4954                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4955                                        MGMT_OP_SET_EXP_FEATURE,
4956                                        MGMT_STATUS_INVALID_PARAMS);
4957
4958         /* Only boolean on/off is supported */
4959         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4960                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4961                                        MGMT_OP_SET_EXP_FEATURE,
4962                                        MGMT_STATUS_INVALID_PARAMS);
4963
4964         val = !!cp->param[0];
4965         if (val)
4966                 err = iso_init();
4967         else
4968                 err = iso_exit();
4969
4970         if (!err)
4971                 changed = true;
4972
4973         memcpy(rp.uuid, iso_socket_uuid, 16);
4974         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4975
4976         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4977
4978         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4979                                 MGMT_OP_SET_EXP_FEATURE, 0,
4980                                 &rp, sizeof(rp));
4981
4982         if (changed)
4983                 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4984
4985         return err;
4986 }
4987 #endif
4988
4989 static const struct mgmt_exp_feature {
4990         const u8 *uuid;
4991         int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4992                         struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4993 } exp_features[] = {
4994         EXP_FEAT(ZERO_KEY, set_zero_key_func),
4995 #ifdef CONFIG_BT_FEATURE_DEBUG
4996         EXP_FEAT(debug_uuid, set_debug_func),
4997 #endif
4998         EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4999         EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5000         EXP_FEAT(quality_report_uuid, set_quality_report_func),
5001         EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5002         EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5003 #ifdef CONFIG_BT_LE
5004         EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5005 #endif
5006
5007         /* end with a null feature */
5008         EXP_FEAT(NULL, NULL)
5009 };
5010
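     /* Dispatch MGMT_OP_SET_EXP_FEATURE to the matching entry in
      * exp_features[] based on the 16-byte UUID in the command parameters;
      * most setters above expect a single on/off octet after the UUID.
      */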
5011 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5012                            void *data, u16 data_len)
5013 {
5014         struct mgmt_cp_set_exp_feature *cp = data;
5015         size_t i = 0;
5016
5017         bt_dev_dbg(hdev, "sock %p", sk);
5018
5019         for (i = 0; exp_features[i].uuid; i++) {
5020                 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5021                         return exp_features[i].set_func(sk, hdev, cp, data_len);
5022         }
5023
5024         return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5025                                MGMT_OP_SET_EXP_FEATURE,
5026                                MGMT_STATUS_NOT_SUPPORTED);
5027 }
5028
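     /* Return the connection flags supported for @params, masking out
      * HCI_CONN_FLAG_REMOTE_WAKEUP for RPA-based devices when LL privacy
      * is not in use.
      */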
5029 static u32 get_params_flags(struct hci_dev *hdev,
5030                             struct hci_conn_params *params)
5031 {
5032         u32 flags = hdev->conn_flags;
5033
5034         /* Devices using RPAs can only be programmed into the accept list
5035          * if LL Privacy has been enabled; otherwise they cannot set
5036          * HCI_CONN_FLAG_REMOTE_WAKEUP.
5037          */
5038         if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5039             hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5040                 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5041
5042         return flags;
5043 }
5044
5045 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5046                             u16 data_len)
5047 {
5048         struct mgmt_cp_get_device_flags *cp = data;
5049         struct mgmt_rp_get_device_flags rp;
5050         struct bdaddr_list_with_flags *br_params;
5051         struct hci_conn_params *params;
5052         u32 supported_flags;
5053         u32 current_flags = 0;
5054         u8 status = MGMT_STATUS_INVALID_PARAMS;
5055
5056         bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5057                    &cp->addr.bdaddr, cp->addr.type);
5058
5059         hci_dev_lock(hdev);
5060
5061         supported_flags = hdev->conn_flags;
5062
5063         memset(&rp, 0, sizeof(rp));
5064
5065         if (cp->addr.type == BDADDR_BREDR) {
5066                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5067                                                               &cp->addr.bdaddr,
5068                                                               cp->addr.type);
5069                 if (!br_params)
5070                         goto done;
5071
5072                 current_flags = br_params->flags;
5073         } else {
5074                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5075                                                 le_addr_type(cp->addr.type));
5076                 if (!params)
5077                         goto done;
5078
5079                 supported_flags = get_params_flags(hdev, params);
5080                 current_flags = params->flags;
5081         }
5082
5083         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5084         rp.addr.type = cp->addr.type;
5085         rp.supported_flags = cpu_to_le32(supported_flags);
5086         rp.current_flags = cpu_to_le32(current_flags);
5087
5088         status = MGMT_STATUS_SUCCESS;
5089
5090 done:
5091         hci_dev_unlock(hdev);
5092
5093         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5094                                 &rp, sizeof(rp));
5095 }
5096
5097 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5098                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5099                                  u32 supported_flags, u32 current_flags)
5100 {
5101         struct mgmt_ev_device_flags_changed ev;
5102
5103         bacpy(&ev.addr.bdaddr, bdaddr);
5104         ev.addr.type = bdaddr_type;
5105         ev.supported_flags = cpu_to_le32(supported_flags);
5106         ev.current_flags = cpu_to_le32(current_flags);
5107
5108         mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5109 }
5110
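     /* Validate and store per-device connection flags for a BR/EDR accept
      * list entry or an LE connection parameter entry, notifying other
      * sockets via MGMT_EV_DEVICE_FLAGS_CHANGED on success.
      */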
5111 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5112                             u16 len)
5113 {
5114         struct mgmt_cp_set_device_flags *cp = data;
5115         struct bdaddr_list_with_flags *br_params;
5116         struct hci_conn_params *params;
5117         u8 status = MGMT_STATUS_INVALID_PARAMS;
5118         u32 supported_flags;
5119         u32 current_flags = __le32_to_cpu(cp->current_flags);
5120
5121         bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5122                    &cp->addr.bdaddr, cp->addr.type, current_flags);
5123
5124         /* TODO: Take hci_dev_lock() earlier, since conn_flags can change. */
5125         supported_flags = hdev->conn_flags;
5126
5127         if ((supported_flags | current_flags) != supported_flags) {
5128                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5129                             current_flags, supported_flags);
5130                 goto done;
5131         }
5132
5133         hci_dev_lock(hdev);
5134
5135         if (cp->addr.type == BDADDR_BREDR) {
5136                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5137                                                               &cp->addr.bdaddr,
5138                                                               cp->addr.type);
5139
5140                 if (br_params) {
5141                         br_params->flags = current_flags;
5142                         status = MGMT_STATUS_SUCCESS;
5143                 } else {
5144                         bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5145                                     &cp->addr.bdaddr, cp->addr.type);
5146                 }
5147
5148                 goto unlock;
5149         }
5150
5151         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5152                                         le_addr_type(cp->addr.type));
5153         if (!params) {
5154                 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5155                             &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5156                 goto unlock;
5157         }
5158
5159         supported_flags = get_params_flags(hdev, params);
5160
5161         if ((supported_flags | current_flags) != supported_flags) {
5162                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5163                             current_flags, supported_flags);
5164                 goto unlock;
5165         }
5166
5167         params->flags = current_flags;
5168         status = MGMT_STATUS_SUCCESS;
5169
5170         /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5171          * has been set.
5172          */
5173         if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5174                 hci_update_passive_scan(hdev);
5175
5176 unlock:
5177         hci_dev_unlock(hdev);
5178
5179 done:
5180         if (status == MGMT_STATUS_SUCCESS)
5181                 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5182                                      supported_flags, current_flags);
5183
5184         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5185                                  &cp->addr, sizeof(cp->addr));
5186 }
5187
5188 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5189                                    u16 handle)
5190 {
5191         struct mgmt_ev_adv_monitor_added ev;
5192
5193         ev.monitor_handle = cpu_to_le16(handle);
5194
5195         mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5196 }
5197
5198 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5199 {
5200         struct mgmt_ev_adv_monitor_removed ev;
5201         struct mgmt_pending_cmd *cmd;
5202         struct sock *sk_skip = NULL;
5203         struct mgmt_cp_remove_adv_monitor *cp;
5204
5205         cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5206         if (cmd) {
5207                 cp = cmd->param;
5208
5209                 if (cp->monitor_handle)
5210                         sk_skip = cmd->sk;
5211         }
5212
5213         ev.monitor_handle = cpu_to_le16(handle);
5214
5215         mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5216 }
5217
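     /* Report supported/enabled advertisement monitor features along with
      * the handles of all currently registered monitors.
      */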
5218 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5219                                  void *data, u16 len)
5220 {
5221         struct adv_monitor *monitor = NULL;
5222         struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5223         int handle, err;
5224         size_t rp_size = 0;
5225         __u32 supported = 0;
5226         __u32 enabled = 0;
5227         __u16 num_handles = 0;
5228         __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5229
5230         BT_DBG("request for %s", hdev->name);
5231
5232         hci_dev_lock(hdev);
5233
5234         if (msft_monitor_supported(hdev))
5235                 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5236
5237         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5238                 handles[num_handles++] = monitor->handle;
5239
5240         hci_dev_unlock(hdev);
5241
5242         rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5243         rp = kmalloc(rp_size, GFP_KERNEL);
5244         if (!rp)
5245                 return -ENOMEM;
5246
5247         /* All supported features are currently enabled */
5248         enabled = supported;
5249
5250         rp->supported_features = cpu_to_le32(supported);
5251         rp->enabled_features = cpu_to_le32(enabled);
5252         rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5253         rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5254         rp->num_handles = cpu_to_le16(num_handles);
5255         if (num_handles)
5256                 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5257
5258         err = mgmt_cmd_complete(sk, hdev->id,
5259                                 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5260                                 MGMT_STATUS_SUCCESS, rp, rp_size);
5261
5262         kfree(rp);
5263
5264         return err;
5265 }
5266
5267 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5268                                                    void *data, int status)
5269 {
5270         struct mgmt_rp_add_adv_patterns_monitor rp;
5271         struct mgmt_pending_cmd *cmd = data;
5272         struct adv_monitor *monitor = cmd->user_data;
5273
5274         hci_dev_lock(hdev);
5275
5276         rp.monitor_handle = cpu_to_le16(monitor->handle);
5277
5278         if (!status) {
5279                 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5280                 hdev->adv_monitors_cnt++;
5281                 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5282                         monitor->state = ADV_MONITOR_STATE_REGISTERED;
5283                 hci_update_passive_scan(hdev);
5284         }
5285
5286         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5287                           mgmt_status(status), &rp, sizeof(rp));
5288         mgmt_pending_remove(cmd);
5289
5290         hci_dev_unlock(hdev);
5291         bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5292                    rp.monitor_handle, status);
5293 }
5294
5295 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5296 {
5297         struct mgmt_pending_cmd *cmd = data;
5298         struct adv_monitor *monitor = cmd->user_data;
5299
5300         return hci_add_adv_monitor(hdev, monitor);
5301 }
5302
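     /* Common path for both Add Advertisement Patterns Monitor commands:
      * queue the monitor registration, or free the monitor and return
      * @status on failure.
      */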
5303 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5304                                       struct adv_monitor *m, u8 status,
5305                                       void *data, u16 len, u16 op)
5306 {
5307         struct mgmt_pending_cmd *cmd;
5308         int err;
5309
5310         hci_dev_lock(hdev);
5311
5312         if (status)
5313                 goto unlock;
5314
5315         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5316             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5317             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5318             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5319                 status = MGMT_STATUS_BUSY;
5320                 goto unlock;
5321         }
5322
5323         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5324         if (!cmd) {
5325                 status = MGMT_STATUS_NO_RESOURCES;
5326                 goto unlock;
5327         }
5328
5329         cmd->user_data = m;
5330         err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5331                                  mgmt_add_adv_patterns_monitor_complete);
5332         if (err) {
5333                 if (err == -ENOMEM)
5334                         status = MGMT_STATUS_NO_RESOURCES;
5335                 else
5336                         status = MGMT_STATUS_FAILED;
5337
5338                 goto unlock;
5339         }
5340
5341         hci_dev_unlock(hdev);
5342
5343         return 0;
5344
5345 unlock:
5346         hci_free_adv_monitor(hdev, m);
5347         hci_dev_unlock(hdev);
5348         return mgmt_cmd_status(sk, hdev->id, op, status);
5349 }
5350
5351 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5352                                    struct mgmt_adv_rssi_thresholds *rssi)
5353 {
5354         if (rssi) {
5355                 m->rssi.low_threshold = rssi->low_threshold;
5356                 m->rssi.low_threshold_timeout =
5357                     __le16_to_cpu(rssi->low_threshold_timeout);
5358                 m->rssi.high_threshold = rssi->high_threshold;
5359                 m->rssi.high_threshold_timeout =
5360                     __le16_to_cpu(rssi->high_threshold_timeout);
5361                 m->rssi.sampling_period = rssi->sampling_period;
5362         } else {
5363                 /* Default values. These numbers are the least restrictive
5364                  * parameters for the MSFT API to work, so it behaves as if
5365                  * there were no RSSI parameters to consider. They may need
5366                  * to be changed if other APIs are to be supported.
5367                  */
5368                 m->rssi.low_threshold = -127;
5369                 m->rssi.low_threshold_timeout = 60;
5370                 m->rssi.high_threshold = -127;
5371                 m->rssi.high_threshold_timeout = 0;
5372                 m->rssi.sampling_period = 0;
5373         }
5374 }
5375
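     /* Copy the advertisement patterns into the monitor, rejecting any
      * pattern whose offset/length does not fit within HCI_MAX_AD_LENGTH.
      */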
5376 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5377                                     struct mgmt_adv_pattern *patterns)
5378 {
5379         u8 offset = 0, length = 0;
5380         struct adv_pattern *p = NULL;
5381         int i;
5382
5383         for (i = 0; i < pattern_count; i++) {
5384                 offset = patterns[i].offset;
5385                 length = patterns[i].length;
5386                 if (offset >= HCI_MAX_AD_LENGTH ||
5387                     length > HCI_MAX_AD_LENGTH ||
5388                     (offset + length) > HCI_MAX_AD_LENGTH)
5389                         return MGMT_STATUS_INVALID_PARAMS;
5390
5391                 p = kmalloc(sizeof(*p), GFP_KERNEL);
5392                 if (!p)
5393                         return MGMT_STATUS_NO_RESOURCES;
5394
5395                 p->ad_type = patterns[i].ad_type;
5396                 p->offset = patterns[i].offset;
5397                 p->length = patterns[i].length;
5398                 memcpy(p->value, patterns[i].value, p->length);
5399
5400                 INIT_LIST_HEAD(&p->list);
5401                 list_add(&p->list, &m->patterns);
5402         }
5403
5404         return MGMT_STATUS_SUCCESS;
5405 }
5406
5407 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5408                                     void *data, u16 len)
5409 {
5410         struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5411         struct adv_monitor *m = NULL;
5412         u8 status = MGMT_STATUS_SUCCESS;
5413         size_t expected_size = sizeof(*cp);
5414
5415         BT_DBG("request for %s", hdev->name);
5416
5417         if (len <= sizeof(*cp)) {
5418                 status = MGMT_STATUS_INVALID_PARAMS;
5419                 goto done;
5420         }
5421
5422         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5423         if (len != expected_size) {
5424                 status = MGMT_STATUS_INVALID_PARAMS;
5425                 goto done;
5426         }
5427
5428         m = kzalloc(sizeof(*m), GFP_KERNEL);
5429         if (!m) {
5430                 status = MGMT_STATUS_NO_RESOURCES;
5431                 goto done;
5432         }
5433
5434         INIT_LIST_HEAD(&m->patterns);
5435
5436         parse_adv_monitor_rssi(m, NULL);
5437         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5438
5439 done:
5440         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5441                                           MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5442 }
5443
5444 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5445                                          void *data, u16 len)
5446 {
5447         struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5448         struct adv_monitor *m = NULL;
5449         u8 status = MGMT_STATUS_SUCCESS;
5450         size_t expected_size = sizeof(*cp);
5451
5452         BT_DBG("request for %s", hdev->name);
5453
5454         if (len <= sizeof(*cp)) {
5455                 status = MGMT_STATUS_INVALID_PARAMS;
5456                 goto done;
5457         }
5458
5459         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5460         if (len != expected_size) {
5461                 status = MGMT_STATUS_INVALID_PARAMS;
5462                 goto done;
5463         }
5464
5465         m = kzalloc(sizeof(*m), GFP_KERNEL);
5466         if (!m) {
5467                 status = MGMT_STATUS_NO_RESOURCES;
5468                 goto done;
5469         }
5470
5471         INIT_LIST_HEAD(&m->patterns);
5472
5473         parse_adv_monitor_rssi(m, &cp->rssi);
5474         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5475
5476 done:
5477         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5478                                          MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5479 }
5480
5481 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5482                                              void *data, int status)
5483 {
5484         struct mgmt_rp_remove_adv_monitor rp;
5485         struct mgmt_pending_cmd *cmd = data;
5486         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5487
5488         hci_dev_lock(hdev);
5489
5490         rp.monitor_handle = cp->monitor_handle;
5491
5492         if (!status)
5493                 hci_update_passive_scan(hdev);
5494
5495         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5496                           mgmt_status(status), &rp, sizeof(rp));
5497         mgmt_pending_remove(cmd);
5498
5499         hci_dev_unlock(hdev);
5500         bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5501                    rp.monitor_handle, status);
5502 }
5503
5504 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5505 {
5506         struct mgmt_pending_cmd *cmd = data;
5507         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5508         u16 handle = __le16_to_cpu(cp->monitor_handle);
5509
5510         if (!handle)
5511                 return hci_remove_all_adv_monitor(hdev);
5512
5513         return hci_remove_single_adv_monitor(hdev, handle);
5514 }
5515
5516 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5517                               void *data, u16 len)
5518 {
5519         struct mgmt_pending_cmd *cmd;
5520         int err, status;
5521
5522         hci_dev_lock(hdev);
5523
5524         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5525             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5526             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5527             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5528                 status = MGMT_STATUS_BUSY;
5529                 goto unlock;
5530         }
5531
5532         cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5533         if (!cmd) {
5534                 status = MGMT_STATUS_NO_RESOURCES;
5535                 goto unlock;
5536         }
5537
5538         err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5539                                  mgmt_remove_adv_monitor_complete);
5540
5541         if (err) {
5542                 mgmt_pending_remove(cmd);
5543
5544                 if (err == -ENOMEM)
5545                         status = MGMT_STATUS_NO_RESOURCES;
5546                 else
5547                         status = MGMT_STATUS_FAILED;
5548
5549                 goto unlock;
5550         }
5551
5552         hci_dev_unlock(hdev);
5553
5554         return 0;
5555
5556 unlock:
5557         hci_dev_unlock(hdev);
5558         return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5559                                status);
5560 }
5561
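     /* Completion handler for Read Local OOB Data: translate the HCI reply
      * (legacy or Secure Connections variant) into the management response.
      */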
5562 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5563 {
5564         struct mgmt_rp_read_local_oob_data mgmt_rp;
5565         size_t rp_size = sizeof(mgmt_rp);
5566         struct mgmt_pending_cmd *cmd = data;
5567         struct sk_buff *skb = cmd->skb;
5568         u8 status = mgmt_status(err);
5569
5570         if (!status) {
5571                 if (!skb)
5572                         status = MGMT_STATUS_FAILED;
5573                 else if (IS_ERR(skb))
5574                         status = mgmt_status(PTR_ERR(skb));
5575                 else
5576                         status = mgmt_status(skb->data[0]);
5577         }
5578
5579         bt_dev_dbg(hdev, "status %d", status);
5580
5581         if (status) {
5582                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5583                 goto remove;
5584         }
5585
5586         memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5587
5588         if (!bredr_sc_enabled(hdev)) {
5589                 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5590
5591                 if (skb->len < sizeof(*rp)) {
5592                         mgmt_cmd_status(cmd->sk, hdev->id,
5593                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5594                                         MGMT_STATUS_FAILED);
5595                         goto remove;
5596                 }
5597
5598                 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5599                 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5600
5601                 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5602         } else {
5603                 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5604
5605                 if (skb->len < sizeof(*rp)) {
5606                         mgmt_cmd_status(cmd->sk, hdev->id,
5607                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5608                                         MGMT_STATUS_FAILED);
5609                         goto remove;
5610                 }
5611
5612                 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5613                 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5614
5615                 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5616                 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5617         }
5618
5619         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5620                           MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5621
5622 remove:
5623         if (skb && !IS_ERR(skb))
5624                 kfree_skb(skb);
5625
5626         mgmt_pending_free(cmd);
5627 }
5628
5629 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5630 {
5631         struct mgmt_pending_cmd *cmd = data;
5632
5633         if (bredr_sc_enabled(hdev))
5634                 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5635         else
5636                 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5637
5638         if (IS_ERR(cmd->skb))
5639                 return PTR_ERR(cmd->skb);
5640         else
5641                 return 0;
5642 }
5643
5644 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5645                                void *data, u16 data_len)
5646 {
5647         struct mgmt_pending_cmd *cmd;
5648         int err;
5649
5650         bt_dev_dbg(hdev, "sock %p", sk);
5651
5652         hci_dev_lock(hdev);
5653
5654         if (!hdev_is_powered(hdev)) {
5655                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5656                                       MGMT_STATUS_NOT_POWERED);
5657                 goto unlock;
5658         }
5659
5660         if (!lmp_ssp_capable(hdev)) {
5661                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5662                                       MGMT_STATUS_NOT_SUPPORTED);
5663                 goto unlock;
5664         }
5665
5666         cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5667         if (!cmd)
5668                 err = -ENOMEM;
5669         else
5670                 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5671                                          read_local_oob_data_complete);
5672
5673         if (err < 0) {
5674                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5675                                       MGMT_STATUS_FAILED);
5676
5677                 if (cmd)
5678                         mgmt_pending_free(cmd);
5679         }
5680
5681 unlock:
5682         hci_dev_unlock(hdev);
5683         return err;
5684 }
5685
5686 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5687                                void *data, u16 len)
5688 {
5689         struct mgmt_addr_info *addr = data;
5690         int err;
5691
5692         bt_dev_dbg(hdev, "sock %p", sk);
5693
5694         if (!bdaddr_type_is_valid(addr->type))
5695                 return mgmt_cmd_complete(sk, hdev->id,
5696                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
5697                                          MGMT_STATUS_INVALID_PARAMS,
5698                                          addr, sizeof(*addr));
5699
5700         hci_dev_lock(hdev);
5701
5702         if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5703                 struct mgmt_cp_add_remote_oob_data *cp = data;
5704                 u8 status;
5705
5706                 if (cp->addr.type != BDADDR_BREDR) {
5707                         err = mgmt_cmd_complete(sk, hdev->id,
5708                                                 MGMT_OP_ADD_REMOTE_OOB_DATA,
5709                                                 MGMT_STATUS_INVALID_PARAMS,
5710                                                 &cp->addr, sizeof(cp->addr));
5711                         goto unlock;
5712                 }
5713
5714                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5715                                               cp->addr.type, cp->hash,
5716                                               cp->rand, NULL, NULL);
5717                 if (err < 0)
5718                         status = MGMT_STATUS_FAILED;
5719                 else
5720                         status = MGMT_STATUS_SUCCESS;
5721
5722                 err = mgmt_cmd_complete(sk, hdev->id,
5723                                         MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5724                                         &cp->addr, sizeof(cp->addr));
5725         } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5726                 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5727                 u8 *rand192, *hash192, *rand256, *hash256;
5728                 u8 status;
5729
5730                 if (bdaddr_type_is_le(cp->addr.type)) {
5731                         /* Enforce zero-valued 192-bit parameters as
5732                          * long as legacy SMP OOB isn't implemented.
5733                          */
5734                         if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5735                             memcmp(cp->hash192, ZERO_KEY, 16)) {
5736                                 err = mgmt_cmd_complete(sk, hdev->id,
5737                                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5738                                                         MGMT_STATUS_INVALID_PARAMS,
5739                                                         addr, sizeof(*addr));
5740                                 goto unlock;
5741                         }
5742
5743                         rand192 = NULL;
5744                         hash192 = NULL;
5745                 } else {
5746                         /* If either of the P-192 values is set to zero,
5747                          * just disable OOB data for P-192.
5748                          */
5749                         if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5750                             !memcmp(cp->hash192, ZERO_KEY, 16)) {
5751                                 rand192 = NULL;
5752                                 hash192 = NULL;
5753                         } else {
5754                                 rand192 = cp->rand192;
5755                                 hash192 = cp->hash192;
5756                         }
5757                 }
5758
5759                 /* If either of the P-256 values is set to zero, just
5760                  * disable OOB data for P-256.
5761                  */
5762                 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5763                     !memcmp(cp->hash256, ZERO_KEY, 16)) {
5764                         rand256 = NULL;
5765                         hash256 = NULL;
5766                 } else {
5767                         rand256 = cp->rand256;
5768                         hash256 = cp->hash256;
5769                 }
5770
5771                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5772                                               cp->addr.type, hash192, rand192,
5773                                               hash256, rand256);
5774                 if (err < 0)
5775                         status = MGMT_STATUS_FAILED;
5776                 else
5777                         status = MGMT_STATUS_SUCCESS;
5778
5779                 err = mgmt_cmd_complete(sk, hdev->id,
5780                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5781                                         status, &cp->addr, sizeof(cp->addr));
5782         } else {
5783                 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5784                            len);
5785                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5786                                       MGMT_STATUS_INVALID_PARAMS);
5787         }
5788
5789 unlock:
5790         hci_dev_unlock(hdev);
5791         return err;
5792 }
5793
5794 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5795                                   void *data, u16 len)
5796 {
5797         struct mgmt_cp_remove_remote_oob_data *cp = data;
5798         u8 status;
5799         int err;
5800
5801         bt_dev_dbg(hdev, "sock %p", sk);
5802
5803         if (cp->addr.type != BDADDR_BREDR)
5804                 return mgmt_cmd_complete(sk, hdev->id,
5805                                          MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5806                                          MGMT_STATUS_INVALID_PARAMS,
5807                                          &cp->addr, sizeof(cp->addr));
5808
5809         hci_dev_lock(hdev);
5810
5811         if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5812                 hci_remote_oob_data_clear(hdev);
5813                 status = MGMT_STATUS_SUCCESS;
5814                 goto done;
5815         }
5816
5817         err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5818         if (err < 0)
5819                 status = MGMT_STATUS_INVALID_PARAMS;
5820         else
5821                 status = MGMT_STATUS_SUCCESS;
5822
5823 done:
5824         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5825                                 status, &cp->addr, sizeof(cp->addr));
5826
5827         hci_dev_unlock(hdev);
5828         return err;
5829 }
5830
5831 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5832 {
5833         struct mgmt_pending_cmd *cmd;
5834
5835         bt_dev_dbg(hdev, "status %u", status);
5836
5837         hci_dev_lock(hdev);
5838
5839         cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5840         if (!cmd)
5841                 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5842
5843         if (!cmd)
5844                 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5845
5846         if (cmd) {
5847                 cmd->cmd_complete(cmd, mgmt_status(status));
5848                 mgmt_pending_remove(cmd);
5849         }
5850
5851         hci_dev_unlock(hdev);
5852 }
5853
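     /* Check that the requested discovery type is supported by the
      * controller, returning the management status to report otherwise.
      */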
5854 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5855                                     uint8_t *mgmt_status)
5856 {
5857         switch (type) {
5858         case DISCOV_TYPE_LE:
5859                 *mgmt_status = mgmt_le_support(hdev);
5860                 if (*mgmt_status)
5861                         return false;
5862                 break;
5863         case DISCOV_TYPE_INTERLEAVED:
5864                 *mgmt_status = mgmt_le_support(hdev);
5865                 if (*mgmt_status)
5866                         return false;
5867                 fallthrough;
5868         case DISCOV_TYPE_BREDR:
5869                 *mgmt_status = mgmt_bredr_support(hdev);
5870                 if (*mgmt_status)
5871                         return false;
5872                 break;
5873         default:
5874                 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5875                 return false;
5876         }
5877
5878         return true;
5879 }
5880
5881 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5882 {
5883         struct mgmt_pending_cmd *cmd = data;
5884
5885         if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5886             cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5887             cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5888                 return;
5889
5890         bt_dev_dbg(hdev, "err %d", err);
5891
5892         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5893                           cmd->param, 1);
5894         mgmt_pending_remove(cmd);
5895
5896         hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5897                                 DISCOVERY_FINDING);
5898 }
5899
5900 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5901 {
5902         return hci_start_discovery_sync(hdev);
5903 }
5904
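     /* Common handler for the Start Discovery and Start Limited Discovery
      * commands: validate the request and queue the discovery start.
      */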
5905 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5906                                     u16 op, void *data, u16 len)
5907 {
5908         struct mgmt_cp_start_discovery *cp = data;
5909         struct mgmt_pending_cmd *cmd;
5910         u8 status;
5911         int err;
5912
5913         bt_dev_dbg(hdev, "sock %p", sk);
5914
5915         hci_dev_lock(hdev);
5916
5917         if (!hdev_is_powered(hdev)) {
5918                 err = mgmt_cmd_complete(sk, hdev->id, op,
5919                                         MGMT_STATUS_NOT_POWERED,
5920                                         &cp->type, sizeof(cp->type));
5921                 goto failed;
5922         }
5923
5924         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5925             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5926                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5927                                         &cp->type, sizeof(cp->type));
5928                 goto failed;
5929         }
5930
5931         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5932                 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5933                                         &cp->type, sizeof(cp->type));
5934                 goto failed;
5935         }
5936
5937         /* Can't start discovery when it is paused */
5938         if (hdev->discovery_paused) {
5939                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5940                                         &cp->type, sizeof(cp->type));
5941                 goto failed;
5942         }
5943
5944         /* Clear the discovery filter first to free any previously
5945          * allocated memory for the UUID list.
5946          */
5947         hci_discovery_filter_clear(hdev);
5948
5949         hdev->discovery.type = cp->type;
5950         hdev->discovery.report_invalid_rssi = false;
5951         if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5952                 hdev->discovery.limited = true;
5953         else
5954                 hdev->discovery.limited = false;
5955
5956         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5957         if (!cmd) {
5958                 err = -ENOMEM;
5959                 goto failed;
5960         }
5961
5962         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5963                                  start_discovery_complete);
5964         if (err < 0) {
5965                 mgmt_pending_remove(cmd);
5966                 goto failed;
5967         }
5968
5969         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5970
5971 failed:
5972         hci_dev_unlock(hdev);
5973         return err;
5974 }
5975
5976 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5977                            void *data, u16 len)
5978 {
5979         return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5980                                         data, len);
5981 }
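
/*
 * Illustrative sketch, not part of this file: a minimal user-space client
 * driving the Start Discovery handler above over the HCI control channel.
 * It assumes the standard mgmt framing (a 6-byte little-endian header of
 * opcode, controller index and parameter length, followed by the
 * parameters) and the UAPI constants noted below; error handling and the
 * read of the Command Complete reply are omitted for brevity.
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	#define MGMT_OP_START_DISCOVERY	0x0023	// as in mgmt.h
 *	#define HCI_CHANNEL_CONTROL	3	// as in hci_sock.h
 *	#define HCI_DEV_NONE		0xffff
 *
 *	static int start_discovery(uint16_t index, uint8_t addr_types)
 *	{
 *		struct sockaddr_hci addr = {
 *			.hci_family  = AF_BLUETOOTH,
 *			.hci_dev     = HCI_DEV_NONE,
 *			.hci_channel = HCI_CHANNEL_CONTROL,
 *		};
 *		uint8_t pkt[7];
 *		int sk;
 *
 *		sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *		if (sk < 0 || bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *			return -1;
 *
 *		// Header: opcode, index, parameter length (little-endian)
 *		pkt[0] = MGMT_OP_START_DISCOVERY & 0xff;
 *		pkt[1] = MGMT_OP_START_DISCOVERY >> 8;
 *		pkt[2] = index & 0xff;
 *		pkt[3] = index >> 8;
 *		pkt[4] = 1;
 *		pkt[5] = 0;
 *		// Address type bitmask, e.g. 0x06 = LE public + LE random,
 *		// which the handler above stores as DISCOV_TYPE_LE.
 *		pkt[6] = addr_types;
 *
 *		return write(sk, pkt, sizeof(pkt)) == sizeof(pkt) ? 0 : -1;
 *	}
 */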
5982
5983 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5984                                    void *data, u16 len)
5985 {
5986         return start_discovery_internal(sk, hdev,
5987                                         MGMT_OP_START_LIMITED_DISCOVERY,
5988                                         data, len);
5989 }
5990
5991 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5992                                    void *data, u16 len)
5993 {
5994         struct mgmt_cp_start_service_discovery *cp = data;
5995         struct mgmt_pending_cmd *cmd;
5996         const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5997         u16 uuid_count, expected_len;
5998         u8 status;
5999         int err;
6000
6001         bt_dev_dbg(hdev, "sock %p", sk);
6002
6003         hci_dev_lock(hdev);
6004
6005         if (!hdev_is_powered(hdev)) {
6006                 err = mgmt_cmd_complete(sk, hdev->id,
6007                                         MGMT_OP_START_SERVICE_DISCOVERY,
6008                                         MGMT_STATUS_NOT_POWERED,
6009                                         &cp->type, sizeof(cp->type));
6010                 goto failed;
6011         }
6012
6013         if (hdev->discovery.state != DISCOVERY_STOPPED ||
6014             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6015                 err = mgmt_cmd_complete(sk, hdev->id,
6016                                         MGMT_OP_START_SERVICE_DISCOVERY,
6017                                         MGMT_STATUS_BUSY, &cp->type,
6018                                         sizeof(cp->type));
6019                 goto failed;
6020         }
6021
6022         if (hdev->discovery_paused) {
6023                 err = mgmt_cmd_complete(sk, hdev->id,
6024                                         MGMT_OP_START_SERVICE_DISCOVERY,
6025                                         MGMT_STATUS_BUSY, &cp->type,
6026                                         sizeof(cp->type));
6027                 goto failed;
6028         }
6029
6030         uuid_count = __le16_to_cpu(cp->uuid_count);
6031         if (uuid_count > max_uuid_count) {
6032                 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6033                            uuid_count);
6034                 err = mgmt_cmd_complete(sk, hdev->id,
6035                                         MGMT_OP_START_SERVICE_DISCOVERY,
6036                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6037                                         sizeof(cp->type));
6038                 goto failed;
6039         }
6040
6041         expected_len = sizeof(*cp) + uuid_count * 16;
6042         if (expected_len != len) {
6043                 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6044                            expected_len, len);
6045                 err = mgmt_cmd_complete(sk, hdev->id,
6046                                         MGMT_OP_START_SERVICE_DISCOVERY,
6047                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6048                                         sizeof(cp->type));
6049                 goto failed;
6050         }
6051
6052         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6053                 err = mgmt_cmd_complete(sk, hdev->id,
6054                                         MGMT_OP_START_SERVICE_DISCOVERY,
6055                                         status, &cp->type, sizeof(cp->type));
6056                 goto failed;
6057         }
6058
6059         cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6060                                hdev, data, len);
6061         if (!cmd) {
6062                 err = -ENOMEM;
6063                 goto failed;
6064         }
6065
6066         /* Clear the discovery filter first to free any previously
6067          * allocated memory for the UUID list.
6068          */
6069         hci_discovery_filter_clear(hdev);
6070
6071         hdev->discovery.result_filtering = true;
6072         hdev->discovery.type = cp->type;
6073         hdev->discovery.rssi = cp->rssi;
6074         hdev->discovery.uuid_count = uuid_count;
6075
6076         if (uuid_count > 0) {
6077                 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6078                                                 GFP_KERNEL);
6079                 if (!hdev->discovery.uuids) {
6080                         err = mgmt_cmd_complete(sk, hdev->id,
6081                                                 MGMT_OP_START_SERVICE_DISCOVERY,
6082                                                 MGMT_STATUS_FAILED,
6083                                                 &cp->type, sizeof(cp->type));
6084                         mgmt_pending_remove(cmd);
6085                         goto failed;
6086                 }
6087         }
6088
6089         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6090                                  start_discovery_complete);
6091         if (err < 0) {
6092                 mgmt_pending_remove(cmd);
6093                 goto failed;
6094         }
6095
6096         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6097
6098 failed:
6099         hci_dev_unlock(hdev);
6100         return err;
6101 }
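
/*
 * Worked example of the length check above: each filter UUID is 16 bytes,
 * so a Start Service Discovery request carrying two 128-bit UUIDs must
 * have len == sizeof(*cp) + 2 * 16; any other length is rejected with
 * MGMT_STATUS_INVALID_PARAMS before the discovery is queued.
 */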
6102
6103 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6104 {
6105         struct mgmt_pending_cmd *cmd;
6106
6107         bt_dev_dbg(hdev, "status %u", status);
6108
6109         hci_dev_lock(hdev);
6110
6111         cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6112         if (cmd) {
6113                 cmd->cmd_complete(cmd, mgmt_status(status));
6114                 mgmt_pending_remove(cmd);
6115         }
6116
6117         hci_dev_unlock(hdev);
6118 }
6119
6120 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6121 {
6122         struct mgmt_pending_cmd *cmd = data;
6123
6124         if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6125                 return;
6126
6127         bt_dev_dbg(hdev, "err %d", err);
6128
6129         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6130                           cmd->param, 1);
6131         mgmt_pending_remove(cmd);
6132
6133         if (!err)
6134                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6135 }
6136
6137 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6138 {
6139         return hci_stop_discovery_sync(hdev);
6140 }
6141
6142 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6143                           u16 len)
6144 {
6145         struct mgmt_cp_stop_discovery *mgmt_cp = data;
6146         struct mgmt_pending_cmd *cmd;
6147         int err;
6148
6149         bt_dev_dbg(hdev, "sock %p", sk);
6150
6151         hci_dev_lock(hdev);
6152
6153         if (!hci_discovery_active(hdev)) {
6154                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6155                                         MGMT_STATUS_REJECTED, &mgmt_cp->type,
6156                                         sizeof(mgmt_cp->type));
6157                 goto unlock;
6158         }
6159
6160         if (hdev->discovery.type != mgmt_cp->type) {
6161                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6162                                         MGMT_STATUS_INVALID_PARAMS,
6163                                         &mgmt_cp->type, sizeof(mgmt_cp->type));
6164                 goto unlock;
6165         }
6166
6167         cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6168         if (!cmd) {
6169                 err = -ENOMEM;
6170                 goto unlock;
6171         }
6172
6173         err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6174                                  stop_discovery_complete);
6175         if (err < 0) {
6176                 mgmt_pending_remove(cmd);
6177                 goto unlock;
6178         }
6179
6180         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6181
6182 unlock:
6183         hci_dev_unlock(hdev);
6184         return err;
6185 }
6186
6187 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6188                         u16 len)
6189 {
6190         struct mgmt_cp_confirm_name *cp = data;
6191         struct inquiry_entry *e;
6192         int err;
6193
6194         bt_dev_dbg(hdev, "sock %p", sk);
6195
6196         hci_dev_lock(hdev);
6197
6198         if (!hci_discovery_active(hdev)) {
6199                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6200                                         MGMT_STATUS_FAILED, &cp->addr,
6201                                         sizeof(cp->addr));
6202                 goto failed;
6203         }
6204
6205         e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6206         if (!e) {
6207                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6208                                         MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6209                                         sizeof(cp->addr));
6210                 goto failed;
6211         }
6212
6213         if (cp->name_known) {
6214                 e->name_state = NAME_KNOWN;
6215                 list_del(&e->list);
6216         } else {
6217                 e->name_state = NAME_NEEDED;
6218                 hci_inquiry_cache_update_resolve(hdev, e);
6219         }
6220
6221         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6222                                 &cp->addr, sizeof(cp->addr));
6223
6224 failed:
6225         hci_dev_unlock(hdev);
6226         return err;
6227 }
6228
6229 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6230                         u16 len)
6231 {
6232         struct mgmt_cp_block_device *cp = data;
6233         u8 status;
6234         int err;
6235
6236         bt_dev_dbg(hdev, "sock %p", sk);
6237
6238         if (!bdaddr_type_is_valid(cp->addr.type))
6239                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6240                                          MGMT_STATUS_INVALID_PARAMS,
6241                                          &cp->addr, sizeof(cp->addr));
6242
6243         hci_dev_lock(hdev);
6244
6245         err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6246                                   cp->addr.type);
6247         if (err < 0) {
6248                 status = MGMT_STATUS_FAILED;
6249                 goto done;
6250         }
6251
6252         mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6253                    sk);
6254         status = MGMT_STATUS_SUCCESS;
6255
6256 done:
6257         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6258                                 &cp->addr, sizeof(cp->addr));
6259
6260         hci_dev_unlock(hdev);
6261
6262         return err;
6263 }
6264
6265 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6266                           u16 len)
6267 {
6268         struct mgmt_cp_unblock_device *cp = data;
6269         u8 status;
6270         int err;
6271
6272         bt_dev_dbg(hdev, "sock %p", sk);
6273
6274         if (!bdaddr_type_is_valid(cp->addr.type))
6275                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6276                                          MGMT_STATUS_INVALID_PARAMS,
6277                                          &cp->addr, sizeof(cp->addr));
6278
6279         hci_dev_lock(hdev);
6280
6281         err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6282                                   cp->addr.type);
6283         if (err < 0) {
6284                 status = MGMT_STATUS_INVALID_PARAMS;
6285                 goto done;
6286         }
6287
6288         mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6289                    sk);
6290         status = MGMT_STATUS_SUCCESS;
6291
6292 done:
6293         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6294                                 &cp->addr, sizeof(cp->addr));
6295
6296         hci_dev_unlock(hdev);
6297
6298         return err;
6299 }
6300
6301 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6302 {
6303         return hci_update_eir_sync(hdev);
6304 }
6305
6306 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6307                          u16 len)
6308 {
6309         struct mgmt_cp_set_device_id *cp = data;
6310         int err;
6311         __u16 source;
6312
6313         bt_dev_dbg(hdev, "sock %p", sk);
6314
6315         source = __le16_to_cpu(cp->source);
6316
6317         if (source > 0x0002)
6318                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6319                                        MGMT_STATUS_INVALID_PARAMS);
6320
6321         hci_dev_lock(hdev);
6322
6323         hdev->devid_source = source;
6324         hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6325         hdev->devid_product = __le16_to_cpu(cp->product);
6326         hdev->devid_version = __le16_to_cpu(cp->version);
6327
6328         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6329                                 NULL, 0);
6330
6331         hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6332
6333         hci_dev_unlock(hdev);
6334
6335         return err;
6336 }
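
/*
 * Note on the Source check in set_device_id() above: the Device ID profile
 * uses 0x0000 for "Device ID disabled", 0x0001 for a Bluetooth SIG
 * assigned vendor ID namespace and 0x0002 for a USB Implementer's Forum
 * assigned one, which is why anything above 0x0002 is invalid.
 */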
6337
6338 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6339 {
6340         if (err)
6341                 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6342         else
6343                 bt_dev_dbg(hdev, "status %d", err);
6344 }
6345
6346 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6347 {
6348         struct cmd_lookup match = { NULL, hdev };
6349         u8 instance;
6350         struct adv_info *adv_instance;
6351         u8 status = mgmt_status(err);
6352
6353         if (status) {
6354                 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6355                                      cmd_status_rsp, &status);
6356                 return;
6357         }
6358
6359         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6360                 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6361         else
6362                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6363
6364         mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6365                              &match);
6366
6367         new_settings(hdev, match.sk);
6368
6369         if (match.sk)
6370                 sock_put(match.sk);
6371
6372         /* If "Set Advertising" was just disabled and instance advertising was
6373          * set up earlier, then re-enable multi-instance advertising.
6374          */
6375         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6376             list_empty(&hdev->adv_instances))
6377                 return;
6378
6379         instance = hdev->cur_adv_instance;
6380         if (!instance) {
6381                 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6382                                                         struct adv_info, list);
6383                 if (!adv_instance)
6384                         return;
6385
6386                 instance = adv_instance->instance;
6387         }
6388
6389         err = hci_schedule_adv_instance_sync(hdev, instance, true);
6390
6391         enable_advertising_instance(hdev, err);
6392 }
6393
6394 static int set_adv_sync(struct hci_dev *hdev, void *data)
6395 {
6396         struct mgmt_pending_cmd *cmd = data;
6397         struct mgmt_mode *cp = cmd->param;
6398         u8 val = !!cp->val;
6399
6400         if (cp->val == 0x02)
6401                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6402         else
6403                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6404
6405         cancel_adv_timeout(hdev);
6406
6407         if (val) {
6408                 /* Switch to instance "0" for the Set Advertising setting.
6409                  * We cannot use update_[adv|scan_rsp]_data() here as the
6410                  * HCI_ADVERTISING flag is not yet set.
6411                  */
6412                 hdev->cur_adv_instance = 0x00;
6413
6414                 if (ext_adv_capable(hdev)) {
6415                         hci_start_ext_adv_sync(hdev, 0x00);
6416                 } else {
6417                         hci_update_adv_data_sync(hdev, 0x00);
6418                         hci_update_scan_rsp_data_sync(hdev, 0x00);
6419                         hci_enable_advertising_sync(hdev);
6420                 }
6421         } else {
6422                 hci_disable_advertising_sync(hdev);
6423         }
6424
6425         return 0;
6426 }
6427
6428 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6429                            u16 len)
6430 {
6431         struct mgmt_mode *cp = data;
6432         struct mgmt_pending_cmd *cmd;
6433         u8 val, status;
6434         int err;
6435
6436         bt_dev_dbg(hdev, "sock %p", sk);
6437
6438         status = mgmt_le_support(hdev);
6439         if (status)
6440                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6441                                        status);
6442
6443         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6444                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6445                                        MGMT_STATUS_INVALID_PARAMS);
6446
6447         if (hdev->advertising_paused)
6448                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6449                                        MGMT_STATUS_BUSY);
6450
6451         hci_dev_lock(hdev);
6452
6453         val = !!cp->val;
6454
6455         /* The following conditions mean that we should not do any
6456          * HCI communication but instead send a mgmt response
6457          * directly to user space (after toggling the flag if
6458          * necessary).
6459          */
6460         if (!hdev_is_powered(hdev) ||
6461             (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6462              (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6463             hci_dev_test_flag(hdev, HCI_MESH) ||
6464             hci_conn_num(hdev, LE_LINK) > 0 ||
6465             (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6466              hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6467                 bool changed;
6468
6469                 if (cp->val) {
6470                         hdev->cur_adv_instance = 0x00;
6471                         changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6472                         if (cp->val == 0x02)
6473                                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6474                         else
6475                                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6476                 } else {
6477                         changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6478                         hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6479                 }
6480
6481                 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6482                 if (err < 0)
6483                         goto unlock;
6484
6485                 if (changed)
6486                         err = new_settings(hdev, sk);
6487
6488                 goto unlock;
6489         }
6490
6491         if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6492             pending_find(MGMT_OP_SET_LE, hdev)) {
6493                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6494                                       MGMT_STATUS_BUSY);
6495                 goto unlock;
6496         }
6497
6498         cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6499         if (!cmd)
6500                 err = -ENOMEM;
6501         else
6502                 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6503                                          set_advertising_complete);
6504
6505         if (err < 0 && cmd)
6506                 mgmt_pending_remove(cmd);
6507
6508 unlock:
6509         hci_dev_unlock(hdev);
6510         return err;
6511 }
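
/*
 * Set Advertising accepts 0x00 (off), 0x01 (on) and 0x02 (on and
 * connectable), which is why the handler above additionally toggles the
 * HCI_ADVERTISING_CONNECTABLE flag whenever cp->val is 0x02.
 */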
6512
6513 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6514                               void *data, u16 len)
6515 {
6516         struct mgmt_cp_set_static_address *cp = data;
6517         int err;
6518
6519         bt_dev_dbg(hdev, "sock %p", sk);
6520
6521         if (!lmp_le_capable(hdev))
6522                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6523                                        MGMT_STATUS_NOT_SUPPORTED);
6524
6525         if (hdev_is_powered(hdev))
6526                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6527                                        MGMT_STATUS_REJECTED);
6528
6529         if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6530                 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6531                         return mgmt_cmd_status(sk, hdev->id,
6532                                                MGMT_OP_SET_STATIC_ADDRESS,
6533                                                MGMT_STATUS_INVALID_PARAMS);
6534
6535                 /* The two most significant bits shall be set */
6536                 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6537                         return mgmt_cmd_status(sk, hdev->id,
6538                                                MGMT_OP_SET_STATIC_ADDRESS,
6539                                                MGMT_STATUS_INVALID_PARAMS);
6540         }
6541
6542         hci_dev_lock(hdev);
6543
6544         bacpy(&hdev->static_addr, &cp->bdaddr);
6545
6546         err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6547         if (err < 0)
6548                 goto unlock;
6549
6550         err = new_settings(hdev, sk);
6551
6552 unlock:
6553         hci_dev_unlock(hdev);
6554         return err;
6555 }
6556
6557 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6558                            void *data, u16 len)
6559 {
6560         struct mgmt_cp_set_scan_params *cp = data;
6561         __u16 interval, window;
6562         int err;
6563
6564         bt_dev_dbg(hdev, "sock %p", sk);
6565
6566         if (!lmp_le_capable(hdev))
6567                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6568                                        MGMT_STATUS_NOT_SUPPORTED);
6569
6570         interval = __le16_to_cpu(cp->interval);
6571
6572         if (interval < 0x0004 || interval > 0x4000)
6573                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6574                                        MGMT_STATUS_INVALID_PARAMS);
6575
6576         window = __le16_to_cpu(cp->window);
6577
6578         if (window < 0x0004 || window > 0x4000)
6579                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6580                                        MGMT_STATUS_INVALID_PARAMS);
6581
6582         if (window > interval)
6583                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6584                                        MGMT_STATUS_INVALID_PARAMS);
6585
6586         hci_dev_lock(hdev);
6587
6588         hdev->le_scan_interval = interval;
6589         hdev->le_scan_window = window;
6590
6591         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6592                                 NULL, 0);
6593
6594         /* If background scan is running, restart it so new parameters are
6595          * loaded.
6596          */
6597         if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6598             hdev->discovery.state == DISCOVERY_STOPPED)
6599                 hci_update_passive_scan(hdev);
6600
6601         hci_dev_unlock(hdev);
6602
6603         return err;
6604 }
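
/*
 * The bounds checked above are the LE scan parameter limits in 0.625 ms
 * units (0x0004 = 2.5 ms, 0x4000 = 10.24 s). For example, an interval of
 * 0x0060 with a window of 0x0030 scans for 30 ms out of every 60 ms.
 */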
6605
6606 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6607 {
6608         struct mgmt_pending_cmd *cmd = data;
6609
6610         bt_dev_dbg(hdev, "err %d", err);
6611
6612         if (err) {
6613                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6614                                 mgmt_status(err));
6615         } else {
6616                 struct mgmt_mode *cp = cmd->param;
6617
6618                 if (cp->val)
6619                         hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6620                 else
6621                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6622
6623                 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6624                 new_settings(hdev, cmd->sk);
6625         }
6626
6627         mgmt_pending_free(cmd);
6628 }
6629
6630 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6631 {
6632         struct mgmt_pending_cmd *cmd = data;
6633         struct mgmt_mode *cp = cmd->param;
6634
6635         return hci_write_fast_connectable_sync(hdev, cp->val);
6636 }
6637
6638 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6639                                 void *data, u16 len)
6640 {
6641         struct mgmt_mode *cp = data;
6642         struct mgmt_pending_cmd *cmd;
6643         int err;
6644
6645         bt_dev_dbg(hdev, "sock %p", sk);
6646
6647         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6648             hdev->hci_ver < BLUETOOTH_VER_1_2)
6649                 return mgmt_cmd_status(sk, hdev->id,
6650                                        MGMT_OP_SET_FAST_CONNECTABLE,
6651                                        MGMT_STATUS_NOT_SUPPORTED);
6652
6653         if (cp->val != 0x00 && cp->val != 0x01)
6654                 return mgmt_cmd_status(sk, hdev->id,
6655                                        MGMT_OP_SET_FAST_CONNECTABLE,
6656                                        MGMT_STATUS_INVALID_PARAMS);
6657
6658         hci_dev_lock(hdev);
6659
6660         if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6661                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6662                 goto unlock;
6663         }
6664
6665         if (!hdev_is_powered(hdev)) {
6666                 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6667                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6668                 new_settings(hdev, sk);
6669                 goto unlock;
6670         }
6671
6672         cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6673                                len);
6674         if (!cmd)
6675                 err = -ENOMEM;
6676         else
6677                 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6678                                          fast_connectable_complete);
6679
6680         if (err < 0) {
6681                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6682                                 MGMT_STATUS_FAILED);
6683
6684                 if (cmd)
6685                         mgmt_pending_free(cmd);
6686         }
6687
6688 unlock:
6689         hci_dev_unlock(hdev);
6690
6691         return err;
6692 }
6693
6694 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6695 {
6696         struct mgmt_pending_cmd *cmd = data;
6697
6698         bt_dev_dbg(hdev, "err %d", err);
6699
6700         if (err) {
6701                 u8 mgmt_err = mgmt_status(err);
6702
6703                 /* We need to restore the flag if related HCI commands
6704                  * failed.
6705                  */
6706                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6707
6708                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6709         } else {
6710                 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6711                 new_settings(hdev, cmd->sk);
6712         }
6713
6714         mgmt_pending_free(cmd);
6715 }
6716
6717 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6718 {
6719         int status;
6720
6721         status = hci_write_fast_connectable_sync(hdev, false);
6722
6723         if (!status)
6724                 status = hci_update_scan_sync(hdev);
6725
6726         /* Since only the advertising data flags will change, there
6727          * is no need to update the scan response data.
6728          */
6729         if (!status)
6730                 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6731
6732         return status;
6733 }
6734
6735 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6736 {
6737         struct mgmt_mode *cp = data;
6738         struct mgmt_pending_cmd *cmd;
6739         int err;
6740
6741         bt_dev_dbg(hdev, "sock %p", sk);
6742
6743         if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6744                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6745                                        MGMT_STATUS_NOT_SUPPORTED);
6746
6747         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6748                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6749                                        MGMT_STATUS_REJECTED);
6750
6751         if (cp->val != 0x00 && cp->val != 0x01)
6752                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6753                                        MGMT_STATUS_INVALID_PARAMS);
6754
6755         hci_dev_lock(hdev);
6756
6757         if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6758                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6759                 goto unlock;
6760         }
6761
6762         if (!hdev_is_powered(hdev)) {
6763                 if (!cp->val) {
6764                         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6765                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6766                         hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6767                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6768                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6769                 }
6770
6771                 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6772
6773                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6774                 if (err < 0)
6775                         goto unlock;
6776
6777                 err = new_settings(hdev, sk);
6778                 goto unlock;
6779         }
6780
6781         /* Reject disabling when powered on */
6782         if (!cp->val) {
6783                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6784                                       MGMT_STATUS_REJECTED);
6785                 goto unlock;
6786         } else {
6787                 /* When a dual-mode controller has been configured to
6788                  * operate LE only with a static address, switching
6789                  * BR/EDR back on is not allowed.
6790                  *
6791                  * Dual-mode controllers shall operate with the public
6792                  * address as their identity address for BR/EDR and LE.
6793                  * So reject the attempt to create an invalid configuration.
6794                  *
6795                  * The same restriction applies when Secure Connections
6796                  * has been enabled. For BR/EDR this is a controller feature
6797                  * while for LE it is a host stack feature. This means that
6798                  * switching BR/EDR back on when Secure Connections has been
6799                  * enabled is not a supported transaction.
6800                  */
6801                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6802                     (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6803                      hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6804                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6805                                               MGMT_STATUS_REJECTED);
6806                         goto unlock;
6807                 }
6808         }
6809
6810         cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6811         if (!cmd)
6812                 err = -ENOMEM;
6813         else
6814                 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6815                                          set_bredr_complete);
6816
6817         if (err < 0) {
6818                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6819                                 MGMT_STATUS_FAILED);
6820                 if (cmd)
6821                         mgmt_pending_free(cmd);
6822
6823                 goto unlock;
6824         }
6825
6826         /* We need to flip the bit already here so that
6827          * hci_update_adv_data_sync() generates the correct flags.
6828          */
6829         hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6830
6831 unlock:
6832         hci_dev_unlock(hdev);
6833         return err;
6834 }
6835
6836 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6837 {
6838         struct mgmt_pending_cmd *cmd = data;
6839         struct mgmt_mode *cp;
6840
6841         bt_dev_dbg(hdev, "err %d", err);
6842
6843         if (err) {
6844                 u8 mgmt_err = mgmt_status(err);
6845
6846                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6847                 goto done;
6848         }
6849
6850         cp = cmd->param;
6851
6852         switch (cp->val) {
6853         case 0x00:
6854                 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6855                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6856                 break;
6857         case 0x01:
6858                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6859                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6860                 break;
6861         case 0x02:
6862                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6863                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6864                 break;
6865         }
6866
6867         send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6868         new_settings(hdev, cmd->sk);
6869
6870 done:
6871         mgmt_pending_free(cmd);
6872 }
6873
6874 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6875 {
6876         struct mgmt_pending_cmd *cmd = data;
6877         struct mgmt_mode *cp = cmd->param;
6878         u8 val = !!cp->val;
6879
6880         /* Force write of val */
6881         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6882
6883         return hci_write_sc_support_sync(hdev, val);
6884 }
6885
6886 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6887                            void *data, u16 len)
6888 {
6889         struct mgmt_mode *cp = data;
6890         struct mgmt_pending_cmd *cmd;
6891         u8 val;
6892         int err;
6893
6894         bt_dev_dbg(hdev, "sock %p", sk);
6895
6896         if (!lmp_sc_capable(hdev) &&
6897             !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6898                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6899                                        MGMT_STATUS_NOT_SUPPORTED);
6900
6901         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6902             lmp_sc_capable(hdev) &&
6903             !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6904                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6905                                        MGMT_STATUS_REJECTED);
6906
6907         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6908                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6909                                        MGMT_STATUS_INVALID_PARAMS);
6910
6911         hci_dev_lock(hdev);
6912
6913         if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6914             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6915                 bool changed;
6916
6917                 if (cp->val) {
6918                         changed = !hci_dev_test_and_set_flag(hdev,
6919                                                              HCI_SC_ENABLED);
6920                         if (cp->val == 0x02)
6921                                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6922                         else
6923                                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6924                 } else {
6925                         changed = hci_dev_test_and_clear_flag(hdev,
6926                                                               HCI_SC_ENABLED);
6927                         hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6928                 }
6929
6930                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6931                 if (err < 0)
6932                         goto failed;
6933
6934                 if (changed)
6935                         err = new_settings(hdev, sk);
6936
6937                 goto failed;
6938         }
6939
6940         val = !!cp->val;
6941
6942         if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6943             (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6944                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6945                 goto failed;
6946         }
6947
6948         cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6949         if (!cmd)
6950                 err = -ENOMEM;
6951         else
6952                 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6953                                          set_secure_conn_complete);
6954
6955         if (err < 0) {
6956                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6957                                 MGMT_STATUS_FAILED);
6958                 if (cmd)
6959                         mgmt_pending_free(cmd);
6960         }
6961
6962 failed:
6963         hci_dev_unlock(hdev);
6964         return err;
6965 }
6966
6967 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6968                           void *data, u16 len)
6969 {
6970         struct mgmt_mode *cp = data;
6971         bool changed, use_changed;
6972         int err;
6973
6974         bt_dev_dbg(hdev, "sock %p", sk);
6975
6976         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6977                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6978                                        MGMT_STATUS_INVALID_PARAMS);
6979
6980         hci_dev_lock(hdev);
6981
6982         if (cp->val)
6983                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6984         else
6985                 changed = hci_dev_test_and_clear_flag(hdev,
6986                                                       HCI_KEEP_DEBUG_KEYS);
6987
6988         if (cp->val == 0x02)
6989                 use_changed = !hci_dev_test_and_set_flag(hdev,
6990                                                          HCI_USE_DEBUG_KEYS);
6991         else
6992                 use_changed = hci_dev_test_and_clear_flag(hdev,
6993                                                           HCI_USE_DEBUG_KEYS);
6994
6995         if (hdev_is_powered(hdev) && use_changed &&
6996             hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6997                 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6998                 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6999                              sizeof(mode), &mode);
7000         }
7001
7002         err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7003         if (err < 0)
7004                 goto unlock;
7005
7006         if (changed)
7007                 err = new_settings(hdev, sk);
7008
7009 unlock:
7010         hci_dev_unlock(hdev);
7011         return err;
7012 }
7013
7014 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7015                        u16 len)
7016 {
7017         struct mgmt_cp_set_privacy *cp = cp_data;
7018         bool changed;
7019         int err;
7020
7021         bt_dev_dbg(hdev, "sock %p", sk);
7022
7023         if (!lmp_le_capable(hdev))
7024                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7025                                        MGMT_STATUS_NOT_SUPPORTED);
7026
7027         if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7028                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7029                                        MGMT_STATUS_INVALID_PARAMS);
7030
7031         if (hdev_is_powered(hdev))
7032                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7033                                        MGMT_STATUS_REJECTED);
7034
7035         hci_dev_lock(hdev);
7036
7037         /* If user space supports this command it is also expected to
7038          * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7039          */
7040         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7041
7042         if (cp->privacy) {
7043                 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7044                 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7045                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7046                 hci_adv_instances_set_rpa_expired(hdev, true);
7047                 if (cp->privacy == 0x02)
7048                         hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7049                 else
7050                         hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7051         } else {
7052                 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7053                 memset(hdev->irk, 0, sizeof(hdev->irk));
7054                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7055                 hci_adv_instances_set_rpa_expired(hdev, false);
7056                 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7057         }
7058
7059         err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7060         if (err < 0)
7061                 goto unlock;
7062
7063         if (changed)
7064                 err = new_settings(hdev, sk);
7065
7066 unlock:
7067         hci_dev_unlock(hdev);
7068         return err;
7069 }
7070
7071 static bool irk_is_valid(struct mgmt_irk_info *irk)
7072 {
7073         switch (irk->addr.type) {
7074         case BDADDR_LE_PUBLIC:
7075                 return true;
7076
7077         case BDADDR_LE_RANDOM:
7078                 /* The two most significant bits shall be set */
7079                 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7080                         return false;
7081                 return true;
7082         }
7083
7084         return false;
7085 }
7086
7087 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7088                      u16 len)
7089 {
7090         struct mgmt_cp_load_irks *cp = cp_data;
7091         const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7092                                    sizeof(struct mgmt_irk_info));
7093         u16 irk_count, expected_len;
7094         int i, err;
7095
7096         bt_dev_dbg(hdev, "sock %p", sk);
7097
7098         if (!lmp_le_capable(hdev))
7099                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7100                                        MGMT_STATUS_NOT_SUPPORTED);
7101
7102         irk_count = __le16_to_cpu(cp->irk_count);
7103         if (irk_count > max_irk_count) {
7104                 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7105                            irk_count);
7106                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7107                                        MGMT_STATUS_INVALID_PARAMS);
7108         }
7109
7110         expected_len = struct_size(cp, irks, irk_count);
7111         if (expected_len != len) {
7112                 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7113                            expected_len, len);
7114                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7115                                        MGMT_STATUS_INVALID_PARAMS);
7116         }
7117
7118         bt_dev_dbg(hdev, "irk_count %u", irk_count);
7119
7120         for (i = 0; i < irk_count; i++) {
7121                 struct mgmt_irk_info *key = &cp->irks[i];
7122
7123                 if (!irk_is_valid(key))
7124                         return mgmt_cmd_status(sk, hdev->id,
7125                                                MGMT_OP_LOAD_IRKS,
7126                                                MGMT_STATUS_INVALID_PARAMS);
7127         }
7128
7129         hci_dev_lock(hdev);
7130
7131         hci_smp_irks_clear(hdev);
7132
7133         for (i = 0; i < irk_count; i++) {
7134                 struct mgmt_irk_info *irk = &cp->irks[i];
7135
7136                 if (hci_is_blocked_key(hdev,
7137                                        HCI_BLOCKED_KEY_TYPE_IRK,
7138                                        irk->val)) {
7139                         bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7140                                     &irk->addr.bdaddr);
7141                         continue;
7142                 }
7143
7144                 hci_add_irk(hdev, &irk->addr.bdaddr,
7145                             le_addr_type(irk->addr.type), irk->val,
7146                             BDADDR_ANY);
7147         }
7148
7149         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7150
7151         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7152
7153         hci_dev_unlock(hdev);
7154
7155         return err;
7156 }
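
/*
 * Worked example of the length check above, assuming the packed layouts
 * in mgmt.h: struct mgmt_irk_info is a 7-byte address info header plus a
 * 16-byte IRK value (23 bytes), so loading two IRKs requires
 * len == 2 + 2 * 23 == 48 bytes.
 */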
7157
7158 #ifdef TIZEN_BT
7159 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7160                         void *data, u16 len)
7161 {
7162         struct mgmt_cp_set_advertising_params *cp = data;
7163         __u16 min_interval;
7164         __u16 max_interval;
7165         int err;
7166
7167         BT_DBG("%s", hdev->name);
7168
7169         if (!lmp_le_capable(hdev))
7170                 return mgmt_cmd_status(sk, hdev->id,
7171                                 MGMT_OP_SET_ADVERTISING_PARAMS,
7172                                 MGMT_STATUS_NOT_SUPPORTED);
7173
7174         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7175                 return mgmt_cmd_status(sk, hdev->id,
7176                                 MGMT_OP_SET_ADVERTISING_PARAMS,
7177                                 MGMT_STATUS_BUSY);
7178
7179         min_interval = __le16_to_cpu(cp->interval_min);
7180         max_interval = __le16_to_cpu(cp->interval_max);
7181
7182         if (min_interval > max_interval ||
7183             min_interval < 0x0020 || max_interval > 0x4000)
7184                 return mgmt_cmd_status(sk, hdev->id,
7185                                 MGMT_OP_SET_ADVERTISING_PARAMS,
7186                                 MGMT_STATUS_INVALID_PARAMS);
7187
7188         hci_dev_lock(hdev);
7189
7190         hdev->le_adv_min_interval = min_interval;
7191         hdev->le_adv_max_interval = max_interval;
7192         hdev->adv_filter_policy = cp->filter_policy;
7193         hdev->adv_type = cp->type;
7194
7195         err = mgmt_cmd_complete(sk, hdev->id,
7196                         MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7197
7198         hci_dev_unlock(hdev);
7199
7200         return err;
7201 }
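
/*
 * The bounds checked above match the LE advertising interval limits in
 * 0.625 ms units: 0x0020 corresponds to the 20 ms minimum and 0x4000 to
 * the 10.24 s maximum.
 */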
7202
7203 static void set_advertising_data_complete(struct hci_dev *hdev,
7204                         u8 status, u16 opcode)
7205 {
7206         struct mgmt_cp_set_advertising_data *cp;
7207         struct mgmt_pending_cmd *cmd;
7208
7209         BT_DBG("status 0x%02x", status);
7210
7211         hci_dev_lock(hdev);
7212
7213         cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7214         if (!cmd)
7215                 goto unlock;
7216
7217         cp = cmd->param;
7218
7219         if (status)
7220                 mgmt_cmd_status(cmd->sk, hdev->id,
7221                                 MGMT_OP_SET_ADVERTISING_DATA,
7222                                 mgmt_status(status));
7223         else
7224                 mgmt_cmd_complete(cmd->sk, hdev->id,
7225                                 MGMT_OP_SET_ADVERTISING_DATA, 0,
7226                                 cp, sizeof(*cp));
7227
7228         mgmt_pending_remove(cmd);
7229
7230 unlock:
7231         hci_dev_unlock(hdev);
7232 }
7233
7234 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7235                         void *data, u16 len)
7236 {
7237         struct mgmt_pending_cmd *cmd;
7238         struct hci_request req;
7239         struct mgmt_cp_set_advertising_data *cp = data;
7240         struct hci_cp_le_set_adv_data adv;
7241         int err;
7242
7243         BT_DBG("%s", hdev->name);
7244
7245         if (!lmp_le_capable(hdev)) {
7246                 return mgmt_cmd_status(sk, hdev->id,
7247                                 MGMT_OP_SET_ADVERTISING_DATA,
7248                                 MGMT_STATUS_NOT_SUPPORTED);
7249         }
7250
7251         hci_dev_lock(hdev);
7252
7253         if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7254                 err = mgmt_cmd_status(sk, hdev->id,
7255                                 MGMT_OP_SET_ADVERTISING_DATA,
7256                                 MGMT_STATUS_BUSY);
7257                 goto unlocked;
7258         }
7259
7260         if (len > HCI_MAX_AD_LENGTH) {
7261                 err = mgmt_cmd_status(sk, hdev->id,
7262                                 MGMT_OP_SET_ADVERTISING_DATA,
7263                                 MGMT_STATUS_INVALID_PARAMS);
7264                 goto unlocked;
7265         }
7266
7267         cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7268                                hdev, data, len);
7269         if (!cmd) {
7270                 err = -ENOMEM;
7271                 goto unlocked;
7272         }
7273
7274         hci_req_init(&req, hdev);
7275
7276         memset(&adv, 0, sizeof(adv));
7277         memcpy(adv.data, cp->data, len);
7278         adv.length = len;
7279
7280         hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7281
7282         err = hci_req_run(&req, set_advertising_data_complete);
7283         if (err < 0)
7284                 mgmt_pending_remove(cmd);
7285
7286 unlocked:
7287         hci_dev_unlock(hdev);
7288
7289         return err;
7290 }
7291
7292 /* LE advertising White List (Filter Accept List) helpers */
7293 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7294 {
7295         struct mgmt_cp_add_dev_white_list *cp;
7296         struct mgmt_pending_cmd *cmd;
7297
7298         BT_DBG("status 0x%02x", status);
7299
7300         hci_dev_lock(hdev);
7301
7302         cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7303         if (!cmd)
7304                 goto unlock;
7305
7306         cp = cmd->param;
7307
7308         if (status)
7309                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7310                            mgmt_status(status));
7311         else
7312                 mgmt_cmd_complete(cmd->sk, hdev->id,
7313                                 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7314
7315         mgmt_pending_remove(cmd);
7316
7317 unlock:
7318         hci_dev_unlock(hdev);
7319 }
7320
7321 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7322                            void *data, u16 len)
7323 {
7324         struct mgmt_pending_cmd *cmd;
7325         struct mgmt_cp_add_dev_white_list *cp = data;
7326         struct hci_request req;
7327         int err;
7328
7329         BT_DBG("%s", hdev->name);
7330
7331         if (!lmp_le_capable(hdev))
7332                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7333                                   MGMT_STATUS_NOT_SUPPORTED);
7334
7335         if (!hdev_is_powered(hdev))
7336                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7337                                   MGMT_STATUS_REJECTED);
7338
7339         hci_dev_lock(hdev);
7340
7341         if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7342                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7343                                 MGMT_STATUS_BUSY);
7344                 goto unlocked;
7345         }
7346
7347         cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7348         if (!cmd) {
7349                 err = -ENOMEM;
7350                 goto unlocked;
7351         }
7352
7353         hci_req_init(&req, hdev);
7354
7355         hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
7356
7357         err = hci_req_run(&req, add_white_list_complete);
7358         if (err < 0) {
7359                 mgmt_pending_remove(cmd);
7360                 goto unlocked;
7361         }
7362
7363 unlocked:
7364         hci_dev_unlock(hdev);
7365
7366         return err;
7367 }
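
/*
 * The request built above presumably mirrors the HCI LE Add Device To
 * White List command, whose parameters are a 1-octet address type
 * followed by the 6-octet device address. Note that these Tizen handlers
 * still build requests with the legacy hci_request API instead of the
 * hci_cmd_sync_queue() machinery used by the upstream handlers earlier
 * in this file.
 */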
7368
7369 static void remove_from_white_list_complete(struct hci_dev *hdev,
7370                         u8 status, u16 opcode)
7371 {
7372         struct mgmt_cp_remove_dev_from_white_list *cp;
7373         struct mgmt_pending_cmd *cmd;
7374
7375         BT_DBG("status 0x%02x", status);
7376
7377         hci_dev_lock(hdev);
7378
7379         cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7380         if (!cmd)
7381                 goto unlock;
7382
7383         cp = cmd->param;
7384
7385         if (status)
7386                 mgmt_cmd_status(cmd->sk, hdev->id,
7387                         MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7388                         mgmt_status(status));
7389         else
7390                 mgmt_cmd_complete(cmd->sk, hdev->id,
7391                         MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7392                         cp, sizeof(*cp));
7393
7394         mgmt_pending_remove(cmd);
7395
7396 unlock:
7397         hci_dev_unlock(hdev);
7398 }
7399
7400 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7401                            void *data, u16 len)
7402 {
7403         struct mgmt_pending_cmd *cmd;
7404         struct mgmt_cp_remove_dev_from_white_list *cp = data;
7405         struct hci_request req;
7406         int err;
7407
7408         BT_DBG("%s", hdev->name);
7409
7410         if (!lmp_le_capable(hdev))
7411                 return mgmt_cmd_status(sk, hdev->id,
7412                                 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7413                                 MGMT_STATUS_NOT_SUPPORTED);
7414
7415         if (!hdev_is_powered(hdev))
7416                 return mgmt_cmd_status(sk, hdev->id,
7417                                 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7418                                 MGMT_STATUS_REJECTED);
7419
7420         hci_dev_lock(hdev);
7421
7422         if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7423                 err = mgmt_cmd_status(sk, hdev->id,
7424                                 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7425                                 MGMT_STATUS_BUSY);
7426                 goto unlocked;
7427         }
7428
7429         cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7430                                 hdev, data, len);
7431         if (!cmd) {
7432                 err = -ENOMEM;
7433                 goto unlocked;
7434         }
7435
7436         hci_req_init(&req, hdev);
7437
7438         hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
7439
7440         err = hci_req_run(&req, remove_from_white_list_complete);
7441         if (err < 0) {
7442                 mgmt_pending_remove(cmd);
7443                 goto unlocked;
7444         }
7445
7446 unlocked:
7447         hci_dev_unlock(hdev);
7448
7449         return err;
7450 }
7451
7452 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7453                         u16 opcode)
7454 {
7455         struct mgmt_pending_cmd *cmd;
7456
7457         BT_DBG("status 0x%02x", status);
7458
7459         hci_dev_lock(hdev);
7460
7461         cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7462         if (!cmd)
7463                 goto unlock;
7464
7465         if (status)
7466                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7467                            mgmt_status(status));
7468         else
7469                 mgmt_cmd_complete(cmd->sk, hdev->id,
7470                                 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7471                                 0, NULL, 0);
7472
7473         mgmt_pending_remove(cmd);
7474
7475 unlock:
7476         hci_dev_unlock(hdev);
7477 }
7478
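/* Tizen-specific handler for the Clear Device White List command: queue
 * HCI_OP_LE_CLEAR_WHITE_LIST with no parameters and reply from
 * clear_white_list_complete().
 */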
7479 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7480                            void *data, u16 len)
7481 {
7482         struct mgmt_pending_cmd *cmd;
7483         struct hci_request req;
7484         int err;
7485
7486         BT_DBG("%s", hdev->name);
7487
7488         if (!lmp_le_capable(hdev))
7489                 return mgmt_cmd_status(sk, hdev->id,
7490                                 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7491                                 MGMT_STATUS_NOT_SUPPORTED);
7492
7493         if (!hdev_is_powered(hdev))
7494                 return mgmt_cmd_status(sk, hdev->id,
7495                                 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7496                                 MGMT_STATUS_REJECTED);
7497
7498         hci_dev_lock(hdev);
7499
7500         if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7501                 err = mgmt_cmd_status(sk, hdev->id,
7502                                 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7503                                 MGMT_STATUS_BUSY);
7504                 goto unlocked;
7505         }
7506
7507         cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7508                                 hdev, NULL, 0);
7509         if (!cmd) {
7510                 err = -ENOMEM;
7511                 goto unlocked;
7512         }
7513
7514         hci_req_init(&req, hdev);
7515
7516         hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
7517
7518         err = hci_req_run(&req, clear_white_list_complete);
7519         if (err < 0) {
7520                 mgmt_pending_remove(cmd);
7521                 goto unlocked;
7522         }
7523
7524 unlocked:
7525         hci_dev_unlock(hdev);
7526
7527         return err;
7528 }
7529
7530 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7531                         u16 opcode)
7532 {
7533         struct mgmt_cp_set_scan_rsp_data *cp;
7534         struct mgmt_pending_cmd *cmd;
7535
7536         BT_DBG("status 0x%02x", status);
7537
7538         hci_dev_lock(hdev);
7539
7540         cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7541         if (!cmd)
7542                 goto unlock;
7543
7544         cp = cmd->param;
7545
7546         if (status)
7547                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7548                                 mgmt_status(status));
7549         else
7550                 mgmt_cmd_complete(cmd->sk, hdev->id,
7551                                 MGMT_OP_SET_SCAN_RSP_DATA, 0,
7552                                 cp, sizeof(*cp));
7553
7554         mgmt_pending_remove(cmd);
7555
7556 unlock:
7557         hci_dev_unlock(hdev);
7558 }
7559
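/* Tizen-specific handler for the Set Scan Response Data command: copy up to
 * HCI_MAX_AD_LENGTH bytes of caller-supplied data into the scan response and
 * queue HCI_OP_LE_SET_SCAN_RSP_DATA.
 */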
7560 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7561                         u16 len)
7562 {
7563         struct mgmt_pending_cmd *cmd;
7564         struct hci_request req;
7565         struct mgmt_cp_set_scan_rsp_data *cp = data;
7566         struct hci_cp_le_set_scan_rsp_data rsp;
7567         int err;
7568
7569         BT_DBG("%s", hdev->name);
7570
7571         if (!lmp_le_capable(hdev))
7572                 return mgmt_cmd_status(sk, hdev->id,
7573                                 MGMT_OP_SET_SCAN_RSP_DATA,
7574                                 MGMT_STATUS_NOT_SUPPORTED);
7575
7576         hci_dev_lock(hdev);
7577
7578         if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7579                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7580                                 MGMT_STATUS_BUSY);
7581                 goto unlocked;
7582         }
7583
7584         if (len > HCI_MAX_AD_LENGTH) {
7585                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7586                                 MGMT_STATUS_INVALID_PARAMS);
7587                 goto unlocked;
7588         }
7589
7590         cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7591         if (!cmd) {
7592                 err = -ENOMEM;
7593                 goto unlocked;
7594         }
7595
7596         hci_req_init(&req, hdev);
7597
7598         memset(&rsp, 0, sizeof(rsp));
7599         memcpy(rsp.data, cp->data, len);
7600         rsp.length = len;
7601
7602         hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7603
7604         err = hci_req_run(&req, set_scan_rsp_data_complete);
7605         if (err < 0)
7606                 mgmt_pending_remove(cmd);
7607
7608 unlocked:
7609         hci_dev_unlock(hdev);
7610
7611         return err;
7612 }
7613 #endif /* TIZEN_BT */
7614
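/* An LTK entry is only valid if the initiator field is 0x00 or 0x01 and the
 * address is either public or static random (two most significant bits set).
 */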
7615 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7616 {
7617         if (key->initiator != 0x00 && key->initiator != 0x01)
7618                 return false;
7619
7620         switch (key->addr.type) {
7621         case BDADDR_LE_PUBLIC:
7622                 return true;
7623
7624         case BDADDR_LE_RANDOM:
7625                 /* The two most significant bits shall be set */
7626                 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7627                         return false;
7628                 return true;
7629         }
7630
7631         return false;
7632 }
7633
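/* Replace the kernel's LTK store with the keys supplied by userspace: after
 * validating the key count and each entry, clear the existing SMP LTK list
 * and add every non-blocked key with its matching SMP key type.
 */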
7634 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7635                                void *cp_data, u16 len)
7636 {
7637         struct mgmt_cp_load_long_term_keys *cp = cp_data;
7638         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7639                                    sizeof(struct mgmt_ltk_info));
7640         u16 key_count, expected_len;
7641         int i, err;
7642
7643         bt_dev_dbg(hdev, "sock %p", sk);
7644
7645         if (!lmp_le_capable(hdev))
7646                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7647                                        MGMT_STATUS_NOT_SUPPORTED);
7648
7649         key_count = __le16_to_cpu(cp->key_count);
7650         if (key_count > max_key_count) {
7651                 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7652                            key_count);
7653                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7654                                        MGMT_STATUS_INVALID_PARAMS);
7655         }
7656
7657         expected_len = struct_size(cp, keys, key_count);
7658         if (expected_len != len) {
7659                 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7660                            expected_len, len);
7661                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7662                                        MGMT_STATUS_INVALID_PARAMS);
7663         }
7664
7665         bt_dev_dbg(hdev, "key_count %u", key_count);
7666
7667         for (i = 0; i < key_count; i++) {
7668                 struct mgmt_ltk_info *key = &cp->keys[i];
7669
7670                 if (!ltk_is_valid(key))
7671                         return mgmt_cmd_status(sk, hdev->id,
7672                                                MGMT_OP_LOAD_LONG_TERM_KEYS,
7673                                                MGMT_STATUS_INVALID_PARAMS);
7674         }
7675
7676         hci_dev_lock(hdev);
7677
7678         hci_smp_ltks_clear(hdev);
7679
7680         for (i = 0; i < key_count; i++) {
7681                 struct mgmt_ltk_info *key = &cp->keys[i];
7682                 u8 type, authenticated;
7683
7684                 if (hci_is_blocked_key(hdev,
7685                                        HCI_BLOCKED_KEY_TYPE_LTK,
7686                                        key->val)) {
7687                         bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7688                                     &key->addr.bdaddr);
7689                         continue;
7690                 }
7691
7692                 switch (key->type) {
7693                 case MGMT_LTK_UNAUTHENTICATED:
7694                         authenticated = 0x00;
7695                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7696                         break;
7697                 case MGMT_LTK_AUTHENTICATED:
7698                         authenticated = 0x01;
7699                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7700                         break;
7701                 case MGMT_LTK_P256_UNAUTH:
7702                         authenticated = 0x00;
7703                         type = SMP_LTK_P256;
7704                         break;
7705                 case MGMT_LTK_P256_AUTH:
7706                         authenticated = 0x01;
7707                         type = SMP_LTK_P256;
7708                         break;
7709                 case MGMT_LTK_P256_DEBUG:
7710                         authenticated = 0x00;
7711                         type = SMP_LTK_P256_DEBUG;
7712                         fallthrough;
7713                 default:
7714                         continue;
7715                 }
7716
7717                 hci_add_ltk(hdev, &key->addr.bdaddr,
7718                             le_addr_type(key->addr.type), type, authenticated,
7719                             key->val, key->enc_size, key->ediv, key->rand);
7720         }
7721
7722         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7723                            NULL, 0);
7724
7725         hci_dev_unlock(hdev);
7726
7727         return err;
7728 }
7729
7730 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7731 {
7732         struct mgmt_pending_cmd *cmd = data;
7733         struct hci_conn *conn = cmd->user_data;
7734         struct mgmt_cp_get_conn_info *cp = cmd->param;
7735         struct mgmt_rp_get_conn_info rp;
7736         u8 status;
7737
7738         bt_dev_dbg(hdev, "err %d", err);
7739
7740         memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
7741
7742         status = mgmt_status(err);
7743         if (status == MGMT_STATUS_SUCCESS) {
7744                 rp.rssi = conn->rssi;
7745                 rp.tx_power = conn->tx_power;
7746                 rp.max_tx_power = conn->max_tx_power;
7747         } else {
7748                 rp.rssi = HCI_RSSI_INVALID;
7749                 rp.tx_power = HCI_TX_POWER_INVALID;
7750                 rp.max_tx_power = HCI_TX_POWER_INVALID;
7751         }
7752
7753         mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7754                           &rp, sizeof(rp));
7755
7756         mgmt_pending_free(cmd);
7757 }
7758
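/* Runs from the hci_cmd_sync queue: make sure the connection still exists,
 * then refresh the RSSI and any TX power values that still need to be read
 * before get_conn_info_complete() sends the reply.
 */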
7759 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7760 {
7761         struct mgmt_pending_cmd *cmd = data;
7762         struct mgmt_cp_get_conn_info *cp = cmd->param;
7763         struct hci_conn *conn;
7764         int err;
7765         __le16   handle;
7766
7767         /* Make sure we are still connected */
7768         if (cp->addr.type == BDADDR_BREDR)
7769                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7770                                                &cp->addr.bdaddr);
7771         else
7772                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7773
7774         if (!conn || conn->state != BT_CONNECTED)
7775                 return MGMT_STATUS_NOT_CONNECTED;
7776
7777         cmd->user_data = conn;
7778         handle = cpu_to_le16(conn->handle);
7779
7780         /* Refresh RSSI each time */
7781         err = hci_read_rssi_sync(hdev, handle);
7782
7783         /* For LE links the TX power does not change, so we don't need to
7784          * query for it again once the value is known.
7785          */
7786         if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7787                      conn->tx_power == HCI_TX_POWER_INVALID))
7788                 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7789
7790         /* Max TX power needs to be read only once per connection */
7791         if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7792                 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7793
7794         return err;
7795 }
7796
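/* Get Connection Information: reply with the values cached in hci_conn while
 * they are recent enough, otherwise queue get_conn_info_sync() to refresh
 * them from the controller.
 */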
7797 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7798                          u16 len)
7799 {
7800         struct mgmt_cp_get_conn_info *cp = data;
7801         struct mgmt_rp_get_conn_info rp;
7802         struct hci_conn *conn;
7803         unsigned long conn_info_age;
7804         int err = 0;
7805
7806         bt_dev_dbg(hdev, "sock %p", sk);
7807
7808         memset(&rp, 0, sizeof(rp));
7809         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7810         rp.addr.type = cp->addr.type;
7811
7812         if (!bdaddr_type_is_valid(cp->addr.type))
7813                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7814                                          MGMT_STATUS_INVALID_PARAMS,
7815                                          &rp, sizeof(rp));
7816
7817         hci_dev_lock(hdev);
7818
7819         if (!hdev_is_powered(hdev)) {
7820                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7821                                         MGMT_STATUS_NOT_POWERED, &rp,
7822                                         sizeof(rp));
7823                 goto unlock;
7824         }
7825
7826         if (cp->addr.type == BDADDR_BREDR)
7827                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7828                                                &cp->addr.bdaddr);
7829         else
7830                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7831
7832         if (!conn || conn->state != BT_CONNECTED) {
7833                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7834                                         MGMT_STATUS_NOT_CONNECTED, &rp,
7835                                         sizeof(rp));
7836                 goto unlock;
7837         }
7838
7839         /* To avoid the client trying to guess when to poll again, calculate the
7840          * connection info age as a random value between the min/max set in hdev.
7841          */
7842         conn_info_age = hdev->conn_info_min_age +
7843                         prandom_u32_max(hdev->conn_info_max_age -
7844                                         hdev->conn_info_min_age);
7845
7846         /* Query controller to refresh cached values if they are too old or were
7847          * never read.
7848          */
7849         if (time_after(jiffies, conn->conn_info_timestamp +
7850                        msecs_to_jiffies(conn_info_age)) ||
7851             !conn->conn_info_timestamp) {
7852                 struct mgmt_pending_cmd *cmd;
7853
7854                 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7855                                        len);
7856                 if (!cmd) {
7857                         err = -ENOMEM;
7858                 } else {
7859                         err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7860                                                  cmd, get_conn_info_complete);
7861                 }
7862
7863                 if (err < 0) {
7864                         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7865                                           MGMT_STATUS_FAILED, &rp, sizeof(rp));
7866
7867                         if (cmd)
7868                                 mgmt_pending_free(cmd);
7869
7870                         goto unlock;
7871                 }
7872
7873                 conn->conn_info_timestamp = jiffies;
7874         } else {
7875                 /* Cache is valid, just reply with values cached in hci_conn */
7876                 rp.rssi = conn->rssi;
7877                 rp.tx_power = conn->tx_power;
7878                 rp.max_tx_power = conn->max_tx_power;
7879
7880                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7881                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7882         }
7883
7884 unlock:
7885         hci_dev_unlock(hdev);
7886         return err;
7887 }
7888
7889 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7890 {
7891         struct mgmt_pending_cmd *cmd = data;
7892         struct mgmt_cp_get_clock_info *cp = cmd->param;
7893         struct mgmt_rp_get_clock_info rp;
7894         struct hci_conn *conn = cmd->user_data;
7895         u8 status = mgmt_status(err);
7896
7897         bt_dev_dbg(hdev, "err %d", err);
7898
7899         memset(&rp, 0, sizeof(rp));
7900         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7901         rp.addr.type = cp->addr.type;
7902
7903         if (err)
7904                 goto complete;
7905
7906         rp.local_clock = cpu_to_le32(hdev->clock);
7907
7908         if (conn) {
7909                 rp.piconet_clock = cpu_to_le32(conn->clock);
7910                 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7911         }
7912
7913 complete:
7914         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7915                           sizeof(rp));
7916
7917         mgmt_pending_free(cmd);
7918 }
7919
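/* Runs from the hci_cmd_sync queue: read the local clock first, then the
 * piconet clock for the connection referenced by the pending command.
 */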
7920 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7921 {
7922         struct mgmt_pending_cmd *cmd = data;
7923         struct mgmt_cp_get_clock_info *cp = cmd->param;
7924         struct hci_cp_read_clock hci_cp;
7925         struct hci_conn *conn;
7926
7927         memset(&hci_cp, 0, sizeof(hci_cp));
7928         hci_read_clock_sync(hdev, &hci_cp);
7929
7930         /* Make sure connection still exists */
7931         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7932         if (!conn || conn->state != BT_CONNECTED)
7933                 return MGMT_STATUS_NOT_CONNECTED;
7934
7935         cmd->user_data = conn;
7936         hci_cp.handle = cpu_to_le16(conn->handle);
7937         hci_cp.which = 0x01; /* Piconet clock */
7938
7939         return hci_read_clock_sync(hdev, &hci_cp);
7940 }
7941
7942 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7943                                                                 u16 len)
7944 {
7945         struct mgmt_cp_get_clock_info *cp = data;
7946         struct mgmt_rp_get_clock_info rp;
7947         struct mgmt_pending_cmd *cmd;
7948         struct hci_conn *conn;
7949         int err;
7950
7951         bt_dev_dbg(hdev, "sock %p", sk);
7952
7953         memset(&rp, 0, sizeof(rp));
7954         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7955         rp.addr.type = cp->addr.type;
7956
7957         if (cp->addr.type != BDADDR_BREDR)
7958                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7959                                          MGMT_STATUS_INVALID_PARAMS,
7960                                          &rp, sizeof(rp));
7961
7962         hci_dev_lock(hdev);
7963
7964         if (!hdev_is_powered(hdev)) {
7965                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7966                                         MGMT_STATUS_NOT_POWERED, &rp,
7967                                         sizeof(rp));
7968                 goto unlock;
7969         }
7970
7971         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7972                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7973                                                &cp->addr.bdaddr);
7974                 if (!conn || conn->state != BT_CONNECTED) {
7975                         err = mgmt_cmd_complete(sk, hdev->id,
7976                                                 MGMT_OP_GET_CLOCK_INFO,
7977                                                 MGMT_STATUS_NOT_CONNECTED,
7978                                                 &rp, sizeof(rp));
7979                         goto unlock;
7980                 }
7981         } else {
7982                 conn = NULL;
7983         }
7984
7985         cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7986         if (!cmd)
7987                 err = -ENOMEM;
7988         else
7989                 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7990                                          get_clock_info_complete);
7991
7992         if (err < 0) {
7993                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7994                                         MGMT_STATUS_FAILED, &rp, sizeof(rp));
7995
7996                 if (cmd)
7997                         mgmt_pending_free(cmd);
7998         }
7999
8000
8001 unlock:
8002         hci_dev_unlock(hdev);
8003         return err;
8004 }
8005
8006 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8007 {
8008         struct hci_conn *conn;
8009
8010         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8011         if (!conn)
8012                 return false;
8013
8014         if (conn->dst_type != type)
8015                 return false;
8016
8017         if (conn->state != BT_CONNECTED)
8018                 return false;
8019
8020         return true;
8021 }
8022
8023 /* This function requires the caller holds hdev->lock */
8024 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8025                                u8 addr_type, u8 auto_connect)
8026 {
8027         struct hci_conn_params *params;
8028
8029         params = hci_conn_params_add(hdev, addr, addr_type);
8030         if (!params)
8031                 return -EIO;
8032
8033         if (params->auto_connect == auto_connect)
8034                 return 0;
8035
8036         list_del_init(&params->action);
8037
8038         switch (auto_connect) {
8039         case HCI_AUTO_CONN_DISABLED:
8040         case HCI_AUTO_CONN_LINK_LOSS:
8041                 /* If auto connect is being disabled while we're trying to
8042                  * connect to a device, keep connecting.
8043                  */
8044                 if (params->explicit_connect)
8045                         list_add(&params->action, &hdev->pend_le_conns);
8046                 break;
8047         case HCI_AUTO_CONN_REPORT:
8048                 if (params->explicit_connect)
8049                         list_add(&params->action, &hdev->pend_le_conns);
8050                 else
8051                         list_add(&params->action, &hdev->pend_le_reports);
8052                 break;
8053         case HCI_AUTO_CONN_DIRECT:
8054         case HCI_AUTO_CONN_ALWAYS:
8055                 if (!is_connected(hdev, addr, addr_type))
8056                         list_add(&params->action, &hdev->pend_le_conns);
8057                 break;
8058         }
8059
8060         params->auto_connect = auto_connect;
8061
8062         bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8063                    addr, addr_type, auto_connect);
8064
8065         return 0;
8066 }
8067
8068 static void device_added(struct sock *sk, struct hci_dev *hdev,
8069                          bdaddr_t *bdaddr, u8 type, u8 action)
8070 {
8071         struct mgmt_ev_device_added ev;
8072
8073         bacpy(&ev.addr.bdaddr, bdaddr);
8074         ev.addr.type = type;
8075         ev.action = action;
8076
8077         mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
8078 }
8079
8080 static int add_device_sync(struct hci_dev *hdev, void *data)
8081 {
8082         return hci_update_passive_scan_sync(hdev);
8083 }
8084
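/* Add Device: BR/EDR addresses are added to the accept list, while for LE
 * addresses the requested action is mapped to an auto-connect policy via
 * hci_conn_params_set() and passive scanning is updated.
 */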
8085 static int add_device(struct sock *sk, struct hci_dev *hdev,
8086                       void *data, u16 len)
8087 {
8088         struct mgmt_cp_add_device *cp = data;
8089         u8 auto_conn, addr_type;
8090         struct hci_conn_params *params;
8091         int err;
8092         u32 current_flags = 0;
8093         u32 supported_flags;
8094
8095         bt_dev_dbg(hdev, "sock %p", sk);
8096
8097         if (!bdaddr_type_is_valid(cp->addr.type) ||
8098             !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8099                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8100                                          MGMT_STATUS_INVALID_PARAMS,
8101                                          &cp->addr, sizeof(cp->addr));
8102
8103         if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8104                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8105                                          MGMT_STATUS_INVALID_PARAMS,
8106                                          &cp->addr, sizeof(cp->addr));
8107
8108         hci_dev_lock(hdev);
8109
8110         if (cp->addr.type == BDADDR_BREDR) {
8111                 /* Only the incoming-connection action is supported for now */
8112                 if (cp->action != 0x01) {
8113                         err = mgmt_cmd_complete(sk, hdev->id,
8114                                                 MGMT_OP_ADD_DEVICE,
8115                                                 MGMT_STATUS_INVALID_PARAMS,
8116                                                 &cp->addr, sizeof(cp->addr));
8117                         goto unlock;
8118                 }
8119
8120                 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
8121                                                      &cp->addr.bdaddr,
8122                                                      cp->addr.type, 0);
8123                 if (err)
8124                         goto unlock;
8125
8126                 hci_update_scan(hdev);
8127
8128                 goto added;
8129         }
8130
8131         addr_type = le_addr_type(cp->addr.type);
8132
8133         if (cp->action == 0x02)
8134                 auto_conn = HCI_AUTO_CONN_ALWAYS;
8135         else if (cp->action == 0x01)
8136                 auto_conn = HCI_AUTO_CONN_DIRECT;
8137         else
8138                 auto_conn = HCI_AUTO_CONN_REPORT;
8139
8140         /* Kernel internally uses conn_params with resolvable private
8141          * address, but Add Device allows only identity addresses.
8142          * Make sure it is enforced before calling
8143          * hci_conn_params_lookup.
8144          */
8145         if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8146                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8147                                         MGMT_STATUS_INVALID_PARAMS,
8148                                         &cp->addr, sizeof(cp->addr));
8149                 goto unlock;
8150         }
8151
8152         /* If the connection parameters don't exist for this device,
8153          * they will be created and configured with defaults.
8154          */
8155         if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8156                                 auto_conn) < 0) {
8157                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8158                                         MGMT_STATUS_FAILED, &cp->addr,
8159                                         sizeof(cp->addr));
8160                 goto unlock;
8161         } else {
8162                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8163                                                 addr_type);
8164                 if (params)
8165                         current_flags = params->flags;
8166         }
8167
8168         err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
8169         if (err < 0)
8170                 goto unlock;
8171
8172 added:
8173         device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8174         supported_flags = hdev->conn_flags;
8175         device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8176                              supported_flags, current_flags);
8177
8178         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8179                                 MGMT_STATUS_SUCCESS, &cp->addr,
8180                                 sizeof(cp->addr));
8181
8182 unlock:
8183         hci_dev_unlock(hdev);
8184         return err;
8185 }
8186
8187 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8188                            bdaddr_t *bdaddr, u8 type)
8189 {
8190         struct mgmt_ev_device_removed ev;
8191
8192         bacpy(&ev.addr.bdaddr, bdaddr);
8193         ev.addr.type = type;
8194
8195         mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
8196 }
8197
8198 static int remove_device_sync(struct hci_dev *hdev, void *data)
8199 {
8200         return hci_update_passive_scan_sync(hdev);
8201 }
8202
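/* Remove Device: a specific address is dropped from the accept list (BR/EDR)
 * or has its LE connection parameters deleted; BDADDR_ANY flushes the accept
 * list and the LE auto-connect entries that are no longer needed.
 */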
8203 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8204                          void *data, u16 len)
8205 {
8206         struct mgmt_cp_remove_device *cp = data;
8207         int err;
8208
8209         bt_dev_dbg(hdev, "sock %p", sk);
8210
8211         hci_dev_lock(hdev);
8212
8213         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8214                 struct hci_conn_params *params;
8215                 u8 addr_type;
8216
8217                 if (!bdaddr_type_is_valid(cp->addr.type)) {
8218                         err = mgmt_cmd_complete(sk, hdev->id,
8219                                                 MGMT_OP_REMOVE_DEVICE,
8220                                                 MGMT_STATUS_INVALID_PARAMS,
8221                                                 &cp->addr, sizeof(cp->addr));
8222                         goto unlock;
8223                 }
8224
8225                 if (cp->addr.type == BDADDR_BREDR) {
8226                         err = hci_bdaddr_list_del(&hdev->accept_list,
8227                                                   &cp->addr.bdaddr,
8228                                                   cp->addr.type);
8229                         if (err) {
8230                                 err = mgmt_cmd_complete(sk, hdev->id,
8231                                                         MGMT_OP_REMOVE_DEVICE,
8232                                                         MGMT_STATUS_INVALID_PARAMS,
8233                                                         &cp->addr,
8234                                                         sizeof(cp->addr));
8235                                 goto unlock;
8236                         }
8237
8238                         hci_update_scan(hdev);
8239
8240                         device_removed(sk, hdev, &cp->addr.bdaddr,
8241                                        cp->addr.type);
8242                         goto complete;
8243                 }
8244
8245                 addr_type = le_addr_type(cp->addr.type);
8246
8247                 /* Kernel internally uses conn_params with resolvable private
8248                  * address, but Remove Device allows only identity addresses.
8249                  * Make sure it is enforced before calling
8250                  * hci_conn_params_lookup.
8251                  */
8252                 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8253                         err = mgmt_cmd_complete(sk, hdev->id,
8254                                                 MGMT_OP_REMOVE_DEVICE,
8255                                                 MGMT_STATUS_INVALID_PARAMS,
8256                                                 &cp->addr, sizeof(cp->addr));
8257                         goto unlock;
8258                 }
8259
8260                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8261                                                 addr_type);
8262                 if (!params) {
8263                         err = mgmt_cmd_complete(sk, hdev->id,
8264                                                 MGMT_OP_REMOVE_DEVICE,
8265                                                 MGMT_STATUS_INVALID_PARAMS,
8266                                                 &cp->addr, sizeof(cp->addr));
8267                         goto unlock;
8268                 }
8269
8270                 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8271                     params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8272                         err = mgmt_cmd_complete(sk, hdev->id,
8273                                                 MGMT_OP_REMOVE_DEVICE,
8274                                                 MGMT_STATUS_INVALID_PARAMS,
8275                                                 &cp->addr, sizeof(cp->addr));
8276                         goto unlock;
8277                 }
8278
8279                 list_del(&params->action);
8280                 list_del(&params->list);
8281                 kfree(params);
8282
8283                 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
8284         } else {
8285                 struct hci_conn_params *p, *tmp;
8286                 struct bdaddr_list *b, *btmp;
8287
8288                 if (cp->addr.type) {
8289                         err = mgmt_cmd_complete(sk, hdev->id,
8290                                                 MGMT_OP_REMOVE_DEVICE,
8291                                                 MGMT_STATUS_INVALID_PARAMS,
8292                                                 &cp->addr, sizeof(cp->addr));
8293                         goto unlock;
8294                 }
8295
8296                 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
8297                         device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8298                         list_del(&b->list);
8299                         kfree(b);
8300                 }
8301
8302                 hci_update_scan(hdev);
8303
8304                 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8305                         if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8306                                 continue;
8307                         device_removed(sk, hdev, &p->addr, p->addr_type);
8308                         if (p->explicit_connect) {
8309                                 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8310                                 continue;
8311                         }
8312                         list_del(&p->action);
8313                         list_del(&p->list);
8314                         kfree(p);
8315                 }
8316
8317                 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8318         }
8319
8320         hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
8321
8322 complete:
8323         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
8324                                 MGMT_STATUS_SUCCESS, &cp->addr,
8325                                 sizeof(cp->addr));
8326 unlock:
8327         hci_dev_unlock(hdev);
8328         return err;
8329 }
8330
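/* Load Connection Parameters: validate the supplied list, clear the disabled
 * entries currently stored and add each valid parameter set for future LE
 * connections.
 */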
8331 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
8332                            u16 len)
8333 {
8334         struct mgmt_cp_load_conn_param *cp = data;
8335         const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
8336                                      sizeof(struct mgmt_conn_param));
8337         u16 param_count, expected_len;
8338         int i;
8339
8340         if (!lmp_le_capable(hdev))
8341                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8342                                        MGMT_STATUS_NOT_SUPPORTED);
8343
8344         param_count = __le16_to_cpu(cp->param_count);
8345         if (param_count > max_param_count) {
8346                 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
8347                            param_count);
8348                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8349                                        MGMT_STATUS_INVALID_PARAMS);
8350         }
8351
8352         expected_len = struct_size(cp, params, param_count);
8353         if (expected_len != len) {
8354                 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
8355                            expected_len, len);
8356                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8357                                        MGMT_STATUS_INVALID_PARAMS);
8358         }
8359
8360         bt_dev_dbg(hdev, "param_count %u", param_count);
8361
8362         hci_dev_lock(hdev);
8363
8364         hci_conn_params_clear_disabled(hdev);
8365
8366         for (i = 0; i < param_count; i++) {
8367                 struct mgmt_conn_param *param = &cp->params[i];
8368                 struct hci_conn_params *hci_param;
8369                 u16 min, max, latency, timeout;
8370                 u8 addr_type;
8371
8372                 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
8373                            param->addr.type);
8374
8375                 if (param->addr.type == BDADDR_LE_PUBLIC) {
8376                         addr_type = ADDR_LE_DEV_PUBLIC;
8377                 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8378                         addr_type = ADDR_LE_DEV_RANDOM;
8379                 } else {
8380                         bt_dev_err(hdev, "ignoring invalid connection parameters");
8381                         continue;
8382                 }
8383
8384                 min = le16_to_cpu(param->min_interval);
8385                 max = le16_to_cpu(param->max_interval);
8386                 latency = le16_to_cpu(param->latency);
8387                 timeout = le16_to_cpu(param->timeout);
8388
8389                 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8390                            min, max, latency, timeout);
8391
8392                 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8393                         bt_dev_err(hdev, "ignoring invalid connection parameters");
8394                         continue;
8395                 }
8396
8397                 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
8398                                                 addr_type);
8399                 if (!hci_param) {
8400                         bt_dev_err(hdev, "failed to add connection parameters");
8401                         continue;
8402                 }
8403
8404                 hci_param->conn_min_interval = min;
8405                 hci_param->conn_max_interval = max;
8406                 hci_param->conn_latency = latency;
8407                 hci_param->supervision_timeout = timeout;
8408         }
8409
8410         hci_dev_unlock(hdev);
8411
8412         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8413                                  NULL, 0);
8414 }
8415
8416 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8417                                void *data, u16 len)
8418 {
8419         struct mgmt_cp_set_external_config *cp = data;
8420         bool changed;
8421         int err;
8422
8423         bt_dev_dbg(hdev, "sock %p", sk);
8424
8425         if (hdev_is_powered(hdev))
8426                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8427                                        MGMT_STATUS_REJECTED);
8428
8429         if (cp->config != 0x00 && cp->config != 0x01)
8430                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8431                                          MGMT_STATUS_INVALID_PARAMS);
8432
8433         if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8434                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8435                                        MGMT_STATUS_NOT_SUPPORTED);
8436
8437         hci_dev_lock(hdev);
8438
8439         if (cp->config)
8440                 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8441         else
8442                 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8443
8444         err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8445         if (err < 0)
8446                 goto unlock;
8447
8448         if (!changed)
8449                 goto unlock;
8450
8451         err = new_options(hdev, sk);
8452
8453         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8454                 mgmt_index_removed(hdev);
8455
8456                 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8457                         hci_dev_set_flag(hdev, HCI_CONFIG);
8458                         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8459
8460                         queue_work(hdev->req_workqueue, &hdev->power_on);
8461                 } else {
8462                         set_bit(HCI_RAW, &hdev->flags);
8463                         mgmt_index_added(hdev);
8464                 }
8465         }
8466
8467 unlock:
8468         hci_dev_unlock(hdev);
8469         return err;
8470 }
8471
8472 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8473                               void *data, u16 len)
8474 {
8475         struct mgmt_cp_set_public_address *cp = data;
8476         bool changed;
8477         int err;
8478
8479         bt_dev_dbg(hdev, "sock %p", sk);
8480
8481         if (hdev_is_powered(hdev))
8482                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8483                                        MGMT_STATUS_REJECTED);
8484
8485         if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8486                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8487                                        MGMT_STATUS_INVALID_PARAMS);
8488
8489         if (!hdev->set_bdaddr)
8490                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8491                                        MGMT_STATUS_NOT_SUPPORTED);
8492
8493         hci_dev_lock(hdev);
8494
8495         changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8496         bacpy(&hdev->public_addr, &cp->bdaddr);
8497
8498         err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8499         if (err < 0)
8500                 goto unlock;
8501
8502         if (!changed)
8503                 goto unlock;
8504
8505         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8506                 err = new_options(hdev, sk);
8507
8508         if (is_configured(hdev)) {
8509                 mgmt_index_removed(hdev);
8510
8511                 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8512
8513                 hci_dev_set_flag(hdev, HCI_CONFIG);
8514                 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8515
8516                 queue_work(hdev->req_workqueue, &hdev->power_on);
8517         }
8518
8519 unlock:
8520         hci_dev_unlock(hdev);
8521         return err;
8522 }
8523
8524 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8525                                              int err)
8526 {
8527         const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8528         struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8529         u8 *h192, *r192, *h256, *r256;
8530         struct mgmt_pending_cmd *cmd = data;
8531         struct sk_buff *skb = cmd->skb;
8532         u8 status = mgmt_status(err);
8533         u16 eir_len;
8534
8535         if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8536                 return;
8537
8538         if (!status) {
8539                 if (!skb)
8540                         status = MGMT_STATUS_FAILED;
8541                 else if (IS_ERR(skb))
8542                         status = mgmt_status(PTR_ERR(skb));
8543                 else
8544                         status = mgmt_status(skb->data[0]);
8545         }
8546
8547         bt_dev_dbg(hdev, "status %u", status);
8548
8549         mgmt_cp = cmd->param;
8550
8551         if (status) {
8552                 status = mgmt_status(status);
8553                 eir_len = 0;
8554
8555                 h192 = NULL;
8556                 r192 = NULL;
8557                 h256 = NULL;
8558                 r256 = NULL;
8559         } else if (!bredr_sc_enabled(hdev)) {
8560                 struct hci_rp_read_local_oob_data *rp;
8561
8562                 if (skb->len != sizeof(*rp)) {
8563                         status = MGMT_STATUS_FAILED;
8564                         eir_len = 0;
8565                 } else {
8566                         status = MGMT_STATUS_SUCCESS;
8567                         rp = (void *)skb->data;
8568
8569                         eir_len = 5 + 18 + 18;
8570                         h192 = rp->hash;
8571                         r192 = rp->rand;
8572                         h256 = NULL;
8573                         r256 = NULL;
8574                 }
8575         } else {
8576                 struct hci_rp_read_local_oob_ext_data *rp;
8577
8578                 if (skb->len != sizeof(*rp)) {
8579                         status = MGMT_STATUS_FAILED;
8580                         eir_len = 0;
8581                 } else {
8582                         status = MGMT_STATUS_SUCCESS;
8583                         rp = (void *)skb->data;
8584
8585                         if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8586                                 eir_len = 5 + 18 + 18;
8587                                 h192 = NULL;
8588                                 r192 = NULL;
8589                         } else {
8590                                 eir_len = 5 + 18 + 18 + 18 + 18;
8591                                 h192 = rp->hash192;
8592                                 r192 = rp->rand192;
8593                         }
8594
8595                         h256 = rp->hash256;
8596                         r256 = rp->rand256;
8597                 }
8598         }
8599
8600         mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8601         if (!mgmt_rp)
8602                 goto done;
8603
8604         if (eir_len == 0)
8605                 goto send_rsp;
8606
8607         eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8608                                   hdev->dev_class, 3);
8609
8610         if (h192 && r192) {
8611                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8612                                           EIR_SSP_HASH_C192, h192, 16);
8613                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8614                                           EIR_SSP_RAND_R192, r192, 16);
8615         }
8616
8617         if (h256 && r256) {
8618                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8619                                           EIR_SSP_HASH_C256, h256, 16);
8620                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8621                                           EIR_SSP_RAND_R256, r256, 16);
8622         }
8623
8624 send_rsp:
8625         mgmt_rp->type = mgmt_cp->type;
8626         mgmt_rp->eir_len = cpu_to_le16(eir_len);
8627
8628         err = mgmt_cmd_complete(cmd->sk, hdev->id,
8629                                 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8630                                 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8631         if (err < 0 || status)
8632                 goto done;
8633
8634         hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8635
8636         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8637                                  mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8638                                  HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8639 done:
8640         if (skb && !IS_ERR(skb))
8641                 kfree_skb(skb);
8642
8643         kfree(mgmt_rp);
8644         mgmt_pending_remove(cmd);
8645 }
8646
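/* Queue a synchronous read of the local OOB data on behalf of Read Local OOB
 * Extended Data; the result is reported from read_local_oob_ext_data_complete().
 */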
8647 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8648                                   struct mgmt_cp_read_local_oob_ext_data *cp)
8649 {
8650         struct mgmt_pending_cmd *cmd;
8651         int err;
8652
8653         cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8654                                cp, sizeof(*cp));
8655         if (!cmd)
8656                 return -ENOMEM;
8657
8658         err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8659                                  read_local_oob_ext_data_complete);
8660
8661         if (err < 0) {
8662                 mgmt_pending_remove(cmd);
8663                 return err;
8664         }
8665
8666         return 0;
8667 }
8668
8669 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8670                                    void *data, u16 data_len)
8671 {
8672         struct mgmt_cp_read_local_oob_ext_data *cp = data;
8673         struct mgmt_rp_read_local_oob_ext_data *rp;
8674         size_t rp_len;
8675         u16 eir_len;
8676         u8 status, flags, role, addr[7], hash[16], rand[16];
8677         int err;
8678
8679         bt_dev_dbg(hdev, "sock %p", sk);
8680
8681         if (hdev_is_powered(hdev)) {
8682                 switch (cp->type) {
8683                 case BIT(BDADDR_BREDR):
8684                         status = mgmt_bredr_support(hdev);
8685                         if (status)
8686                                 eir_len = 0;
8687                         else
8688                                 eir_len = 5;
8689                         break;
8690                 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8691                         status = mgmt_le_support(hdev);
8692                         if (status)
8693                                 eir_len = 0;
8694                         else
8695                                 eir_len = 9 + 3 + 18 + 18 + 3;
8696                         break;
8697                 default:
8698                         status = MGMT_STATUS_INVALID_PARAMS;
8699                         eir_len = 0;
8700                         break;
8701                 }
8702         } else {
8703                 status = MGMT_STATUS_NOT_POWERED;
8704                 eir_len = 0;
8705         }
8706
8707         rp_len = sizeof(*rp) + eir_len;
8708         rp = kmalloc(rp_len, GFP_ATOMIC);
8709         if (!rp)
8710                 return -ENOMEM;
8711
8712         if (!status && !lmp_ssp_capable(hdev)) {
8713                 status = MGMT_STATUS_NOT_SUPPORTED;
8714                 eir_len = 0;
8715         }
8716
8717         if (status)
8718                 goto complete;
8719
8720         hci_dev_lock(hdev);
8721
8722         eir_len = 0;
8723         switch (cp->type) {
8724         case BIT(BDADDR_BREDR):
8725                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8726                         err = read_local_ssp_oob_req(hdev, sk, cp);
8727                         hci_dev_unlock(hdev);
8728                         if (!err)
8729                                 goto done;
8730
8731                         status = MGMT_STATUS_FAILED;
8732                         goto complete;
8733                 } else {
8734                         eir_len = eir_append_data(rp->eir, eir_len,
8735                                                   EIR_CLASS_OF_DEV,
8736                                                   hdev->dev_class, 3);
8737                 }
8738                 break;
8739         case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8740                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8741                     smp_generate_oob(hdev, hash, rand) < 0) {
8742                         hci_dev_unlock(hdev);
8743                         status = MGMT_STATUS_FAILED;
8744                         goto complete;
8745                 }
8746
8747                 /* This should return the active RPA, but since the RPA
8748                  * is only programmed on demand, it is really hard to fill
8749                  * this in at the moment. For now disallow retrieving
8750                  * local out-of-band data when privacy is in use.
8751                  *
8752                  * Returning the identity address will not help here since
8753                  * pairing happens before the identity resolving key is
8754                  * known and thus the connection establishment happens
8755                  * based on the RPA and not the identity address.
8756                  */
8757                 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8758                         hci_dev_unlock(hdev);
8759                         status = MGMT_STATUS_REJECTED;
8760                         goto complete;
8761                 }
8762
8763                 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8764                    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8765                    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8766                     bacmp(&hdev->static_addr, BDADDR_ANY))) {
8767                         memcpy(addr, &hdev->static_addr, 6);
8768                         addr[6] = 0x01;
8769                 } else {
8770                         memcpy(addr, &hdev->bdaddr, 6);
8771                         addr[6] = 0x00;
8772                 }
8773
8774                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8775                                           addr, sizeof(addr));
8776
8777                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8778                         role = 0x02;
8779                 else
8780                         role = 0x01;
8781
8782                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8783                                           &role, sizeof(role));
8784
8785                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8786                         eir_len = eir_append_data(rp->eir, eir_len,
8787                                                   EIR_LE_SC_CONFIRM,
8788                                                   hash, sizeof(hash));
8789
8790                         eir_len = eir_append_data(rp->eir, eir_len,
8791                                                   EIR_LE_SC_RANDOM,
8792                                                   rand, sizeof(rand));
8793                 }
8794
8795                 flags = mgmt_get_adv_discov_flags(hdev);
8796
8797                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8798                         flags |= LE_AD_NO_BREDR;
8799
8800                 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8801                                           &flags, sizeof(flags));
8802                 break;
8803         }
8804
8805         hci_dev_unlock(hdev);
8806
8807         hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8808
8809         status = MGMT_STATUS_SUCCESS;
8810
8811 complete:
8812         rp->type = cp->type;
8813         rp->eir_len = cpu_to_le16(eir_len);
8814
8815         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8816                                 status, rp, sizeof(*rp) + eir_len);
8817         if (err < 0 || status)
8818                 goto done;
8819
8820         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8821                                  rp, sizeof(*rp) + eir_len,
8822                                  HCI_MGMT_OOB_DATA_EVENTS, sk);
8823
8824 done:
8825         kfree(rp);
8826
8827         return err;
8828 }
8829
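/* Advertising flags this kernel can honour for the given controller. The base
 * set is always reported; TX power and the secondary PHY bits are only added
 * when the controller supports extended advertising (or reports a valid
 * advertising TX power).
 */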
8830 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8831 {
8832         u32 flags = 0;
8833
8834         flags |= MGMT_ADV_FLAG_CONNECTABLE;
8835         flags |= MGMT_ADV_FLAG_DISCOV;
8836         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8837         flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8838         flags |= MGMT_ADV_FLAG_APPEARANCE;
8839         flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8840         flags |= MGMT_ADV_PARAM_DURATION;
8841         flags |= MGMT_ADV_PARAM_TIMEOUT;
8842         flags |= MGMT_ADV_PARAM_INTERVALS;
8843         flags |= MGMT_ADV_PARAM_TX_POWER;
8844         flags |= MGMT_ADV_PARAM_SCAN_RSP;
8845
8846         /* With extended advertising the TX_POWER returned from Set Adv
8847          * Param will always be valid.
8848          */
8849         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8850                 flags |= MGMT_ADV_FLAG_TX_POWER;
8851
8852         if (ext_adv_capable(hdev)) {
8853                 flags |= MGMT_ADV_FLAG_SEC_1M;
8854                 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8855                 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8856
8857                 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8858                         flags |= MGMT_ADV_FLAG_SEC_2M;
8859
8860                 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8861                         flags |= MGMT_ADV_FLAG_SEC_CODED;
8862         }
8863
8864         return flags;
8865 }
8866
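/* MGMT_OP_READ_ADV_FEATURES handler: report the supported advertising flags,
 * the advertising/scan response data size limits and the currently registered
 * instance numbers.
 */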
8867 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8868                              void *data, u16 data_len)
8869 {
8870         struct mgmt_rp_read_adv_features *rp;
8871         size_t rp_len;
8872         int err;
8873         struct adv_info *adv_instance;
8874         u32 supported_flags;
8875         u8 *instance;
8876
8877         bt_dev_dbg(hdev, "sock %p", sk);
8878
8879         if (!lmp_le_capable(hdev))
8880                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8881                                        MGMT_STATUS_REJECTED);
8882
8883         hci_dev_lock(hdev);
8884
8885         rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8886         rp = kmalloc(rp_len, GFP_ATOMIC);
8887         if (!rp) {
8888                 hci_dev_unlock(hdev);
8889                 return -ENOMEM;
8890         }
8891
8892         supported_flags = get_supported_adv_flags(hdev);
8893
8894         rp->supported_flags = cpu_to_le32(supported_flags);
8895         rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
8896         rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
8897         rp->max_instances = hdev->le_num_of_adv_sets;
8898         rp->num_instances = hdev->adv_instance_cnt;
8899
8900         instance = rp->instance;
8901         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8902                 /* Only instances 1-le_num_of_adv_sets are externally visible */
8903                 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8904                         *instance = adv_instance->instance;
8905                         instance++;
8906                 } else {
8907                         rp->num_instances--;
8908                         rp_len--;
8909                 }
8910         }
8911
8912         hci_dev_unlock(hdev);
8913
8914         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8915                                 MGMT_STATUS_SUCCESS, rp, rp_len);
8916
8917         kfree(rp);
8918
8919         return err;
8920 }
8921
8922 static u8 calculate_name_len(struct hci_dev *hdev)
8923 {
8924         u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8925
8926         return eir_append_local_name(hdev, buf, 0);
8927 }
8928
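/* Maximum TLV payload userspace may supply for an instance: from the full AD
 * length, reserve room for fields the kernel appends itself, namely Flags
 * (3 bytes) and TX power (3 bytes) in the advertising data, and the local name
 * and Appearance (4 bytes) in the scan response, depending on which
 * MGMT_ADV_FLAG_* bits are set.
 */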
8929 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8930                            bool is_adv_data)
8931 {
8932         u8 max_len = HCI_MAX_AD_LENGTH;
8933
8934         if (is_adv_data) {
8935                 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8936                                  MGMT_ADV_FLAG_LIMITED_DISCOV |
8937                                  MGMT_ADV_FLAG_MANAGED_FLAGS))
8938                         max_len -= 3;
8939
8940                 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8941                         max_len -= 3;
8942         } else {
8943                 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8944                         max_len -= calculate_name_len(hdev);
8945
8946                 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8947                         max_len -= 4;
8948         }
8949
8950         return max_len;
8951 }
8952
8953 static bool flags_managed(u32 adv_flags)
8954 {
8955         return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8956                             MGMT_ADV_FLAG_LIMITED_DISCOV |
8957                             MGMT_ADV_FLAG_MANAGED_FLAGS);
8958 }
8959
8960 static bool tx_power_managed(u32 adv_flags)
8961 {
8962         return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8963 }
8964
8965 static bool name_managed(u32 adv_flags)
8966 {
8967         return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8968 }
8969
8970 static bool appearance_managed(u32 adv_flags)
8971 {
8972         return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8973 }
8974
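/* Walk the length/type/value structures supplied by userspace and reject data
 * that is malformed or that duplicates a field the kernel manages through the
 * advertising flags (Flags, TX power, local name, Appearance). For example the
 * common sequence 02 01 06 is length 0x02, type EIR_FLAGS, value 0x06; it is
 * rejected when the Flags field is kernel managed, and never allowed in scan
 * response data.
 */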
8975 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8976                               u8 len, bool is_adv_data)
8977 {
8978         int i, cur_len;
8979         u8 max_len;
8980
8981         max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8982
8983         if (len > max_len)
8984                 return false;
8985
8986         /* Make sure that the data is correctly formatted. */
8987         for (i = 0; i < len; i += (cur_len + 1)) {
8988                 cur_len = data[i];
8989
8990                 if (!cur_len)
8991                         continue;
8992
8993                 if (data[i + 1] == EIR_FLAGS &&
8994                     (!is_adv_data || flags_managed(adv_flags)))
8995                         return false;
8996
8997                 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8998                         return false;
8999
9000                 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9001                         return false;
9002
9003                 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9004                         return false;
9005
9006                 if (data[i + 1] == EIR_APPEARANCE &&
9007                     appearance_managed(adv_flags))
9008                         return false;
9009
9010                 /* If the current field length would exceed the total data
9011                  * length, then it's invalid.
9012                  */
9013                 if (i + cur_len >= len)
9014                         return false;
9015         }
9016
9017         return true;
9018 }
9019
9020 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
9021 {
9022         u32 supported_flags, phy_flags;
9023
9024         /* The current implementation only supports a subset of the specified
9025          * flags, and the secondary PHY (sec) flags must be mutually exclusive.
9026          */
9027         supported_flags = get_supported_adv_flags(hdev);
9028         phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
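        /* phy_flags & -phy_flags isolates the lowest set bit, so the XOR below
         * is non-zero exactly when more than one secondary PHY flag is set,
         * e.g. SEC_1M together with SEC_CODED.
         */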
9029         if (adv_flags & ~supported_flags ||
9030             ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9031                 return false;
9032
9033         return true;
9034 }
9035
9036 static bool adv_busy(struct hci_dev *hdev)
9037 {
9038         return pending_find(MGMT_OP_SET_LE, hdev);
9039 }
9040
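/* Common completion helper for the Add Advertising paths: on success simply
 * clear the pending flag of the affected instances, on failure remove every
 * still-pending instance and tell userspace that it is gone.
 */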
9041 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
9042                              int err)
9043 {
9044         struct adv_info *adv, *n;
9045
9046         bt_dev_dbg(hdev, "err %d", err);
9047
9048         hci_dev_lock(hdev);
9049
9050         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
9051                 u8 instance;
9052
9053                 if (!adv->pending)
9054                         continue;
9055
9056                 if (!err) {
9057                         adv->pending = false;
9058                         continue;
9059                 }
9060
9061                 instance = adv->instance;
9062
9063                 if (hdev->cur_adv_instance == instance)
9064                         cancel_adv_timeout(hdev);
9065
9066                 hci_remove_adv_instance(hdev, instance);
9067                 mgmt_advertising_removed(sk, hdev, instance);
9068         }
9069
9070         hci_dev_unlock(hdev);
9071 }
9072
9073 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
9074 {
9075         struct mgmt_pending_cmd *cmd = data;
9076         struct mgmt_cp_add_advertising *cp = cmd->param;
9077         struct mgmt_rp_add_advertising rp;
9078
9079         memset(&rp, 0, sizeof(rp));
9080
9081         rp.instance = cp->instance;
9082
9083         if (err)
9084                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9085                                 mgmt_status(err));
9086         else
9087                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9088                                   mgmt_status(err), &rp, sizeof(rp));
9089
9090         add_adv_complete(hdev, cmd->sk, cp->instance, err);
9091
9092         mgmt_pending_free(cmd);
9093 }
9094
9095 static int add_advertising_sync(struct hci_dev *hdev, void *data)
9096 {
9097         struct mgmt_pending_cmd *cmd = data;
9098         struct mgmt_cp_add_advertising *cp = cmd->param;
9099
9100         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9101 }
9102
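/* MGMT_OP_ADD_ADVERTISING handler: validate the instance number, flags and
 * TLV payload, register (or replace) the advertising instance and, when the
 * controller is powered and not in legacy HCI_ADVERTISING mode, queue the
 * work that programs and enables the instance.
 */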
9103 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9104                            void *data, u16 data_len)
9105 {
9106         struct mgmt_cp_add_advertising *cp = data;
9107         struct mgmt_rp_add_advertising rp;
9108         u32 flags;
9109         u8 status;
9110         u16 timeout, duration;
9111         unsigned int prev_instance_cnt;
9112         u8 schedule_instance = 0;
9113         struct adv_info *adv, *next_instance;
9114         int err;
9115         struct mgmt_pending_cmd *cmd;
9116
9117         bt_dev_dbg(hdev, "sock %p", sk);
9118
9119         status = mgmt_le_support(hdev);
9120         if (status)
9121                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9122                                        status);
9123
9124         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9125                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9126                                        MGMT_STATUS_INVALID_PARAMS);
9127
9128         if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
9129                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9130                                        MGMT_STATUS_INVALID_PARAMS);
9131
9132         flags = __le32_to_cpu(cp->flags);
9133         timeout = __le16_to_cpu(cp->timeout);
9134         duration = __le16_to_cpu(cp->duration);
9135
9136         if (!requested_adv_flags_are_valid(hdev, flags))
9137                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9138                                        MGMT_STATUS_INVALID_PARAMS);
9139
9140         hci_dev_lock(hdev);
9141
9142         if (timeout && !hdev_is_powered(hdev)) {
9143                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9144                                       MGMT_STATUS_REJECTED);
9145                 goto unlock;
9146         }
9147
9148         if (adv_busy(hdev)) {
9149                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9150                                       MGMT_STATUS_BUSY);
9151                 goto unlock;
9152         }
9153
9154         if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
9155             !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
9156                                cp->scan_rsp_len, false)) {
9157                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9158                                       MGMT_STATUS_INVALID_PARAMS);
9159                 goto unlock;
9160         }
9161
9162         prev_instance_cnt = hdev->adv_instance_cnt;
9163
9164         adv = hci_add_adv_instance(hdev, cp->instance, flags,
9165                                    cp->adv_data_len, cp->data,
9166                                    cp->scan_rsp_len,
9167                                    cp->data + cp->adv_data_len,
9168                                    timeout, duration,
9169                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
9170                                    hdev->le_adv_min_interval,
9171                                    hdev->le_adv_max_interval, 0);
9172         if (IS_ERR(adv)) {
9173                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9174                                       MGMT_STATUS_FAILED);
9175                 goto unlock;
9176         }
9177
9178         /* Only trigger an advertising added event if a new instance was
9179          * actually added.
9180          */
9181         if (hdev->adv_instance_cnt > prev_instance_cnt)
9182                 mgmt_advertising_added(sk, hdev, cp->instance);
9183
9184         if (hdev->cur_adv_instance == cp->instance) {
9185                 /* If the currently advertised instance is being changed then
9186                  * cancel the current advertising and schedule the next
9187                  * instance. If there is only one instance then the overridden
9188                  * advertising data will be visible right away.
9189                  */
9190                 cancel_adv_timeout(hdev);
9191
9192                 next_instance = hci_get_next_instance(hdev, cp->instance);
9193                 if (next_instance)
9194                         schedule_instance = next_instance->instance;
9195         } else if (!hdev->adv_instance_timeout) {
9196                 /* Immediately advertise the new instance if no other
9197                  * instance is currently being advertised.
9198                  */
9199                 schedule_instance = cp->instance;
9200         }
9201
9202         /* If the HCI_ADVERTISING flag is set or the device isn't powered or
9203          * there is no instance to be advertised then we have no HCI
9204          * communication to make. Simply return.
9205          */
9206         if (!hdev_is_powered(hdev) ||
9207             hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9208             !schedule_instance) {
9209                 rp.instance = cp->instance;
9210                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9211                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9212                 goto unlock;
9213         }
9214
9215         /* We're good to go, update advertising data, parameters, and start
9216          * advertising.
9217          */
9218         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9219                                data_len);
9220         if (!cmd) {
9221                 err = -ENOMEM;
9222                 goto unlock;
9223         }
9224
9225         cp->instance = schedule_instance;
9226
9227         err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
9228                                  add_advertising_complete);
9229         if (err < 0)
9230                 mgmt_pending_free(cmd);
9231
9232 unlock:
9233         hci_dev_unlock(hdev);
9234
9235         return err;
9236 }
9237
9238 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
9239                                         int err)
9240 {
9241         struct mgmt_pending_cmd *cmd = data;
9242         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
9243         struct mgmt_rp_add_ext_adv_params rp;
9244         struct adv_info *adv;
9245         u32 flags;
9246
9247         BT_DBG("%s", hdev->name);
9248
9249         hci_dev_lock(hdev);
9250
9251         adv = hci_find_adv_instance(hdev, cp->instance);
9252         if (!adv)
9253                 goto unlock;
9254
9255         rp.instance = cp->instance;
9256         rp.tx_power = adv->tx_power;
9257
9258         /* While we're at it, inform userspace of the available space for this
9259          * advertisement, given the flags that will be used.
9260          */
9261         flags = __le32_to_cpu(cp->flags);
9262         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9263         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9264
9265         if (err) {
9266                 /* If this advertisement was previously advertising and we
9267                  * failed to update it, we signal that it has been removed and
9268                  * delete its structure
9269                  */
9270                 if (!adv->pending)
9271                         mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
9272
9273                 hci_remove_adv_instance(hdev, cp->instance);
9274
9275                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9276                                 mgmt_status(err));
9277         } else {
9278                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9279                                   mgmt_status(err), &rp, sizeof(rp));
9280         }
9281
9282 unlock:
9283         if (cmd)
9284                 mgmt_pending_free(cmd);
9285
9286         hci_dev_unlock(hdev);
9287 }
9288
9289 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
9290 {
9291         struct mgmt_pending_cmd *cmd = data;
9292         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
9293
9294         return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
9295 }
9296
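/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the split interface that
 * registers only the advertising parameters (timeout, duration, intervals,
 * TX power); the advertising and scan response data follow in a separate
 * Add Extended Advertising Data call.
 */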
9297 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
9298                               void *data, u16 data_len)
9299 {
9300         struct mgmt_cp_add_ext_adv_params *cp = data;
9301         struct mgmt_rp_add_ext_adv_params rp;
9302         struct mgmt_pending_cmd *cmd = NULL;
9303         struct adv_info *adv;
9304         u32 flags, min_interval, max_interval;
9305         u16 timeout, duration;
9306         u8 status;
9307         s8 tx_power;
9308         int err;
9309
9310         BT_DBG("%s", hdev->name);
9311
9312         status = mgmt_le_support(hdev);
9313         if (status)
9314                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9315                                        status);
9316
9317         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9318                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9319                                        MGMT_STATUS_INVALID_PARAMS);
9320
9321         /* The purpose of breaking add_advertising into two separate MGMT calls
9322          * for params and data is to allow more parameters to be added to this
9323          * structure in the future. For this reason, we verify that we have the
9324          * bare minimum structure we know of when the interface was defined. Any
9325          * extra parameters we don't know about will be ignored in this request.
9326          */
9327         if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
9328                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9329                                        MGMT_STATUS_INVALID_PARAMS);
9330
9331         flags = __le32_to_cpu(cp->flags);
9332
9333         if (!requested_adv_flags_are_valid(hdev, flags))
9334                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9335                                        MGMT_STATUS_INVALID_PARAMS);
9336
9337         hci_dev_lock(hdev);
9338
9339         /* In the new interface, the controller must be powered to register */
9340         if (!hdev_is_powered(hdev)) {
9341                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9342                                       MGMT_STATUS_REJECTED);
9343                 goto unlock;
9344         }
9345
9346         if (adv_busy(hdev)) {
9347                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9348                                       MGMT_STATUS_BUSY);
9349                 goto unlock;
9350         }
9351
9352         /* Parse defined parameters from request, use defaults otherwise */
9353         timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
9354                   __le16_to_cpu(cp->timeout) : 0;
9355
9356         duration = (flags & MGMT_ADV_PARAM_DURATION) ?
9357                    __le16_to_cpu(cp->duration) :
9358                    hdev->def_multi_adv_rotation_duration;
9359
9360         min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9361                        __le32_to_cpu(cp->min_interval) :
9362                        hdev->le_adv_min_interval;
9363
9364         max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9365                        __le32_to_cpu(cp->max_interval) :
9366                        hdev->le_adv_max_interval;
9367
9368         tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
9369                    cp->tx_power :
9370                    HCI_ADV_TX_POWER_NO_PREFERENCE;
9371
9372         /* Create advertising instance with no advertising or response data */
9373         adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
9374                                    timeout, duration, tx_power, min_interval,
9375                                    max_interval, 0);
9376
9377         if (IS_ERR(adv)) {
9378                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9379                                       MGMT_STATUS_FAILED);
9380                 goto unlock;
9381         }
9382
9383         /* Submit request for advertising params if ext adv available */
9384         if (ext_adv_capable(hdev)) {
9385                 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
9386                                        data, data_len);
9387                 if (!cmd) {
9388                         err = -ENOMEM;
9389                         hci_remove_adv_instance(hdev, cp->instance);
9390                         goto unlock;
9391                 }
9392
9393                 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
9394                                          add_ext_adv_params_complete);
9395                 if (err < 0)
9396                         mgmt_pending_free(cmd);
9397         } else {
9398                 rp.instance = cp->instance;
9399                 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9400                 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9401                 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9402                 err = mgmt_cmd_complete(sk, hdev->id,
9403                                         MGMT_OP_ADD_EXT_ADV_PARAMS,
9404                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9405         }
9406
9407 unlock:
9408         hci_dev_unlock(hdev);
9409
9410         return err;
9411 }
9412
9413 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9414 {
9415         struct mgmt_pending_cmd *cmd = data;
9416         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9417         struct mgmt_rp_add_advertising rp;
9418
9419         add_adv_complete(hdev, cmd->sk, cp->instance, err);
9420
9421         memset(&rp, 0, sizeof(rp));
9422
9423         rp.instance = cp->instance;
9424
9425         if (err)
9426                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9427                                 mgmt_status(err));
9428         else
9429                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9430                                   mgmt_status(err), &rp, sizeof(rp));
9431
9432         mgmt_pending_free(cmd);
9433 }
9434
9435 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9436 {
9437         struct mgmt_pending_cmd *cmd = data;
9438         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9439         int err;
9440
9441         if (ext_adv_capable(hdev)) {
9442                 err = hci_update_adv_data_sync(hdev, cp->instance);
9443                 if (err)
9444                         return err;
9445
9446                 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9447                 if (err)
9448                         return err;
9449
9450                 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9451         }
9452
9453         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9454 }
9455
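/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the split interface. The
 * instance must already exist (created by Add Extended Advertising Parameters);
 * here its advertising and scan response data are validated, stored and, if
 * possible, pushed to the controller.
 */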
9456 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9457                             u16 data_len)
9458 {
9459         struct mgmt_cp_add_ext_adv_data *cp = data;
9460         struct mgmt_rp_add_ext_adv_data rp;
9461         u8 schedule_instance = 0;
9462         struct adv_info *next_instance;
9463         struct adv_info *adv_instance;
9464         int err = 0;
9465         struct mgmt_pending_cmd *cmd;
9466
9467         BT_DBG("%s", hdev->name);
9468
9469         hci_dev_lock(hdev);
9470
9471         adv_instance = hci_find_adv_instance(hdev, cp->instance);
9472
9473         if (!adv_instance) {
9474                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9475                                       MGMT_STATUS_INVALID_PARAMS);
9476                 goto unlock;
9477         }
9478
9479         /* In the new interface, the controller must be powered to register */
9480         if (!hdev_is_powered(hdev)) {
9481                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9482                                       MGMT_STATUS_REJECTED);
9483                 goto clear_new_instance;
9484         }
9485
9486         if (adv_busy(hdev)) {
9487                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9488                                       MGMT_STATUS_BUSY);
9489                 goto clear_new_instance;
9490         }
9491
9492         /* Validate new data */
9493         if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9494                                cp->adv_data_len, true) ||
9495             !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9496                                cp->adv_data_len, cp->scan_rsp_len, false)) {
9497                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9498                                       MGMT_STATUS_INVALID_PARAMS);
9499                 goto clear_new_instance;
9500         }
9501
9502         /* Set the data in the advertising instance */
9503         hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9504                                   cp->data, cp->scan_rsp_len,
9505                                   cp->data + cp->adv_data_len);
9506
9507         /* If using software rotation, determine next instance to use */
9508         if (hdev->cur_adv_instance == cp->instance) {
9509                 /* If the currently advertised instance is being changed
9510                  * then cancel the current advertising and schedule the
9511                  * next instance. If there is only one instance then the
9512                  * overridden advertising data will be visible right
9513                  * away
9514                  */
9515                 cancel_adv_timeout(hdev);
9516
9517                 next_instance = hci_get_next_instance(hdev, cp->instance);
9518                 if (next_instance)
9519                         schedule_instance = next_instance->instance;
9520         } else if (!hdev->adv_instance_timeout) {
9521                 /* Immediately advertise the new instance if no other
9522                  * instance is currently being advertised.
9523                  */
9524                 schedule_instance = cp->instance;
9525         }
9526
9527         /* If the HCI_ADVERTISING flag is set or there is no instance to
9528          * be advertised then we have no HCI communication to make.
9529          * Simply return.
9530          */
9531         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9532                 if (adv_instance->pending) {
9533                         mgmt_advertising_added(sk, hdev, cp->instance);
9534                         adv_instance->pending = false;
9535                 }
9536                 rp.instance = cp->instance;
9537                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9538                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9539                 goto unlock;
9540         }
9541
9542         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9543                                data_len);
9544         if (!cmd) {
9545                 err = -ENOMEM;
9546                 goto clear_new_instance;
9547         }
9548
9549         err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9550                                  add_ext_adv_data_complete);
9551         if (err < 0) {
9552                 mgmt_pending_free(cmd);
9553                 goto clear_new_instance;
9554         }
9555
9556         /* We were successful in updating data, so trigger advertising_added
9557          * event if this is an instance that wasn't previously advertising. If
9558          * a failure occurs in the requests we initiated, we will remove the
9559          * instance again in add_advertising_complete
9560          */
9561         if (adv_instance->pending)
9562                 mgmt_advertising_added(sk, hdev, cp->instance);
9563
9564         goto unlock;
9565
9566 clear_new_instance:
9567         hci_remove_adv_instance(hdev, cp->instance);
9568
9569 unlock:
9570         hci_dev_unlock(hdev);
9571
9572         return err;
9573 }
9574
9575 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9576                                         int err)
9577 {
9578         struct mgmt_pending_cmd *cmd = data;
9579         struct mgmt_cp_remove_advertising *cp = cmd->param;
9580         struct mgmt_rp_remove_advertising rp;
9581
9582         bt_dev_dbg(hdev, "err %d", err);
9583
9584         memset(&rp, 0, sizeof(rp));
9585         rp.instance = cp->instance;
9586
9587         if (err)
9588                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9589                                 mgmt_status(err));
9590         else
9591                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9592                                   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9593
9594         mgmt_pending_free(cmd);
9595 }
9596
9597 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9598 {
9599         struct mgmt_pending_cmd *cmd = data;
9600         struct mgmt_cp_remove_advertising *cp = cmd->param;
9601         int err;
9602
9603         err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9604         if (err)
9605                 return err;
9606
9607         if (list_empty(&hdev->adv_instances))
9608                 err = hci_disable_advertising_sync(hdev);
9609
9610         return err;
9611 }
9612
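/* MGMT_OP_REMOVE_ADVERTISING handler: instance 0 removes all instances, any
 * other value must name an existing instance. The actual removal runs from the
 * cmd_sync queue so it serializes with other advertising updates.
 */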
9613 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9614                               void *data, u16 data_len)
9615 {
9616         struct mgmt_cp_remove_advertising *cp = data;
9617         struct mgmt_pending_cmd *cmd;
9618         int err;
9619
9620         bt_dev_dbg(hdev, "sock %p", sk);
9621
9622         hci_dev_lock(hdev);
9623
9624         if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9625                 err = mgmt_cmd_status(sk, hdev->id,
9626                                       MGMT_OP_REMOVE_ADVERTISING,
9627                                       MGMT_STATUS_INVALID_PARAMS);
9628                 goto unlock;
9629         }
9630
9631         if (pending_find(MGMT_OP_SET_LE, hdev)) {
9632                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9633                                       MGMT_STATUS_BUSY);
9634                 goto unlock;
9635         }
9636
9637         if (list_empty(&hdev->adv_instances)) {
9638                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9639                                       MGMT_STATUS_INVALID_PARAMS);
9640                 goto unlock;
9641         }
9642
9643         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9644                                data_len);
9645         if (!cmd) {
9646                 err = -ENOMEM;
9647                 goto unlock;
9648         }
9649
9650         err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9651                                  remove_advertising_complete);
9652         if (err < 0)
9653                 mgmt_pending_free(cmd);
9654
9655 unlock:
9656         hci_dev_unlock(hdev);
9657
9658         return err;
9659 }
9660
9661 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9662                              void *data, u16 data_len)
9663 {
9664         struct mgmt_cp_get_adv_size_info *cp = data;
9665         struct mgmt_rp_get_adv_size_info rp;
9666         u32 flags, supported_flags;
9667
9668         bt_dev_dbg(hdev, "sock %p", sk);
9669
9670         if (!lmp_le_capable(hdev))
9671                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9672                                        MGMT_STATUS_REJECTED);
9673
9674         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9675                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9676                                        MGMT_STATUS_INVALID_PARAMS);
9677
9678         flags = __le32_to_cpu(cp->flags);
9679
9680         /* The current implementation only supports a subset of the specified
9681          * flags.
9682          */
9683         supported_flags = get_supported_adv_flags(hdev);
9684         if (flags & ~supported_flags)
9685                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9686                                        MGMT_STATUS_INVALID_PARAMS);
9687
9688         rp.instance = cp->instance;
9689         rp.flags = cp->flags;
9690         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9691         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9692
9693         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9694                                  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9695 }
9696
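/* Dispatch table for the management channel: entries are indexed by MGMT
 * opcode, so their order has to match the MGMT_OP_* numbering. The flags
 * describe whether a command needs a controller index, may be issued by
 * untrusted sockets or carries variable length parameters.
 */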
9697 static const struct hci_mgmt_handler mgmt_handlers[] = {
9698         { NULL }, /* 0x0000 (no command) */
9699         { read_version,            MGMT_READ_VERSION_SIZE,
9700                                                 HCI_MGMT_NO_HDEV |
9701                                                 HCI_MGMT_UNTRUSTED },
9702         { read_commands,           MGMT_READ_COMMANDS_SIZE,
9703                                                 HCI_MGMT_NO_HDEV |
9704                                                 HCI_MGMT_UNTRUSTED },
9705         { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
9706                                                 HCI_MGMT_NO_HDEV |
9707                                                 HCI_MGMT_UNTRUSTED },
9708         { read_controller_info,    MGMT_READ_INFO_SIZE,
9709                                                 HCI_MGMT_UNTRUSTED },
9710         { set_powered,             MGMT_SETTING_SIZE },
9711         { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
9712         { set_connectable,         MGMT_SETTING_SIZE },
9713         { set_fast_connectable,    MGMT_SETTING_SIZE },
9714         { set_bondable,            MGMT_SETTING_SIZE },
9715         { set_link_security,       MGMT_SETTING_SIZE },
9716         { set_ssp,                 MGMT_SETTING_SIZE },
9717         { set_hs,                  MGMT_SETTING_SIZE },
9718         { set_le,                  MGMT_SETTING_SIZE },
9719         { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
9720         { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
9721         { add_uuid,                MGMT_ADD_UUID_SIZE },
9722         { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
9723         { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
9724                                                 HCI_MGMT_VAR_LEN },
9725         { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9726                                                 HCI_MGMT_VAR_LEN },
9727         { disconnect,              MGMT_DISCONNECT_SIZE },
9728         { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
9729         { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
9730         { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
9731         { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
9732         { pair_device,             MGMT_PAIR_DEVICE_SIZE },
9733         { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
9734         { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
9735         { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
9736         { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9737         { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
9738         { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9739         { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
9740         { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9741                                                 HCI_MGMT_VAR_LEN },
9742         { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9743         { start_discovery,         MGMT_START_DISCOVERY_SIZE },
9744         { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
9745         { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
9746         { block_device,            MGMT_BLOCK_DEVICE_SIZE },
9747         { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
9748         { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
9749         { set_advertising,         MGMT_SETTING_SIZE },
9750         { set_bredr,               MGMT_SETTING_SIZE },
9751         { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
9752         { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
9753         { set_secure_conn,         MGMT_SETTING_SIZE },
9754         { set_debug_keys,          MGMT_SETTING_SIZE },
9755         { set_privacy,             MGMT_SET_PRIVACY_SIZE },
9756         { load_irks,               MGMT_LOAD_IRKS_SIZE,
9757                                                 HCI_MGMT_VAR_LEN },
9758         { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
9759         { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
9760         { add_device,              MGMT_ADD_DEVICE_SIZE },
9761         { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
9762         { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
9763                                                 HCI_MGMT_VAR_LEN },
9764         { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9765                                                 HCI_MGMT_NO_HDEV |
9766                                                 HCI_MGMT_UNTRUSTED },
9767         { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
9768                                                 HCI_MGMT_UNCONFIGURED |
9769                                                 HCI_MGMT_UNTRUSTED },
9770         { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
9771                                                 HCI_MGMT_UNCONFIGURED },
9772         { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
9773                                                 HCI_MGMT_UNCONFIGURED },
9774         { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9775                                                 HCI_MGMT_VAR_LEN },
9776         { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9777         { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
9778                                                 HCI_MGMT_NO_HDEV |
9779                                                 HCI_MGMT_UNTRUSTED },
9780         { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
9781         { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
9782                                                 HCI_MGMT_VAR_LEN },
9783         { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
9784         { get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
9785         { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9786         { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9787                                                 HCI_MGMT_UNTRUSTED },
9788         { set_appearance,          MGMT_SET_APPEARANCE_SIZE },
9789         { get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
9790         { set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
9791         { set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9792                                                 HCI_MGMT_VAR_LEN },
9793         { set_wideband_speech,     MGMT_SETTING_SIZE },
9794         { read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
9795                                                 HCI_MGMT_UNTRUSTED },
9796         { read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
9797                                                 HCI_MGMT_UNTRUSTED |
9798                                                 HCI_MGMT_HDEV_OPTIONAL },
9799         { set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
9800                                                 HCI_MGMT_VAR_LEN |
9801                                                 HCI_MGMT_HDEV_OPTIONAL },
9802         { read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9803                                                 HCI_MGMT_UNTRUSTED },
9804         { set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9805                                                 HCI_MGMT_VAR_LEN },
9806         { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9807                                                 HCI_MGMT_UNTRUSTED },
9808         { set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9809                                                 HCI_MGMT_VAR_LEN },
9810         { get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
9811         { set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
9812         { read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9813         { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9814                                                 HCI_MGMT_VAR_LEN },
9815         { remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
9816         { add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9817                                                 HCI_MGMT_VAR_LEN },
9818         { add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
9819                                                 HCI_MGMT_VAR_LEN },
9820         { add_adv_patterns_monitor_rssi,
9821                                    MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9822                                                 HCI_MGMT_VAR_LEN },
9823         { set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
9824                                                 HCI_MGMT_VAR_LEN },
9825         { mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
9826         { mesh_send,               MGMT_MESH_SEND_SIZE,
9827                                                 HCI_MGMT_VAR_LEN },
9828         { mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
9829 };
9830
9831 #ifdef TIZEN_BT
9832 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
9833         { NULL }, /* 0x0000 (no command) */
9834         { set_advertising_params,  MGMT_SET_ADVERTISING_PARAMS_SIZE },
9835         { set_advertising_data,    MGMT_SET_ADV_MIN_APP_DATA_SIZE,
9836                                                 HCI_MGMT_VAR_LEN },
9837         { set_scan_rsp_data,       MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
9838                                                 HCI_MGMT_VAR_LEN },
9839         { add_white_list,          MGMT_ADD_DEV_WHITE_LIST_SIZE },
9840         { remove_from_white_list,  MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
9841         { clear_white_list,        MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
9842 };
9843 #endif
9844
9845 void mgmt_index_added(struct hci_dev *hdev)
9846 {
9847         struct mgmt_ev_ext_index ev;
9848
9849         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9850                 return;
9851
9852         switch (hdev->dev_type) {
9853         case HCI_PRIMARY:
9854                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9855                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9856                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9857                         ev.type = 0x01;
9858                 } else {
9859                         mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9860                                          HCI_MGMT_INDEX_EVENTS);
9861                         ev.type = 0x00;
9862                 }
9863                 break;
9864         case HCI_AMP:
9865                 ev.type = 0x02;
9866                 break;
9867         default:
9868                 return;
9869         }
9870
9871         ev.bus = hdev->bus;
9872
9873         mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9874                          HCI_MGMT_EXT_INDEX_EVENTS);
9875 }
9876
9877 void mgmt_index_removed(struct hci_dev *hdev)
9878 {
9879         struct mgmt_ev_ext_index ev;
9880         u8 status = MGMT_STATUS_INVALID_INDEX;
9881
9882         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9883                 return;
9884
9885         switch (hdev->dev_type) {
9886         case HCI_PRIMARY:
9887                 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9888
9889                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9890                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9891                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9892                         ev.type = 0x01;
9893                 } else {
9894                         mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9895                                          HCI_MGMT_INDEX_EVENTS);
9896                         ev.type = 0x00;
9897                 }
9898                 break;
9899         case HCI_AMP:
9900                 ev.type = 0x02;
9901                 break;
9902         default:
9903                 return;
9904         }
9905
9906         ev.bus = hdev->bus;
9907
9908         mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9909                          HCI_MGMT_EXT_INDEX_EVENTS);
9910
9911         /* Cancel any remaining timed work */
9912         if (!hci_dev_test_flag(hdev, HCI_MGMT))
9913                 return;
9914         cancel_delayed_work_sync(&hdev->discov_off);
9915         cancel_delayed_work_sync(&hdev->service_cache);
9916         cancel_delayed_work_sync(&hdev->rpa_expired);
9917 }
9918
9919 void mgmt_power_on(struct hci_dev *hdev, int err)
9920 {
9921         struct cmd_lookup match = { NULL, hdev };
9922
9923         bt_dev_dbg(hdev, "err %d", err);
9924
9925         hci_dev_lock(hdev);
9926
9927         if (!err) {
9928                 restart_le_actions(hdev);
9929                 hci_update_passive_scan(hdev);
9930         }
9931
9932         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9933
9934         new_settings(hdev, match.sk);
9935
9936         if (match.sk)
9937                 sock_put(match.sk);
9938
9939         hci_dev_unlock(hdev);
9940 }
9941
9942 void __mgmt_power_off(struct hci_dev *hdev)
9943 {
9944         struct cmd_lookup match = { NULL, hdev };
9945         u8 status, zero_cod[] = { 0, 0, 0 };
9946
9947         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9948
9949         /* If the power off is because of hdev unregistration let us
9950          * use the appropriate INVALID_INDEX status. Otherwise use
9951          * NOT_POWERED. We cover both scenarios here since later in
9952          * mgmt_index_removed() any hci_conn callbacks will have already
9953          * been triggered, potentially causing misleading DISCONNECTED
9954          * status responses.
9955          */
9956         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9957                 status = MGMT_STATUS_INVALID_INDEX;
9958         else
9959                 status = MGMT_STATUS_NOT_POWERED;
9960
9961         mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9962
9963         if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9964                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9965                                    zero_cod, sizeof(zero_cod),
9966                                    HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9967                 ext_info_changed(hdev, NULL);
9968         }
9969
9970         new_settings(hdev, match.sk);
9971
9972         if (match.sk)
9973                 sock_put(match.sk);
9974 }
9975
9976 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9977 {
9978         struct mgmt_pending_cmd *cmd;
9979         u8 status;
9980
9981         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9982         if (!cmd)
9983                 return;
9984
9985         if (err == -ERFKILL)
9986                 status = MGMT_STATUS_RFKILLED;
9987         else
9988                 status = MGMT_STATUS_FAILED;
9989
9990         mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9991
9992         mgmt_pending_remove(cmd);
9993 }
9994
9995 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9996                        bool persistent)
9997 {
9998         struct mgmt_ev_new_link_key ev;
9999
10000         memset(&ev, 0, sizeof(ev));
10001
10002         ev.store_hint = persistent;
10003         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10004         ev.key.addr.type = BDADDR_BREDR;
10005         ev.key.type = key->type;
10006         memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
10007         ev.key.pin_len = key->pin_len;
10008
10009         mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
10010 }
10011
10012 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
10013 {
10014         switch (ltk->type) {
10015         case SMP_LTK:
10016         case SMP_LTK_RESPONDER:
10017                 if (ltk->authenticated)
10018                         return MGMT_LTK_AUTHENTICATED;
10019                 return MGMT_LTK_UNAUTHENTICATED;
10020         case SMP_LTK_P256:
10021                 if (ltk->authenticated)
10022                         return MGMT_LTK_P256_AUTH;
10023                 return MGMT_LTK_P256_UNAUTH;
10024         case SMP_LTK_P256_DEBUG:
10025                 return MGMT_LTK_P256_DEBUG;
10026         }
10027
10028         return MGMT_LTK_UNAUTHENTICATED;
10029 }
10030
10031 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
10032 {
10033         struct mgmt_ev_new_long_term_key ev;
10034
10035         memset(&ev, 0, sizeof(ev));
10036
10037         /* Devices using resolvable or non-resolvable random addresses
10038          * without providing an identity resolving key don't need their
10039          * long term keys stored. Their addresses will change the
10040          * next time around.
10041          *
10042          * Only make sure the long term key is stored when a remote
10043          * device provides an identity address. If the remote
10044          * identity is known, the long term keys are internally
10045          * mapped to the identity address. So allow static random
10046          * and public addresses here.
10047          */
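        /* Static random addresses have the two most significant bits set to
         * 0b11, hence the 0xc0 mask on the top address byte below.
         */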
10048         if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10049             (key->bdaddr.b[5] & 0xc0) != 0xc0)
10050                 ev.store_hint = 0x00;
10051         else
10052                 ev.store_hint = persistent;
10053
10054         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10055         ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
10056         ev.key.type = mgmt_ltk_type(key);
10057         ev.key.enc_size = key->enc_size;
10058         ev.key.ediv = key->ediv;
10059         ev.key.rand = key->rand;
10060
10061         if (key->type == SMP_LTK)
10062                 ev.key.initiator = 1;
10063
10064         /* Make sure we copy only the significant bytes based on the
10065          * encryption key size, and set the rest of the value to zeroes.
10066          */
10067         memcpy(ev.key.val, key->val, key->enc_size);
10068         memset(ev.key.val + key->enc_size, 0,
10069                sizeof(ev.key.val) - key->enc_size);
10070
10071         mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
10072 }
10073
10074 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
10075 {
10076         struct mgmt_ev_new_irk ev;
10077
10078         memset(&ev, 0, sizeof(ev));
10079
10080         ev.store_hint = persistent;
10081
10082         bacpy(&ev.rpa, &irk->rpa);
10083         bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
10084         ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
10085         memcpy(ev.irk.val, irk->val, sizeof(irk->val));
10086
10087         mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
10088 }
10089
10090 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
10091                    bool persistent)
10092 {
10093         struct mgmt_ev_new_csrk ev;
10094
10095         memset(&ev, 0, sizeof(ev));
10096
10097         /* Devices using resolvable or non-resolvable random addresses
10098          * without providing an identity resolving key don't need their
10099          * signature resolving keys stored. Their addresses will change
10100          * the next time around.
10101          *
10102          * Only make sure the signature resolving key is stored when a
10103          * remote device provides an identity address. So allow static
10104          * random and public addresses here.
10105          */
10106         if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10107             (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
10108                 ev.store_hint = 0x00;
10109         else
10110                 ev.store_hint = persistent;
10111
10112         bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
10113         ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
10114         ev.key.type = csrk->type;
10115         memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
10116
10117         mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
10118 }
10119
10120 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
10121                          u8 bdaddr_type, u8 store_hint, u16 min_interval,
10122                          u16 max_interval, u16 latency, u16 timeout)
10123 {
10124         struct mgmt_ev_new_conn_param ev;
10125
10126         if (!hci_is_identity_address(bdaddr, bdaddr_type))
10127                 return;
10128
10129         memset(&ev, 0, sizeof(ev));
10130         bacpy(&ev.addr.bdaddr, bdaddr);
10131         ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
10132         ev.store_hint = store_hint;
10133         ev.min_interval = cpu_to_le16(min_interval);
10134         ev.max_interval = cpu_to_le16(max_interval);
10135         ev.latency = cpu_to_le16(latency);
10136         ev.timeout = cpu_to_le16(timeout);
10137
10138         mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
10139 }
10140
10141 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
10142                            u8 *name, u8 name_len)
10143 {
10144         struct sk_buff *skb;
10145         struct mgmt_ev_device_connected *ev;
10146         u16 eir_len = 0;
10147         u32 flags = 0;
10148
10149         /* Allocate a buffer for the LE adv data or the BR/EDR EIR fields */
10150         if (conn->le_adv_data_len > 0)
10151                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
10152                                      sizeof(*ev) + conn->le_adv_data_len);
10153         else
10154                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
10155                                      sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
10156                                      eir_precalc_len(sizeof(conn->dev_class)));
10157
10158         ev = skb_put(skb, sizeof(*ev));
10159         bacpy(&ev->addr.bdaddr, &conn->dst);
10160         ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10161
10162         if (conn->out)
10163                 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
10164
10165         ev->flags = __cpu_to_le32(flags);
10166
10167         /* We must ensure that the EIR Data fields are ordered and
10168          * unique. Keep it simple for now and avoid the problem by not
10169          * adding any BR/EDR data to the LE adv.
10170          */
10171         if (conn->le_adv_data_len > 0) {
10172                 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
10173                 eir_len = conn->le_adv_data_len;
10174         } else {
10175                 if (name)
10176                         eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10177
10178                 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
10179                         eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
10180                                                     conn->dev_class, sizeof(conn->dev_class));
10181         }
10182
10183         ev->eir_len = cpu_to_le16(eir_len);
10184
10185         mgmt_event_skb(skb, NULL);
10186 }
10187
10188 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
10189 {
10190         struct sock **sk = data;
10191
10192         cmd->cmd_complete(cmd, 0);
10193
10194         *sk = cmd->sk;
10195         sock_hold(*sk);
10196
10197         mgmt_pending_remove(cmd);
10198 }
10199
10200 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
10201 {
10202         struct hci_dev *hdev = data;
10203         struct mgmt_cp_unpair_device *cp = cmd->param;
10204
10205         device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
10206
10207         cmd->cmd_complete(cmd, 0);
10208         mgmt_pending_remove(cmd);
10209 }
10210
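/* Return true if a Set Powered (off) command is pending, i.e. mgmt is in
 * the middle of powering the controller down.
 */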
10211 bool mgmt_powering_down(struct hci_dev *hdev)
10212 {
10213         struct mgmt_pending_cmd *cmd;
10214         struct mgmt_mode *cp;
10215
10216         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10217         if (!cmd)
10218                 return false;
10219
10220         cp = cmd->param;
10221         if (!cp->val)
10222                 return true;
10223
10224         return false;
10225 }
10226
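/* Handle a dropped connection: finish a pending power-off once the last
 * connection is gone, emit MGMT_EV_DEVICE_DISCONNECTED (skipping the
 * socket that issued the Disconnect command, which gets a command
 * response instead) and complete any pending Disconnect and Unpair
 * Device commands.
 */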
10227 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
10228                               u8 link_type, u8 addr_type, u8 reason,
10229                               bool mgmt_connected)
10230 {
10231         struct mgmt_ev_device_disconnected ev;
10232         struct sock *sk = NULL;
10233
10234         /* The connection is still in hci_conn_hash so test for 1
10235          * instead of 0 to know if this is the last one.
10236          */
10237         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10238                 cancel_delayed_work(&hdev->power_off);
10239                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10240         }
10241
10242         if (!mgmt_connected)
10243                 return;
10244
10245         if (link_type != ACL_LINK && link_type != LE_LINK)
10246                 return;
10247
10248         mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
10249
10250         bacpy(&ev.addr.bdaddr, bdaddr);
10251         ev.addr.type = link_to_bdaddr(link_type, addr_type);
10252         ev.reason = reason;
10253
10254         /* Report disconnects due to suspend */
10255         if (hdev->suspended)
10256                 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
10257
10258         mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
10259
10260         if (sk)
10261                 sock_put(sk);
10262
10263         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10264                              hdev);
10265 }
10266
10267 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10268                             u8 link_type, u8 addr_type, u8 status)
10269 {
10270         u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
10271         struct mgmt_cp_disconnect *cp;
10272         struct mgmt_pending_cmd *cmd;
10273
10274         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10275                              hdev);
10276
10277         cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
10278         if (!cmd)
10279                 return;
10280
10281         cp = cmd->param;
10282
10283         if (bacmp(bdaddr, &cp->addr.bdaddr))
10284                 return;
10285
10286         if (cp->addr.type != bdaddr_type)
10287                 return;
10288
10289         cmd->cmd_complete(cmd, mgmt_status(status));
10290         mgmt_pending_remove(cmd);
10291 }
10292
10293 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10294                          u8 addr_type, u8 status)
10295 {
10296         struct mgmt_ev_connect_failed ev;
10297
10298         /* The connection is still in hci_conn_hash so test for 1
10299          * instead of 0 to know if this is the last one.
10300          */
10301         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10302                 cancel_delayed_work(&hdev->power_off);
10303                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10304         }
10305
10306         bacpy(&ev.addr.bdaddr, bdaddr);
10307         ev.addr.type = link_to_bdaddr(link_type, addr_type);
10308         ev.status = mgmt_status(status);
10309
10310         mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
10311 }
10312
10313 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
10314 {
10315         struct mgmt_ev_pin_code_request ev;
10316
10317         bacpy(&ev.addr.bdaddr, bdaddr);
10318         ev.addr.type = BDADDR_BREDR;
10319         ev.secure = secure;
10320
10321         mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
10322 }
10323
10324 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10325                                   u8 status)
10326 {
10327         struct mgmt_pending_cmd *cmd;
10328
10329         cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
10330         if (!cmd)
10331                 return;
10332
10333         cmd->cmd_complete(cmd, mgmt_status(status));
10334         mgmt_pending_remove(cmd);
10335 }
10336
10337 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10338                                       u8 status)
10339 {
10340         struct mgmt_pending_cmd *cmd;
10341
10342         cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
10343         if (!cmd)
10344                 return;
10345
10346         cmd->cmd_complete(cmd, mgmt_status(status));
10347         mgmt_pending_remove(cmd);
10348 }
10349
10350 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10351                               u8 link_type, u8 addr_type, u32 value,
10352                               u8 confirm_hint)
10353 {
10354         struct mgmt_ev_user_confirm_request ev;
10355
10356         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10357
10358         bacpy(&ev.addr.bdaddr, bdaddr);
10359         ev.addr.type = link_to_bdaddr(link_type, addr_type);
10360         ev.confirm_hint = confirm_hint;
10361         ev.value = cpu_to_le32(value);
10362
10363         return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
10364                           NULL);
10365 }
10366
10367 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10368                               u8 link_type, u8 addr_type)
10369 {
10370         struct mgmt_ev_user_passkey_request ev;
10371
10372         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10373
10374         bacpy(&ev.addr.bdaddr, bdaddr);
10375         ev.addr.type = link_to_bdaddr(link_type, addr_type);
10376
10377         return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
10378                           NULL);
10379 }
10380
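/* Common completion handler for the four user confirmation and passkey
 * reply variants below: look up the pending command for the given opcode
 * and complete it with the translated status.
 */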
10381 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10382                                       u8 link_type, u8 addr_type, u8 status,
10383                                       u8 opcode)
10384 {
10385         struct mgmt_pending_cmd *cmd;
10386
10387         cmd = pending_find(opcode, hdev);
10388         if (!cmd)
10389                 return -ENOENT;
10390
10391         cmd->cmd_complete(cmd, mgmt_status(status));
10392         mgmt_pending_remove(cmd);
10393
10394         return 0;
10395 }
10396
10397 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10398                                      u8 link_type, u8 addr_type, u8 status)
10399 {
10400         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10401                                           status, MGMT_OP_USER_CONFIRM_REPLY);
10402 }
10403
10404 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10405                                          u8 link_type, u8 addr_type, u8 status)
10406 {
10407         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10408                                           status,
10409                                           MGMT_OP_USER_CONFIRM_NEG_REPLY);
10410 }
10411
10412 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10413                                      u8 link_type, u8 addr_type, u8 status)
10414 {
10415         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10416                                           status, MGMT_OP_USER_PASSKEY_REPLY);
10417 }
10418
10419 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10420                                          u8 link_type, u8 addr_type, u8 status)
10421 {
10422         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10423                                           status,
10424                                           MGMT_OP_USER_PASSKEY_NEG_REPLY);
10425 }
10426
10427 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
10428                              u8 link_type, u8 addr_type, u32 passkey,
10429                              u8 entered)
10430 {
10431         struct mgmt_ev_passkey_notify ev;
10432
10433         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10434
10435         bacpy(&ev.addr.bdaddr, bdaddr);
10436         ev.addr.type = link_to_bdaddr(link_type, addr_type);
10437         ev.passkey = __cpu_to_le32(passkey);
10438         ev.entered = entered;
10439
10440         return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
10441 }
10442
10443 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
10444 {
10445         struct mgmt_ev_auth_failed ev;
10446         struct mgmt_pending_cmd *cmd;
10447         u8 status = mgmt_status(hci_status);
10448
10449         bacpy(&ev.addr.bdaddr, &conn->dst);
10450         ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10451         ev.status = status;
10452
10453         cmd = find_pairing(conn);
10454
10455         mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
10456                     cmd ? cmd->sk : NULL);
10457
10458         if (cmd) {
10459                 cmd->cmd_complete(cmd, status);
10460                 mgmt_pending_remove(cmd);
10461         }
10462 }
10463
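/* Sync the HCI_LINK_SECURITY setting with the controller's HCI_AUTH flag
 * once an authentication-enable update completes, answer any pending Set
 * Link Security commands and emit New Settings if the value changed.
 */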
10464 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
10465 {
10466         struct cmd_lookup match = { NULL, hdev };
10467         bool changed;
10468
10469         if (status) {
10470                 u8 mgmt_err = mgmt_status(status);
10471                 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10472                                      cmd_status_rsp, &mgmt_err);
10473                 return;
10474         }
10475
10476         if (test_bit(HCI_AUTH, &hdev->flags))
10477                 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10478         else
10479                 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10480
10481         mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10482                              &match);
10483
10484         if (changed)
10485                 new_settings(hdev, match.sk);
10486
10487         if (match.sk)
10488                 sock_put(match.sk);
10489 }
10490
10491 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10492 {
10493         struct cmd_lookup *match = data;
10494
10495         if (match->sk == NULL) {
10496                 match->sk = cmd->sk;
10497                 sock_hold(match->sk);
10498         }
10499 }
10500
10501 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10502                                     u8 status)
10503 {
10504         struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10505
10506         mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10507         mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10508         mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10509
10510         if (!status) {
10511                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10512                                    3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10513                 ext_info_changed(hdev, NULL);
10514         }
10515
10516         if (match.sk)
10517                 sock_put(match.sk);
10518 }
10519
10520 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10521 {
10522         struct mgmt_cp_set_local_name ev;
10523         struct mgmt_pending_cmd *cmd;
10524
10525         if (status)
10526                 return;
10527
10528         memset(&ev, 0, sizeof(ev));
10529         memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10530         memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10531
10532         cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10533         if (!cmd) {
10534                 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10535
10536                 /* If this is an HCI command related to powering on the
10537                  * HCI dev, don't send any mgmt signals.
10538                  */
10539                 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10540                         return;
10541         }
10542
10543         mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10544                            HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10545         ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10546 }
10547
10548 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10549 {
10550         int i;
10551
10552         for (i = 0; i < uuid_count; i++) {
10553                 if (!memcmp(uuid, uuids[i], 16))
10554                         return true;
10555         }
10556
10557         return false;
10558 }
10559
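/* Walk EIR/advertising data and return true if any advertised UUID is in
 * the filter list. 16-bit and 32-bit UUIDs are expanded with the Bluetooth
 * Base UUID before comparison, e.g. 0x180d becomes
 * 0000180d-0000-1000-8000-00805f9b34fb.
 */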
10560 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10561 {
10562         u16 parsed = 0;
10563
10564         while (parsed < eir_len) {
10565                 u8 field_len = eir[0];
10566                 u8 uuid[16];
10567                 int i;
10568
10569                 if (field_len == 0)
10570                         break;
10571
10572                 if (eir_len - parsed < field_len + 1)
10573                         break;
10574
10575                 switch (eir[1]) {
10576                 case EIR_UUID16_ALL:
10577                 case EIR_UUID16_SOME:
10578                         for (i = 0; i + 3 <= field_len; i += 2) {
10579                                 memcpy(uuid, bluetooth_base_uuid, 16);
10580                                 uuid[13] = eir[i + 3];
10581                                 uuid[12] = eir[i + 2];
10582                                 if (has_uuid(uuid, uuid_count, uuids))
10583                                         return true;
10584                         }
10585                         break;
10586                 case EIR_UUID32_ALL:
10587                 case EIR_UUID32_SOME:
10588                         for (i = 0; i + 5 <= field_len; i += 4) {
10589                                 memcpy(uuid, bluetooth_base_uuid, 16);
10590                                 uuid[15] = eir[i + 5];
10591                                 uuid[14] = eir[i + 4];
10592                                 uuid[13] = eir[i + 3];
10593                                 uuid[12] = eir[i + 2];
10594                                 if (has_uuid(uuid, uuid_count, uuids))
10595                                         return true;
10596                         }
10597                         break;
10598                 case EIR_UUID128_ALL:
10599                 case EIR_UUID128_SOME:
10600                         for (i = 0; i + 17 <= field_len; i += 16) {
10601                                 memcpy(uuid, eir + i + 2, 16);
10602                                 if (has_uuid(uuid, uuid_count, uuids))
10603                                         return true;
10604                         }
10605                         break;
10606                 }
10607
10608                 parsed += field_len + 1;
10609                 eir += field_len + 1;
10610         }
10611
10612         return false;
10613 }
10614
10615 static void restart_le_scan(struct hci_dev *hdev)
10616 {
10617         /* If the controller is not scanning, we are done. */
10618         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
10619                 return;
10620
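        /* Don't bother restarting when less than DISCOV_LE_RESTART_DELAY of
         * the current scan window remains; the scan would finish before the
         * restart could take effect.
         */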
10621         if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
10622                        hdev->discovery.scan_start +
10623                        hdev->discovery.scan_duration))
10624                 return;
10625
10626         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
10627                            DISCOV_LE_RESTART_DELAY);
10628 }
10629
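/* Decide whether a discovery result passes the service-discovery filters,
 * i.e. the RSSI threshold and the optional UUID list, taking the strict
 * duplicate-filter quirk into account.
 */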
10630 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10631                             u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10632 {
10633         /* If an RSSI threshold has been specified, and
10634          * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10635          * an RSSI smaller than the RSSI threshold will be dropped. If the quirk
10636          * is set, let it through for further processing, as we might need to
10637          * restart the scan.
10638          *
10639          * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10640          * the results are also dropped.
10641          */
10642         if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10643             (rssi == HCI_RSSI_INVALID ||
10644             (rssi < hdev->discovery.rssi &&
10645              !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10646                 return false;
10647
10648         if (hdev->discovery.uuid_count != 0) {
10649                 /* If a list of UUIDs is provided in filter, results with no
10650                  * matching UUID should be dropped.
10651                  */
10652                 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10653                                    hdev->discovery.uuids) &&
10654                     !eir_has_uuids(scan_rsp, scan_rsp_len,
10655                                    hdev->discovery.uuid_count,
10656                                    hdev->discovery.uuids))
10657                         return false;
10658         }
10659
10660         /* If duplicate filtering does not report RSSI changes, then restart
10661          * scanning so that results are reported again with updated RSSI values.
10662          */
10663         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10664                 restart_le_scan(hdev);
10665
10666                 /* Validate RSSI value against the RSSI threshold once more. */
10667                 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10668                     rssi < hdev->discovery.rssi)
10669                         return false;
10670         }
10671
10672         return true;
10673 }
10674
10675 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10676                                   bdaddr_t *bdaddr, u8 addr_type)
10677 {
10678         struct mgmt_ev_adv_monitor_device_lost ev;
10679
10680         ev.monitor_handle = cpu_to_le16(handle);
10681         bacpy(&ev.addr.bdaddr, bdaddr);
10682         ev.addr.type = addr_type;
10683
10684         mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10685                    NULL);
10686 }
10687
10688 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10689                                                struct sk_buff *skb,
10690                                                struct sock *skip_sk,
10691                                                u16 handle)
10692 {
10693         struct sk_buff *advmon_skb;
10694         size_t advmon_skb_len;
10695         __le16 *monitor_handle;
10696
10697         if (!skb)
10698                 return;
10699
10700         advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10701                           sizeof(struct mgmt_ev_device_found)) + skb->len;
10702         advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10703                                     advmon_skb_len);
10704         if (!advmon_skb)
10705                 return;
10706
10707         /* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event
10708          * except that it also carries a 'monitor_handle'. Make a copy of
10709          * DEVICE_FOUND and store the monitor_handle of the matched monitor.
10710          */
10711         monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10712         *monitor_handle = cpu_to_le16(handle);
10713         skb_put_data(advmon_skb, skb->data, skb->len);
10714
10715         mgmt_event_skb(advmon_skb, skip_sk);
10716 }
10717
10718 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10719                                           bdaddr_t *bdaddr, bool report_device,
10720                                           struct sk_buff *skb,
10721                                           struct sock *skip_sk)
10722 {
10723         struct monitored_device *dev, *tmp;
10724         bool matched = false;
10725         bool notified = false;
10726
10727         /* We have received the Advertisement Report because:
10728          * 1. the kernel has initiated active discovery
10729          * 2. if not, we have pend_le_reports > 0 in which case we are doing
10730          *    passive scanning
10731          * 3. if none of the above is true, we have one or more active
10732          *    Advertisement Monitors
10733          *
10734          * For cases 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10735          * and report ONLY one advertisement per device for the matched Monitor
10736          * via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10737          *
10738          * For case 3, since we are not actively scanning and all advertisements
10739          * received are due to a matched Advertisement Monitor, report all
10740          * advertisements ONLY via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10741          */
10742         if (report_device && !hdev->advmon_pend_notify) {
10743                 mgmt_event_skb(skb, skip_sk);
10744                 return;
10745         }
10746
10747         hdev->advmon_pend_notify = false;
10748
10749         list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10750                 if (!bacmp(&dev->bdaddr, bdaddr)) {
10751                         matched = true;
10752
10753                         if (!dev->notified) {
10754                                 mgmt_send_adv_monitor_device_found(hdev, skb,
10755                                                                    skip_sk,
10756                                                                    dev->handle);
10757                                 notified = true;
10758                                 dev->notified = true;
10759                         }
10760                 }
10761
10762                 if (!dev->notified)
10763                         hdev->advmon_pend_notify = true;
10764         }
10765
10766         if (!report_device &&
10767             ((matched && !notified) || !msft_monitor_supported(hdev))) {
10768                 /* Handle 0 indicates that we are not actively scanning and
10769                  * this is either a subsequent advertisement report for an
10770                  * already matched Advertisement Monitor or controller
10771                  * offloading support is not available.
10772                  */
10773                 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10774         }
10775
10776         if (report_device)
10777                 mgmt_event_skb(skb, skip_sk);
10778         else
10779                 kfree_skb(skb);
10780 }
10781
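/* Forward an LE advertising report to mesh listeners via
 * MGMT_EV_MESH_DEVICE_FOUND, but only if it carries at least one of the
 * AD types requested in hdev->mesh_ad_types (an empty list accepts all).
 */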
10782 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10783                               u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10784                               u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10785                               u64 instant)
10786 {
10787         struct sk_buff *skb;
10788         struct mgmt_ev_mesh_device_found *ev;
10789         int i, j;
10790
10791         if (!hdev->mesh_ad_types[0])
10792                 goto accepted;
10793
10794         /* Scan for requested AD types */
10795         if (eir_len > 0) {
10796                 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10797                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10798                                 if (!hdev->mesh_ad_types[j])
10799                                         break;
10800
10801                                 if (hdev->mesh_ad_types[j] == eir[i + 1])
10802                                         goto accepted;
10803                         }
10804                 }
10805         }
10806
10807         if (scan_rsp_len > 0) {
10808                 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10809                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10810                                 if (!hdev->mesh_ad_types[j])
10811                                         break;
10812
10813                                 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10814                                         goto accepted;
10815                         }
10816                 }
10817         }
10818
10819         return;
10820
10821 accepted:
10822         skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10823                              sizeof(*ev) + eir_len + scan_rsp_len);
10824         if (!skb)
10825                 return;
10826
10827         ev = skb_put(skb, sizeof(*ev));
10828
10829         bacpy(&ev->addr.bdaddr, bdaddr);
10830         ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10831         ev->rssi = rssi;
10832         ev->flags = cpu_to_le32(flags);
10833         ev->instant = cpu_to_le64(instant);
10834
10835         if (eir_len > 0)
10836                 /* Copy EIR or advertising data into event */
10837                 skb_put_data(skb, eir, eir_len);
10838
10839         if (scan_rsp_len > 0)
10840                 /* Append scan response data to event */
10841                 skb_put_data(skb, scan_rsp, scan_rsp_len);
10842
10843         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10844
10845         mgmt_event_skb(skb, NULL);
10846 }
10847
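/* Main entry point for discovery results: forward LE reports to the mesh
 * handler, drop results that no active discovery, passive scan or
 * Advertisement Monitor is interested in, apply the service-discovery and
 * limited-discovery filters, and finally emit DEVICE_FOUND and/or
 * ADV_MONITOR_DEVICE_FOUND events.
 */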
10848 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10849                        u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10850                        u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10851                        u64 instant)
10852 {
10853         struct sk_buff *skb;
10854         struct mgmt_ev_device_found *ev;
10855         bool report_device = hci_discovery_active(hdev);
10856
10857         if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10858                 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10859                                   eir, eir_len, scan_rsp, scan_rsp_len,
10860                                   instant);
10861
10862         /* Don't send events for a non-kernel-initiated discovery. The
10863          * LE exceptions are pend_le_reports > 0 (passive scanning) and
10864          * an active Advertisement Monitor, both of which want the reports.
10865          */
10866         if (!hci_discovery_active(hdev)) {
10867                 if (link_type == ACL_LINK)
10868                         return;
10869                 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10870                         report_device = true;
10871                 else if (!hci_is_adv_monitoring(hdev))
10872                         return;
10873         }
10874
10875         if (hdev->discovery.result_filtering) {
10876                 /* We are using service discovery */
10877                 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10878                                      scan_rsp_len))
10879                         return;
10880         }
10881
10882         if (hdev->discovery.limited) {
10883                 /* Check for limited discoverable bit */
10884                 if (dev_class) {
10885                         if (!(dev_class[1] & 0x20))
10886                                 return;
10887                 } else {
10888                         u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10889                         if (!flags || !(flags[0] & LE_AD_LIMITED))
10890                                 return;
10891                 }
10892         }
10893
10894         /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10895         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10896                              sizeof(*ev) + eir_len + scan_rsp_len + 5);
10897         if (!skb)
10898                 return;
10899
10900         ev = skb_put(skb, sizeof(*ev));
10901
10902         /* In case of device discovery with BR/EDR devices (pre 1.2), the
10903          * RSSI value was reported as 0 when not available. This behavior
10904          * is kept when using device discovery. This is required for full
10905          * backwards compatibility with the API.
10906          *
10907          * However when using service discovery, the value 127 will be
10908          * returned when the RSSI is not available.
10909          */
10910         if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10911             link_type == ACL_LINK)
10912                 rssi = 0;
10913
10914         bacpy(&ev->addr.bdaddr, bdaddr);
10915         ev->addr.type = link_to_bdaddr(link_type, addr_type);
10916         ev->rssi = rssi;
10917         ev->flags = cpu_to_le32(flags);
10918
10919         if (eir_len > 0)
10920                 /* Copy EIR or advertising data into event */
10921                 skb_put_data(skb, eir, eir_len);
10922
10923         if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10924                 u8 eir_cod[5];
10925
10926                 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10927                                            dev_class, 3);
10928                 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10929         }
10930
10931         if (scan_rsp_len > 0)
10932                 /* Append scan response data to event */
10933                 skb_put_data(skb, scan_rsp, scan_rsp_len);
10934
10935         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10936
10937         mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10938 }
10939
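/* Report the result of a remote name request as a DEVICE_FOUND event,
 * either carrying the resolved name as EIR data or flagged with
 * MGMT_DEV_FOUND_NAME_REQUEST_FAILED when no name was obtained.
 */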
10940 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10941                       u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10942 {
10943         struct sk_buff *skb;
10944         struct mgmt_ev_device_found *ev;
10945         u16 eir_len = 0;
10946         u32 flags = 0;
10947
10948         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10949                              sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10950
10951         ev = skb_put(skb, sizeof(*ev));
10952         bacpy(&ev->addr.bdaddr, bdaddr);
10953         ev->addr.type = link_to_bdaddr(link_type, addr_type);
10954         ev->rssi = rssi;
10955
10956         if (name)
10957                 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10958         else
10959                 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10960
10961         ev->eir_len = cpu_to_le16(eir_len);
10962         ev->flags = cpu_to_le32(flags);
10963
10964         mgmt_event_skb(skb, NULL);
10965 }
10966
10967 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10968 {
10969         struct mgmt_ev_discovering ev;
10970
10971         bt_dev_dbg(hdev, "discovering %u", discovering);
10972
10973         memset(&ev, 0, sizeof(ev));
10974         ev.type = hdev->discovery.type;
10975         ev.discovering = discovering;
10976
10977         mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10978 }
10979
10980 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10981 {
10982         struct mgmt_ev_controller_suspend ev;
10983
10984         ev.suspend_state = state;
10985         mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10986 }
10987
10988 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10989                    u8 addr_type)
10990 {
10991         struct mgmt_ev_controller_resume ev;
10992
10993         ev.wake_reason = reason;
10994         if (bdaddr) {
10995                 bacpy(&ev.addr.bdaddr, bdaddr);
10996                 ev.addr.type = addr_type;
10997         } else {
10998                 memset(&ev.addr, 0, sizeof(ev.addr));
10999         }
11000
11001         mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
11002 }
11003
11004 static struct hci_mgmt_chan chan = {
11005         .channel        = HCI_CHANNEL_CONTROL,
11006         .handler_count  = ARRAY_SIZE(mgmt_handlers),
11007         .handlers       = mgmt_handlers,
11008 #ifdef TIZEN_BT
11009         .tizen_handler_count    = ARRAY_SIZE(tizen_mgmt_handlers),
11010         .tizen_handlers = tizen_mgmt_handlers,
11011 #endif
11012         .hdev_init      = mgmt_init_hdev,
11013 };
11014
11015 int mgmt_init(void)
11016 {
11017         return hci_mgmt_chan_register(&chan);
11018 }
11019
11020 void mgmt_exit(void)
11021 {
11022         hci_mgmt_chan_unregister(&chan);
11023 }
11024
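/* Called when a management socket is closed: complete, and thereby cancel,
 * any outstanding mesh transmissions owned by that socket on every
 * registered controller.
 */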
11025 void mgmt_cleanup(struct sock *sk)
11026 {
11027         struct mgmt_mesh_tx *mesh_tx;
11028         struct hci_dev *hdev;
11029
11030         read_lock(&hci_dev_list_lock);
11031
11032         list_for_each_entry(hdev, &hci_dev_list, list) {
11033                 do {
11034                         mesh_tx = mgmt_mesh_next(hdev, sk);
11035
11036                         if (mesh_tx)
11037                                 mesh_send_complete(hdev, mesh_tx, true);
11038                 } while (mesh_tx);
11039         }
11040
11041         read_unlock(&hci_dev_list_lock);
11042 }