Bluetooth: Add BT LE discovery feature
platform/kernel/linux-starfive.git: net/bluetooth/hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 #include "eir.h"
40
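/* All-zero 128-bit key value; used later in this file when checking whether
 * a received key is all zeroes.
 */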
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42                  "\x00\x00\x00\x00\x00\x00\x00\x00"
43
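/* Convert a timeout given in seconds into jiffies, e.g. when re-arming the
 * RPA expiry delayed work below with secs_to_jiffies(hdev->rpa_timeout).
 */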
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45
46 /* Handle HCI Event packets */
47
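/* The three helpers below length-check an event, Command Complete or LE
 * event payload before it is dereferenced: skb_pull_data() returns NULL
 * when the skb is shorter than the expected structure, and the malformed
 * packet is logged with the offending event code or opcode.
 */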
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
49                              u8 ev, size_t len)
50 {
51         void *data;
52
53         data = skb_pull_data(skb, len);
54         if (!data)
55                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
56
57         return data;
58 }
59
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
61                              u16 op, size_t len)
62 {
63         void *data;
64
65         data = skb_pull_data(skb, len);
66         if (!data)
67                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
68
69         return data;
70 }
71
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
73                                 u8 ev, size_t len)
74 {
75         void *data;
76
77         data = skb_pull_data(skb, len);
78         if (!data)
79                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
80
81         return data;
82 }
83
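/* Command Complete handlers: each hci_cc_* function below receives the
 * already length-checked reply in 'data' and returns the HCI status byte
 * of the command, bailing out early when the controller reported an error.
 */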
84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
85                                 struct sk_buff *skb)
86 {
87         struct hci_ev_status *rp = data;
88
89         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
90
91         /* It is possible that we receive the Inquiry Complete event right
92          * before the Inquiry Cancel Command Complete event, in which case
93          * the latter has the status Command Disallowed (0x0c). This should
94          * not be treated as an error, since we still achieve what Inquiry
95          * Cancel wants to achieve, which is to end the last Inquiry
96          * session.
97          */
98         if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
99                 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
100                 rp->status = 0x00;
101         }
102
103         if (rp->status)
104                 return rp->status;
105
106         clear_bit(HCI_INQUIRY, &hdev->flags);
107         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
108         wake_up_bit(&hdev->flags, HCI_INQUIRY);
109
110         hci_dev_lock(hdev);
111         /* Set discovery state to stopped if we're not doing LE active
112          * scanning.
113          */
114         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
115             hdev->le_scan_type != LE_SCAN_ACTIVE)
116                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
117         hci_dev_unlock(hdev);
118
119         hci_conn_check_pending(hdev);
120
121         return rp->status;
122 }
123
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
125                               struct sk_buff *skb)
126 {
127         struct hci_ev_status *rp = data;
128
129         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
130
131         if (rp->status)
132                 return rp->status;
133
134         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
135
136         return rp->status;
137 }
138
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
140                                    struct sk_buff *skb)
141 {
142         struct hci_ev_status *rp = data;
143
144         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
145
146         if (rp->status)
147                 return rp->status;
148
149         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
150
151         hci_conn_check_pending(hdev);
152
153         return rp->status;
154 }
155
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
157                                         struct sk_buff *skb)
158 {
159         struct hci_ev_status *rp = data;
160
161         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
162
163         return rp->status;
164 }
165
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
167                                 struct sk_buff *skb)
168 {
169         struct hci_rp_role_discovery *rp = data;
170         struct hci_conn *conn;
171
172         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
173
174         if (rp->status)
175                 return rp->status;
176
177         hci_dev_lock(hdev);
178
179         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180         if (conn)
181                 conn->role = rp->role;
182
183         hci_dev_unlock(hdev);
184
185         return rp->status;
186 }
187
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
189                                   struct sk_buff *skb)
190 {
191         struct hci_rp_read_link_policy *rp = data;
192         struct hci_conn *conn;
193
194         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
195
196         if (rp->status)
197                 return rp->status;
198
199         hci_dev_lock(hdev);
200
201         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
202         if (conn)
203                 conn->link_policy = __le16_to_cpu(rp->policy);
204
205         hci_dev_unlock(hdev);
206
207         return rp->status;
208 }
209
210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
211                                    struct sk_buff *skb)
212 {
213         struct hci_rp_write_link_policy *rp = data;
214         struct hci_conn *conn;
215         void *sent;
216
217         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
218
219         if (rp->status)
220                 return rp->status;
221
222         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
223         if (!sent)
224                 return rp->status;
225
226         hci_dev_lock(hdev);
227
228         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
229         if (conn)
230                 conn->link_policy = get_unaligned_le16(sent + 2);
231
232         hci_dev_unlock(hdev);
233
234         return rp->status;
235 }
236
237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
238                                       struct sk_buff *skb)
239 {
240         struct hci_rp_read_def_link_policy *rp = data;
241
242         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
243
244         if (rp->status)
245                 return rp->status;
246
247         hdev->link_policy = __le16_to_cpu(rp->policy);
248
249         return rp->status;
250 }
251
252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
253                                        struct sk_buff *skb)
254 {
255         struct hci_ev_status *rp = data;
256         void *sent;
257
258         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
259
260         if (rp->status)
261                 return rp->status;
262
263         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
264         if (!sent)
265                 return rp->status;
266
267         hdev->link_policy = get_unaligned_le16(sent);
268
269         return rp->status;
270 }
271
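/* HCI_Reset completed: drop HCI_RESET, clear all volatile flags, discard
 * cached advertising and scan response data, fall back to passive LE
 * scanning and empty the LE accept and resolving lists, so that host state
 * matches the freshly reset controller.
 */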
272 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
273 {
274         struct hci_ev_status *rp = data;
275
276         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
277
278         clear_bit(HCI_RESET, &hdev->flags);
279
280         if (rp->status)
281                 return rp->status;
282
283         /* Reset all non-persistent flags */
284         hci_dev_clear_volatile_flags(hdev);
285
286         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
287
288         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
289         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
290
291         memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
292         hdev->adv_data_len = 0;
293
294         memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
295         hdev->scan_rsp_data_len = 0;
296
297         hdev->le_scan_type = LE_SCAN_PASSIVE;
298
299         hdev->ssp_debug_mode = 0;
300
301         hci_bdaddr_list_clear(&hdev->le_accept_list);
302         hci_bdaddr_list_clear(&hdev->le_resolv_list);
303
304         return rp->status;
305 }
306
307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
308                                       struct sk_buff *skb)
309 {
310         struct hci_rp_read_stored_link_key *rp = data;
311         struct hci_cp_read_stored_link_key *sent;
312
313         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
314
315         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
316         if (!sent)
317                 return rp->status;
318
319         if (!rp->status && sent->read_all == 0x01) {
320                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
321                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
322         }
323
324         return rp->status;
325 }
326
327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
328                                         struct sk_buff *skb)
329 {
330         struct hci_rp_delete_stored_link_key *rp = data;
331         u16 num_keys;
332
333         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
334
335         if (rp->status)
336                 return rp->status;
337
338         num_keys = le16_to_cpu(rp->num_keys);
339
340         if (num_keys <= hdev->stored_num_keys)
341                 hdev->stored_num_keys -= num_keys;
342         else
343                 hdev->stored_num_keys = 0;
344
345         return rp->status;
346 }
347
348 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
349                                   struct sk_buff *skb)
350 {
351         struct hci_ev_status *rp = data;
352         void *sent;
353
354         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
355
356         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
357         if (!sent)
358                 return rp->status;
359
360         hci_dev_lock(hdev);
361
362         if (hci_dev_test_flag(hdev, HCI_MGMT))
363                 mgmt_set_local_name_complete(hdev, sent, rp->status);
364         else if (!rp->status)
365                 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
366
367         hci_dev_unlock(hdev);
368
369         return rp->status;
370 }
371
372 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
373                                  struct sk_buff *skb)
374 {
375         struct hci_rp_read_local_name *rp = data;
376
377         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
378
379         if (rp->status)
380                 return rp->status;
381
382         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
383             hci_dev_test_flag(hdev, HCI_CONFIG))
384                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
385
386         return rp->status;
387 }
388
389 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
390                                    struct sk_buff *skb)
391 {
392         struct hci_ev_status *rp = data;
393         void *sent;
394
395         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
396
397         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
398         if (!sent)
399                 return rp->status;
400
401         hci_dev_lock(hdev);
402
403         if (!rp->status) {
404                 __u8 param = *((__u8 *) sent);
405
406                 if (param == AUTH_ENABLED)
407                         set_bit(HCI_AUTH, &hdev->flags);
408                 else
409                         clear_bit(HCI_AUTH, &hdev->flags);
410         }
411
412         if (hci_dev_test_flag(hdev, HCI_MGMT))
413                 mgmt_auth_enable_complete(hdev, rp->status);
414
415         hci_dev_unlock(hdev);
416
417         return rp->status;
418 }
419
420 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
421                                     struct sk_buff *skb)
422 {
423         struct hci_ev_status *rp = data;
424         __u8 param;
425         void *sent;
426
427         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
428
429         if (rp->status)
430                 return rp->status;
431
432         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
433         if (!sent)
434                 return rp->status;
435
436         param = *((__u8 *) sent);
437
438         if (param)
439                 set_bit(HCI_ENCRYPT, &hdev->flags);
440         else
441                 clear_bit(HCI_ENCRYPT, &hdev->flags);
442
443         return rp->status;
444 }
445
446 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
447                                    struct sk_buff *skb)
448 {
449         struct hci_ev_status *rp = data;
450         __u8 param;
451         void *sent;
452
453         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
454
455         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
456         if (!sent)
457                 return rp->status;
458
459         param = *((__u8 *) sent);
460
461         hci_dev_lock(hdev);
462
463         if (rp->status) {
464                 hdev->discov_timeout = 0;
465                 goto done;
466         }
467
468         if (param & SCAN_INQUIRY)
469                 set_bit(HCI_ISCAN, &hdev->flags);
470         else
471                 clear_bit(HCI_ISCAN, &hdev->flags);
472
473         if (param & SCAN_PAGE)
474                 set_bit(HCI_PSCAN, &hdev->flags);
475         else
476                 clear_bit(HCI_PSCAN, &hdev->flags);
477
478 done:
479         hci_dev_unlock(hdev);
480
481         return rp->status;
482 }
483
484 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
485                                   struct sk_buff *skb)
486 {
487         struct hci_ev_status *rp = data;
488         struct hci_cp_set_event_filter *cp;
489         void *sent;
490
491         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
492
493         if (rp->status)
494                 return rp->status;
495
496         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
497         if (!sent)
498                 return rp->status;
499
500         cp = (struct hci_cp_set_event_filter *)sent;
501
502         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
503                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
504         else
505                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
506
507         return rp->status;
508 }
509
510 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
511                                    struct sk_buff *skb)
512 {
513         struct hci_rp_read_class_of_dev *rp = data;
514
515         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
516
517         if (rp->status)
518                 return rp->status;
519
520         memcpy(hdev->dev_class, rp->dev_class, 3);
521
522         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
523                    hdev->dev_class[1], hdev->dev_class[0]);
524
525         return rp->status;
526 }
527
528 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
529                                     struct sk_buff *skb)
530 {
531         struct hci_ev_status *rp = data;
532         void *sent;
533
534         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
535
536         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
537         if (!sent)
538                 return rp->status;
539
540         hci_dev_lock(hdev);
541
542         if (!rp->status)
543                 memcpy(hdev->dev_class, sent, 3);
544
545         if (hci_dev_test_flag(hdev, HCI_MGMT))
546                 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
547
548         hci_dev_unlock(hdev);
549
550         return rp->status;
551 }
552
553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
554                                     struct sk_buff *skb)
555 {
556         struct hci_rp_read_voice_setting *rp = data;
557         __u16 setting;
558
559         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
560
561         if (rp->status)
562                 return rp->status;
563
564         setting = __le16_to_cpu(rp->voice_setting);
565
566         if (hdev->voice_setting == setting)
567                 return rp->status;
568
569         hdev->voice_setting = setting;
570
571         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
572
573         if (hdev->notify)
574                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
575
576         return rp->status;
577 }
578
579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
580                                      struct sk_buff *skb)
581 {
582         struct hci_ev_status *rp = data;
583         __u16 setting;
584         void *sent;
585
586         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
587
588         if (rp->status)
589                 return rp->status;
590
591         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
592         if (!sent)
593                 return rp->status;
594
595         setting = get_unaligned_le16(sent);
596
597         if (hdev->voice_setting == setting)
598                 return rp->status;
599
600         hdev->voice_setting = setting;
601
602         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
603
604         if (hdev->notify)
605                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
606
607         return rp->status;
608 }
609
610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
611                                         struct sk_buff *skb)
612 {
613         struct hci_rp_read_num_supported_iac *rp = data;
614
615         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
616
617         if (rp->status)
618                 return rp->status;
619
620         hdev->num_iac = rp->num_iac;
621
622         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
623
624         return rp->status;
625 }
626
627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
628                                 struct sk_buff *skb)
629 {
630         struct hci_ev_status *rp = data;
631         struct hci_cp_write_ssp_mode *sent;
632
633         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
634
635         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
636         if (!sent)
637                 return rp->status;
638
639         hci_dev_lock(hdev);
640
641         if (!rp->status) {
642                 if (sent->mode)
643                         hdev->features[1][0] |= LMP_HOST_SSP;
644                 else
645                         hdev->features[1][0] &= ~LMP_HOST_SSP;
646         }
647
648         if (!rp->status) {
649                 if (sent->mode)
650                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
651                 else
652                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
653         }
654
655         hci_dev_unlock(hdev);
656
657         return rp->status;
658 }
659
660 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
661                                   struct sk_buff *skb)
662 {
663         struct hci_ev_status *rp = data;
664         struct hci_cp_write_sc_support *sent;
665
666         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
667
668         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
669         if (!sent)
670                 return rp->status;
671
672         hci_dev_lock(hdev);
673
674         if (!rp->status) {
675                 if (sent->support)
676                         hdev->features[1][0] |= LMP_HOST_SC;
677                 else
678                         hdev->features[1][0] &= ~LMP_HOST_SC;
679         }
680
681         if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
682                 if (sent->support)
683                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
684                 else
685                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
686         }
687
688         hci_dev_unlock(hdev);
689
690         return rp->status;
691 }
692
693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
694                                     struct sk_buff *skb)
695 {
696         struct hci_rp_read_local_version *rp = data;
697
698         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
699
700         if (rp->status)
701                 return rp->status;
702
703         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
704             hci_dev_test_flag(hdev, HCI_CONFIG)) {
705                 hdev->hci_ver = rp->hci_ver;
706                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
707                 hdev->lmp_ver = rp->lmp_ver;
708                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
709                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
710         }
711
712         return rp->status;
713 }
714
715 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
716                                    struct sk_buff *skb)
717 {
718         struct hci_rp_read_enc_key_size *rp = data;
719         struct hci_conn *conn;
720         u16 handle;
721         u8 status = rp->status;
722
723         bt_dev_dbg(hdev, "status 0x%2.2x", status);
724
725         handle = le16_to_cpu(rp->handle);
726
727         hci_dev_lock(hdev);
728
729         conn = hci_conn_hash_lookup_handle(hdev, handle);
730         if (!conn) {
731                 status = 0xFF;
732                 goto done;
733         }
734
735         /* While unexpected, the read_enc_key_size command may fail. The most
736          * secure approach is to then assume the key size is 0 to force a
737          * disconnection.
738          */
739         if (status) {
740                 bt_dev_err(hdev, "failed to read key size for handle %u",
741                            handle);
742                 conn->enc_key_size = 0;
743         } else {
744                 conn->enc_key_size = rp->key_size;
745                 status = 0;
746         }
747
748         hci_encrypt_cfm(conn, 0);
749
750 done:
751         hci_dev_unlock(hdev);
752
753         return status;
754 }
755
756 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
757                                      struct sk_buff *skb)
758 {
759         struct hci_rp_read_local_commands *rp = data;
760
761         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
762
763         if (rp->status)
764                 return rp->status;
765
766         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
767             hci_dev_test_flag(hdev, HCI_CONFIG))
768                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
769
770         return rp->status;
771 }
772
773 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
774                                            struct sk_buff *skb)
775 {
776         struct hci_rp_read_auth_payload_to *rp = data;
777         struct hci_conn *conn;
778
779         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
780
781         if (rp->status)
782                 return rp->status;
783
784         hci_dev_lock(hdev);
785
786         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
787         if (conn)
788                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
789
790         hci_dev_unlock(hdev);
791
792         return rp->status;
793 }
794
795 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
796                                             struct sk_buff *skb)
797 {
798         struct hci_rp_write_auth_payload_to *rp = data;
799         struct hci_conn *conn;
800         void *sent;
801
802         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
803
804         if (rp->status)
805                 return rp->status;
806
807         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
808         if (!sent)
809                 return rp->status;
810
811         hci_dev_lock(hdev);
812
813         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
814         if (conn)
815                 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
816
817         hci_dev_unlock(hdev);
818
819         return rp->status;
820 }
821
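/* Map the LMP feature bits reported by the controller onto the ACL and
 * (e)SCO packet types the host is allowed to use.
 */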
822 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
823                                      struct sk_buff *skb)
824 {
825         struct hci_rp_read_local_features *rp = data;
826
827         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
828
829         if (rp->status)
830                 return rp->status;
831
832         memcpy(hdev->features, rp->features, 8);
833
834         /* Adjust default settings according to the features
835          * supported by the device. */
836
837         if (hdev->features[0][0] & LMP_3SLOT)
838                 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
839
840         if (hdev->features[0][0] & LMP_5SLOT)
841                 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
842
843         if (hdev->features[0][1] & LMP_HV2) {
844                 hdev->pkt_type  |= (HCI_HV2);
845                 hdev->esco_type |= (ESCO_HV2);
846         }
847
848         if (hdev->features[0][1] & LMP_HV3) {
849                 hdev->pkt_type  |= (HCI_HV3);
850                 hdev->esco_type |= (ESCO_HV3);
851         }
852
853         if (lmp_esco_capable(hdev))
854                 hdev->esco_type |= (ESCO_EV3);
855
856         if (hdev->features[0][4] & LMP_EV4)
857                 hdev->esco_type |= (ESCO_EV4);
858
859         if (hdev->features[0][4] & LMP_EV5)
860                 hdev->esco_type |= (ESCO_EV5);
861
862         if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
863                 hdev->esco_type |= (ESCO_2EV3);
864
865         if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
866                 hdev->esco_type |= (ESCO_3EV3);
867
868         if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
869                 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
870
871         return rp->status;
872 }
873
874 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
875                                          struct sk_buff *skb)
876 {
877         struct hci_rp_read_local_ext_features *rp = data;
878
879         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
880
881         if (rp->status)
882                 return rp->status;
883
884         if (hdev->max_page < rp->max_page) {
885                 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
886                              &hdev->quirks))
887                         bt_dev_warn(hdev, "broken local ext features page 2");
888                 else
889                         hdev->max_page = rp->max_page;
890         }
891
892         if (rp->page < HCI_MAX_PAGES)
893                 memcpy(hdev->features[rp->page], rp->features, 8);
894
895         return rp->status;
896 }
897
898 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
899                                         struct sk_buff *skb)
900 {
901         struct hci_rp_read_flow_control_mode *rp = data;
902
903         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
904
905         if (rp->status)
906                 return rp->status;
907
908         hdev->flow_ctl_mode = rp->mode;
909
910         return rp->status;
911 }
912
913 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
914                                   struct sk_buff *skb)
915 {
916         struct hci_rp_read_buffer_size *rp = data;
917
918         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
919
920         if (rp->status)
921                 return rp->status;
922
923         hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
924         hdev->sco_mtu  = rp->sco_mtu;
925         hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
926         hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
927
928         if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
929                 hdev->sco_mtu  = 64;
930                 hdev->sco_pkts = 8;
931         }
932
933         hdev->acl_cnt = hdev->acl_pkts;
934         hdev->sco_cnt = hdev->sco_pkts;
935
936         BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
937                hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
938
939         return rp->status;
940 }
941
942 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
943                               struct sk_buff *skb)
944 {
945         struct hci_rp_read_bd_addr *rp = data;
946
947         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
948
949         if (rp->status)
950                 return rp->status;
951
952         if (test_bit(HCI_INIT, &hdev->flags))
953                 bacpy(&hdev->bdaddr, &rp->bdaddr);
954
955         if (hci_dev_test_flag(hdev, HCI_SETUP))
956                 bacpy(&hdev->setup_addr, &rp->bdaddr);
957
958         return rp->status;
959 }
960
961 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
962                                          struct sk_buff *skb)
963 {
964         struct hci_rp_read_local_pairing_opts *rp = data;
965
966         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
967
968         if (rp->status)
969                 return rp->status;
970
971         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
972             hci_dev_test_flag(hdev, HCI_CONFIG)) {
973                 hdev->pairing_opts = rp->pairing_opts;
974                 hdev->max_enc_key_size = rp->max_key_size;
975         }
976
977         return rp->status;
978 }
979
980 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
981                                          struct sk_buff *skb)
982 {
983         struct hci_rp_read_page_scan_activity *rp = data;
984
985         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
986
987         if (rp->status)
988                 return rp->status;
989
990         if (test_bit(HCI_INIT, &hdev->flags)) {
991                 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
992                 hdev->page_scan_window = __le16_to_cpu(rp->window);
993         }
994
995         return rp->status;
996 }
997
998 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
999                                           struct sk_buff *skb)
1000 {
1001         struct hci_ev_status *rp = data;
1002         struct hci_cp_write_page_scan_activity *sent;
1003
1004         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1005
1006         if (rp->status)
1007                 return rp->status;
1008
1009         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1010         if (!sent)
1011                 return rp->status;
1012
1013         hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1014         hdev->page_scan_window = __le16_to_cpu(sent->window);
1015
1016         return rp->status;
1017 }
1018
1019 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1020                                      struct sk_buff *skb)
1021 {
1022         struct hci_rp_read_page_scan_type *rp = data;
1023
1024         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1025
1026         if (rp->status)
1027                 return rp->status;
1028
1029         if (test_bit(HCI_INIT, &hdev->flags))
1030                 hdev->page_scan_type = rp->type;
1031
1032         return rp->status;
1033 }
1034
1035 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1036                                       struct sk_buff *skb)
1037 {
1038         struct hci_ev_status *rp = data;
1039         u8 *type;
1040
1041         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1042
1043         if (rp->status)
1044                 return rp->status;
1045
1046         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1047         if (type)
1048                 hdev->page_scan_type = *type;
1049
1050         return rp->status;
1051 }
1052
1053 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1054                                       struct sk_buff *skb)
1055 {
1056         struct hci_rp_read_data_block_size *rp = data;
1057
1058         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1059
1060         if (rp->status)
1061                 return rp->status;
1062
1063         hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1064         hdev->block_len = __le16_to_cpu(rp->block_len);
1065         hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1066
1067         hdev->block_cnt = hdev->num_blocks;
1068
1069         BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1070                hdev->block_cnt, hdev->block_len);
1071
1072         return rp->status;
1073 }
1074
1075 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1076                             struct sk_buff *skb)
1077 {
1078         struct hci_rp_read_clock *rp = data;
1079         struct hci_cp_read_clock *cp;
1080         struct hci_conn *conn;
1081
1082         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1083
1084         if (rp->status)
1085                 return rp->status;
1086
1087         hci_dev_lock(hdev);
1088
1089         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1090         if (!cp)
1091                 goto unlock;
1092
1093         if (cp->which == 0x00) {
1094                 hdev->clock = le32_to_cpu(rp->clock);
1095                 goto unlock;
1096         }
1097
1098         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1099         if (conn) {
1100                 conn->clock = le32_to_cpu(rp->clock);
1101                 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1102         }
1103
1104 unlock:
1105         hci_dev_unlock(hdev);
1106         return rp->status;
1107 }
1108
1109 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1110                                      struct sk_buff *skb)
1111 {
1112         struct hci_rp_read_local_amp_info *rp = data;
1113
1114         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1115
1116         if (rp->status)
1117                 return rp->status;
1118
1119         hdev->amp_status = rp->amp_status;
1120         hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1121         hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1122         hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1123         hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1124         hdev->amp_type = rp->amp_type;
1125         hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1126         hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1127         hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1128         hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1129
1130         return rp->status;
1131 }
1132
1133 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1134                                        struct sk_buff *skb)
1135 {
1136         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1137
1138         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1139
1140         if (rp->status)
1141                 return rp->status;
1142
1143         hdev->inq_tx_power = rp->tx_power;
1144
1145         return rp->status;
1146 }
1147
1148 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1149                                              struct sk_buff *skb)
1150 {
1151         struct hci_rp_read_def_err_data_reporting *rp = data;
1152
1153         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1154
1155         if (rp->status)
1156                 return rp->status;
1157
1158         hdev->err_data_reporting = rp->err_data_reporting;
1159
1160         return rp->status;
1161 }
1162
1163 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1164                                               struct sk_buff *skb)
1165 {
1166         struct hci_ev_status *rp = data;
1167         struct hci_cp_write_def_err_data_reporting *cp;
1168
1169         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1170
1171         if (rp->status)
1172                 return rp->status;
1173
1174         cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1175         if (!cp)
1176                 return rp->status;
1177
1178         hdev->err_data_reporting = cp->err_data_reporting;
1179
1180         return rp->status;
1181 }
1182
1183 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1184                                 struct sk_buff *skb)
1185 {
1186         struct hci_rp_pin_code_reply *rp = data;
1187         struct hci_cp_pin_code_reply *cp;
1188         struct hci_conn *conn;
1189
1190         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1191
1192         hci_dev_lock(hdev);
1193
1194         if (hci_dev_test_flag(hdev, HCI_MGMT))
1195                 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1196
1197         if (rp->status)
1198                 goto unlock;
1199
1200         cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1201         if (!cp)
1202                 goto unlock;
1203
1204         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1205         if (conn)
1206                 conn->pin_length = cp->pin_len;
1207
1208 unlock:
1209         hci_dev_unlock(hdev);
1210         return rp->status;
1211 }
1212
1213 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1214                                     struct sk_buff *skb)
1215 {
1216         struct hci_rp_pin_code_neg_reply *rp = data;
1217
1218         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1219
1220         hci_dev_lock(hdev);
1221
1222         if (hci_dev_test_flag(hdev, HCI_MGMT))
1223                 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1224                                                  rp->status);
1225
1226         hci_dev_unlock(hdev);
1227
1228         return rp->status;
1229 }
1230
1231 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1232                                      struct sk_buff *skb)
1233 {
1234         struct hci_rp_le_read_buffer_size *rp = data;
1235
1236         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1237
1238         if (rp->status)
1239                 return rp->status;
1240
1241         hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1242         hdev->le_pkts = rp->le_max_pkt;
1243
1244         hdev->le_cnt = hdev->le_pkts;
1245
1246         BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1247
1248         return rp->status;
1249 }
1250
1251 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1252                                         struct sk_buff *skb)
1253 {
1254         struct hci_rp_le_read_local_features *rp = data;
1255
1256         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1257
1258         if (rp->status)
1259                 return rp->status;
1260
1261         memcpy(hdev->le_features, rp->features, 8);
1262
1263         return rp->status;
1264 }
1265
1266 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1267                                       struct sk_buff *skb)
1268 {
1269         struct hci_rp_le_read_adv_tx_power *rp = data;
1270
1271         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1272
1273         if (rp->status)
1274                 return rp->status;
1275
1276         hdev->adv_tx_power = rp->tx_power;
1277
1278         return rp->status;
1279 }
1280
1281 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1282                                     struct sk_buff *skb)
1283 {
1284         struct hci_rp_user_confirm_reply *rp = data;
1285
1286         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1287
1288         hci_dev_lock(hdev);
1289
1290         if (hci_dev_test_flag(hdev, HCI_MGMT))
1291                 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1292                                                  rp->status);
1293
1294         hci_dev_unlock(hdev);
1295
1296         return rp->status;
1297 }
1298
1299 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1300                                         struct sk_buff *skb)
1301 {
1302         struct hci_rp_user_confirm_reply *rp = data;
1303
1304         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1305
1306         hci_dev_lock(hdev);
1307
1308         if (hci_dev_test_flag(hdev, HCI_MGMT))
1309                 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1310                                                      ACL_LINK, 0, rp->status);
1311
1312         hci_dev_unlock(hdev);
1313
1314         return rp->status;
1315 }
1316
1317 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1318                                     struct sk_buff *skb)
1319 {
1320         struct hci_rp_user_confirm_reply *rp = data;
1321
1322         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1323
1324         hci_dev_lock(hdev);
1325
1326         if (hci_dev_test_flag(hdev, HCI_MGMT))
1327                 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1328                                                  0, rp->status);
1329
1330         hci_dev_unlock(hdev);
1331
1332         return rp->status;
1333 }
1334
1335 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1336                                         struct sk_buff *skb)
1337 {
1338         struct hci_rp_user_confirm_reply *rp = data;
1339
1340         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1341
1342         hci_dev_lock(hdev);
1343
1344         if (hci_dev_test_flag(hdev, HCI_MGMT))
1345                 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1346                                                      ACL_LINK, 0, rp->status);
1347
1348         hci_dev_unlock(hdev);
1349
1350         return rp->status;
1351 }
1352
1353 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1354                                      struct sk_buff *skb)
1355 {
1356         struct hci_rp_read_local_oob_data *rp = data;
1357
1358         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1359
1360         return rp->status;
1361 }
1362
1363 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1364                                          struct sk_buff *skb)
1365 {
1366         struct hci_rp_read_local_oob_ext_data *rp = data;
1367
1368         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1369
1370         return rp->status;
1371 }
1372
1373 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1374                                     struct sk_buff *skb)
1375 {
1376         struct hci_ev_status *rp = data;
1377         bdaddr_t *sent;
1378
1379         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1380
1381         if (rp->status)
1382                 return rp->status;
1383
1384         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1385         if (!sent)
1386                 return rp->status;
1387
1388         hci_dev_lock(hdev);
1389
1390         bacpy(&hdev->random_addr, sent);
1391
1392         if (!bacmp(&hdev->rpa, sent)) {
1393                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1394                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1395                                    secs_to_jiffies(hdev->rpa_timeout));
1396         }
1397
1398         hci_dev_unlock(hdev);
1399
1400         return rp->status;
1401 }
1402
1403 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1404                                     struct sk_buff *skb)
1405 {
1406         struct hci_ev_status *rp = data;
1407         struct hci_cp_le_set_default_phy *cp;
1408
1409         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1410
1411         if (rp->status)
1412                 return rp->status;
1413
1414         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1415         if (!cp)
1416                 return rp->status;
1417
1418         hci_dev_lock(hdev);
1419
1420         hdev->le_tx_def_phys = cp->tx_phys;
1421         hdev->le_rx_def_phys = cp->rx_phys;
1422
1423         hci_dev_unlock(hdev);
1424
1425         return rp->status;
1426 }
1427
1428 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1429                                             struct sk_buff *skb)
1430 {
1431         struct hci_ev_status *rp = data;
1432         struct hci_cp_le_set_adv_set_rand_addr *cp;
1433         struct adv_info *adv;
1434
1435         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1436
1437         if (rp->status)
1438                 return rp->status;
1439
1440         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1441         /* Update only for a non-zero adv instance, since handle 0x00 shall
1442          * be using HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended
1443          * and non-extended advertising.
1444          */
1445         if (!cp || !cp->handle)
1446                 return rp->status;
1447
1448         hci_dev_lock(hdev);
1449
1450         adv = hci_find_adv_instance(hdev, cp->handle);
1451         if (adv) {
1452                 bacpy(&adv->random_addr, &cp->bdaddr);
1453                 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1454                         adv->rpa_expired = false;
1455                         queue_delayed_work(hdev->workqueue,
1456                                            &adv->rpa_expired_cb,
1457                                            secs_to_jiffies(hdev->rpa_timeout));
1458                 }
1459         }
1460
1461         hci_dev_unlock(hdev);
1462
1463         return rp->status;
1464 }
1465
1466 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1467                                    struct sk_buff *skb)
1468 {
1469         struct hci_ev_status *rp = data;
1470         u8 *instance;
1471         int err;
1472
1473         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1474
1475         if (rp->status)
1476                 return rp->status;
1477
1478         instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1479         if (!instance)
1480                 return rp->status;
1481
1482         hci_dev_lock(hdev);
1483
1484         err = hci_remove_adv_instance(hdev, *instance);
1485         if (!err)
1486                 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1487                                          *instance);
1488
1489         hci_dev_unlock(hdev);
1490
1491         return rp->status;
1492 }
1493
1494 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1495                                    struct sk_buff *skb)
1496 {
1497         struct hci_ev_status *rp = data;
1498         struct adv_info *adv, *n;
1499         int err;
1500
1501         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1502
1503         if (rp->status)
1504                 return rp->status;
1505
1506         if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1507                 return rp->status;
1508
1509         hci_dev_lock(hdev);
1510
1511         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1512                 u8 instance = adv->instance;
1513
1514                 err = hci_remove_adv_instance(hdev, instance);
1515                 if (!err)
1516                         mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1517                                                  hdev, instance);
1518         }
1519
1520         hci_dev_unlock(hdev);
1521
1522         return rp->status;
1523 }
1524
1525 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1526                                         struct sk_buff *skb)
1527 {
1528         struct hci_rp_le_read_transmit_power *rp = data;
1529
1530         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1531
1532         if (rp->status)
1533                 return rp->status;
1534
1535         hdev->min_le_tx_power = rp->min_le_tx_power;
1536         hdev->max_le_tx_power = rp->max_le_tx_power;
1537
1538         return rp->status;
1539 }
1540
1541 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1542                                      struct sk_buff *skb)
1543 {
1544         struct hci_ev_status *rp = data;
1545         struct hci_cp_le_set_privacy_mode *cp;
1546         struct hci_conn_params *params;
1547
1548         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1549
1550         if (rp->status)
1551                 return rp->status;
1552
1553         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1554         if (!cp)
1555                 return rp->status;
1556
1557         hci_dev_lock(hdev);
1558
1559         params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1560         if (params)
1561                 params->privacy_mode = cp->mode;
1562
1563         hci_dev_unlock(hdev);
1564
1565         return rp->status;
1566 }
1567
1568 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1569                                    struct sk_buff *skb)
1570 {
1571         struct hci_ev_status *rp = data;
1572         __u8 *sent;
1573
1574         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1575
1576         if (rp->status)
1577                 return rp->status;
1578
1579         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1580         if (!sent)
1581                 return rp->status;
1582
1583         hci_dev_lock(hdev);
1584
1585         /* If we're doing connection initiation as peripheral, set a
1586          * timeout in case something goes wrong.
1587          */
1588         if (*sent) {
1589                 struct hci_conn *conn;
1590
1591                 hci_dev_set_flag(hdev, HCI_LE_ADV);
1592
1593                 conn = hci_lookup_le_connect(hdev);
1594                 if (conn)
1595                         queue_delayed_work(hdev->workqueue,
1596                                            &conn->le_conn_timeout,
1597                                            conn->conn_timeout);
1598         } else {
1599                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1600         }
1601
1602         hci_dev_unlock(hdev);
1603
1604         return rp->status;
1605 }
1606
1607 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1608                                        struct sk_buff *skb)
1609 {
1610         struct hci_cp_le_set_ext_adv_enable *cp;
1611         struct hci_cp_ext_adv_set *set;
1612         struct adv_info *adv = NULL, *n;
1613         struct hci_ev_status *rp = data;
1614
1615         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1616
1617         if (rp->status)
1618                 return rp->status;
1619
1620         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1621         if (!cp)
1622                 return rp->status;
1623
1624         set = (void *)cp->data;
1625
1626         hci_dev_lock(hdev);
1627
1628         if (cp->num_of_sets)
1629                 adv = hci_find_adv_instance(hdev, set->handle);
1630
1631         if (cp->enable) {
1632                 struct hci_conn *conn;
1633
1634                 hci_dev_set_flag(hdev, HCI_LE_ADV);
1635
1636                 if (adv)
1637                         adv->enabled = true;
1638
1639                 conn = hci_lookup_le_connect(hdev);
1640                 if (conn)
1641                         queue_delayed_work(hdev->workqueue,
1642                                            &conn->le_conn_timeout,
1643                                            conn->conn_timeout);
1644         } else {
1645                 if (cp->num_of_sets) {
1646                         if (adv)
1647                                 adv->enabled = false;
1648
1649                         /* If just one instance was disabled, check whether any
1650                          * other instance is still enabled before clearing HCI_LE_ADV.
1651                          */
1652                         list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1653                                                  list) {
1654                                 if (adv->enabled)
1655                                         goto unlock;
1656                         }
1657                 } else {
1658                         /* All instances shall be considered disabled */
1659                         list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1660                                                  list)
1661                                 adv->enabled = false;
1662                 }
1663
1664                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1665         }
1666
1667 unlock:
1668         hci_dev_unlock(hdev);
1669         return rp->status;
1670 }
1671
1672 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1673                                    struct sk_buff *skb)
1674 {
1675         struct hci_cp_le_set_scan_param *cp;
1676         struct hci_ev_status *rp = data;
1677
1678         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1679
1680         if (rp->status)
1681                 return rp->status;
1682
1683         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1684         if (!cp)
1685                 return rp->status;
1686
1687         hci_dev_lock(hdev);
1688
1689         hdev->le_scan_type = cp->type;
1690
1691         hci_dev_unlock(hdev);
1692
1693         return rp->status;
1694 }
1695
1696 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1697                                        struct sk_buff *skb)
1698 {
1699         struct hci_cp_le_set_ext_scan_params *cp;
1700         struct hci_ev_status *rp = data;
1701         struct hci_cp_le_scan_phy_params *phy_param;
1702
1703         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1704
1705         if (rp->status)
1706                 return rp->status;
1707
1708         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1709         if (!cp)
1710                 return rp->status;
1711
1712         phy_param = (void *)cp->data;
1713
1714         hci_dev_lock(hdev);
1715
1716         hdev->le_scan_type = phy_param->type;
1717
1718         hci_dev_unlock(hdev);
1719
1720         return rp->status;
1721 }
1722
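/* During an active LE scan the most recent advertising report is buffered
 * in hdev->discovery (last_adv_*) so it can be combined with a subsequent
 * scan response before being reported; the helpers below test, clear and
 * fill that buffer.
 */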
1723 static bool has_pending_adv_report(struct hci_dev *hdev)
1724 {
1725         struct discovery_state *d = &hdev->discovery;
1726
1727         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1728 }
1729
1730 static void clear_pending_adv_report(struct hci_dev *hdev)
1731 {
1732         struct discovery_state *d = &hdev->discovery;
1733
1734         bacpy(&d->last_adv_addr, BDADDR_ANY);
1735         d->last_adv_data_len = 0;
1736 }
1737
1738 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1739                                      u8 bdaddr_type, s8 rssi, u32 flags,
1740                                      u8 *data, u8 len)
1741 {
1742         struct discovery_state *d = &hdev->discovery;
1743
1744         if (len > HCI_MAX_AD_LENGTH)
1745                 return;
1746
1747         bacpy(&d->last_adv_addr, bdaddr);
1748         d->last_adv_addr_type = bdaddr_type;
1749         d->last_adv_rssi = rssi;
1750         d->last_adv_flags = flags;
1751         memcpy(d->last_adv_data, data, len);
1752         d->last_adv_data_len = len;
1753 }
1754
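/* Common completion handling for both the legacy and the extended
 * LE Set Scan Enable commands: update the HCI_LE_SCAN flag, deliver or
 * clear any cached advertising report, cancel the scan-disable work and
 * move the discovery state machine along where needed.
 */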
1755 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1756 {
1757         hci_dev_lock(hdev);
1758
1759         switch (enable) {
1760         case LE_SCAN_ENABLE:
1761                 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1762                 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1763                         clear_pending_adv_report(hdev);
1764                 if (hci_dev_test_flag(hdev, HCI_MESH))
1765                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1766                 break;
1767
1768         case LE_SCAN_DISABLE:
1769                 /* We do this here instead of when setting DISCOVERY_STOPPED
1770                  * since the latter would potentially require waiting for
1771                  * inquiry to stop too.
1772                  */
1773                 if (has_pending_adv_report(hdev)) {
1774                         struct discovery_state *d = &hdev->discovery;
1775
1776                         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1777                                           d->last_adv_addr_type, NULL,
1778                                           d->last_adv_rssi, d->last_adv_flags,
1779                                           d->last_adv_data,
1780                                           d->last_adv_data_len, NULL, 0, 0);
1781                 }
1782
1783                 /* Cancel this timer so that we don't try to disable scanning
1784                  * when it's already disabled.
1785                  */
1786                 cancel_delayed_work(&hdev->le_scan_disable);
1787
1788                 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1789
1790                 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1791                  * interrupted scanning due to a connect request. Therefore,
1792                  * mark discovery as stopped.
1793                  */
1794                 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1795 #ifndef TIZEN_BT /* The line below is a kernel bug. */
1796                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1797 #else
1798                         hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1799 #endif
1800                 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1801                          hdev->discovery.state == DISCOVERY_FINDING)
1802                         queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1803
1804                 break;
1805
1806         default:
1807                 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1808                            enable);
1809                 break;
1810         }
1811
1812         hci_dev_unlock(hdev);
1813 }
1814
1815 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1816                                     struct sk_buff *skb)
1817 {
1818         struct hci_cp_le_set_scan_enable *cp;
1819         struct hci_ev_status *rp = data;
1820
1821         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1822
1823         if (rp->status)
1824                 return rp->status;
1825
1826         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1827         if (!cp)
1828                 return rp->status;
1829
1830         le_set_scan_enable_complete(hdev, cp->enable);
1831
1832         return rp->status;
1833 }
1834
1835 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1836                                         struct sk_buff *skb)
1837 {
1838         struct hci_cp_le_set_ext_scan_enable *cp;
1839         struct hci_ev_status *rp = data;
1840
1841         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1842
1843         if (rp->status)
1844                 return rp->status;
1845
1846         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1847         if (!cp)
1848                 return rp->status;
1849
1850         le_set_scan_enable_complete(hdev, cp->enable);
1851
1852         return rp->status;
1853 }
1854
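/* Several of the Command Complete handlers below simply cache values that
 * the controller reports, typically during initialization (number of
 * advertising sets, accept/resolving list sizes, supported states, data
 * length limits), so that the rest of the stack can consult hdev instead
 * of re-querying the controller.
 */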
1855 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1856                                       struct sk_buff *skb)
1857 {
1858         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1859
1860         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1861                    rp->num_of_sets);
1862
1863         if (rp->status)
1864                 return rp->status;
1865
1866         hdev->le_num_of_adv_sets = rp->num_of_sets;
1867
1868         return rp->status;
1869 }
1870
1871 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1872                                           struct sk_buff *skb)
1873 {
1874         struct hci_rp_le_read_accept_list_size *rp = data;
1875
1876         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1877
1878         if (rp->status)
1879                 return rp->status;
1880
1881         hdev->le_accept_list_size = rp->size;
1882
1883         return rp->status;
1884 }
1885
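/* The following handlers keep the host-side copy of the controller's filter
 * accept list (hdev->le_accept_list) in sync with the clear/add/delete
 * commands that the controller actually accepted.
 */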
1886 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1887                                       struct sk_buff *skb)
1888 {
1889         struct hci_ev_status *rp = data;
1890
1891         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1892
1893         if (rp->status)
1894                 return rp->status;
1895
1896         hci_dev_lock(hdev);
1897         hci_bdaddr_list_clear(&hdev->le_accept_list);
1898         hci_dev_unlock(hdev);
1899
1900         return rp->status;
1901 }
1902
1903 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1904                                        struct sk_buff *skb)
1905 {
1906         struct hci_cp_le_add_to_accept_list *sent;
1907         struct hci_ev_status *rp = data;
1908
1909         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1910
1911         if (rp->status)
1912                 return rp->status;
1913
1914         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1915         if (!sent)
1916                 return rp->status;
1917
1918         hci_dev_lock(hdev);
1919         hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1920                             sent->bdaddr_type);
1921         hci_dev_unlock(hdev);
1922
1923         return rp->status;
1924 }
1925
1926 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1927                                          struct sk_buff *skb)
1928 {
1929         struct hci_cp_le_del_from_accept_list *sent;
1930         struct hci_ev_status *rp = data;
1931
1932         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1933
1934         if (rp->status)
1935                 return rp->status;
1936
1937         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1938         if (!sent)
1939                 return rp->status;
1940
1941         hci_dev_lock(hdev);
1942         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1943                             sent->bdaddr_type);
1944         hci_dev_unlock(hdev);
1945
1946         return rp->status;
1947 }
1948
1949 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1950                                           struct sk_buff *skb)
1951 {
1952         struct hci_rp_le_read_supported_states *rp = data;
1953
1954         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1955
1956         if (rp->status)
1957                 return rp->status;
1958
1959         memcpy(hdev->le_states, rp->le_states, 8);
1960
1961         return rp->status;
1962 }
1963
1964 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1965                                       struct sk_buff *skb)
1966 {
1967         struct hci_rp_le_read_def_data_len *rp = data;
1968
1969         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1970
1971         if (rp->status)
1972                 return rp->status;
1973
1974         hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1975         hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1976
1977         return rp->status;
1978 }
1979
1980 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1981                                        struct sk_buff *skb)
1982 {
1983         struct hci_cp_le_write_def_data_len *sent;
1984         struct hci_ev_status *rp = data;
1985
1986         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1987
1988         if (rp->status)
1989                 return rp->status;
1990
1991         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1992         if (!sent)
1993                 return rp->status;
1994
1995         hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1996         hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1997
1998         return rp->status;
1999 }
2000
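/* As with the accept list above, the resolving list handlers mirror
 * successful controller updates, including the peer and local IRKs, into
 * hdev->le_resolv_list.
 */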
2001 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2002                                        struct sk_buff *skb)
2003 {
2004         struct hci_cp_le_add_to_resolv_list *sent;
2005         struct hci_ev_status *rp = data;
2006
2007         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2008
2009         if (rp->status)
2010                 return rp->status;
2011
2012         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2013         if (!sent)
2014                 return rp->status;
2015
2016         hci_dev_lock(hdev);
2017         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2018                                 sent->bdaddr_type, sent->peer_irk,
2019                                 sent->local_irk);
2020         hci_dev_unlock(hdev);
2021
2022         return rp->status;
2023 }
2024
2025 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2026                                          struct sk_buff *skb)
2027 {
2028         struct hci_cp_le_del_from_resolv_list *sent;
2029         struct hci_ev_status *rp = data;
2030
2031         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2032
2033         if (rp->status)
2034                 return rp->status;
2035
2036         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2037         if (!sent)
2038                 return rp->status;
2039
2040         hci_dev_lock(hdev);
2041         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2042                             sent->bdaddr_type);
2043         hci_dev_unlock(hdev);
2044
2045         return rp->status;
2046 }
2047
2048 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2049                                       struct sk_buff *skb)
2050 {
2051         struct hci_ev_status *rp = data;
2052
2053         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2054
2055         if (rp->status)
2056                 return rp->status;
2057
2058         hci_dev_lock(hdev);
2059         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2060         hci_dev_unlock(hdev);
2061
2062         return rp->status;
2063 }
2064
2065 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2066                                           struct sk_buff *skb)
2067 {
2068         struct hci_rp_le_read_resolv_list_size *rp = data;
2069
2070         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2071
2072         if (rp->status)
2073                 return rp->status;
2074
2075         hdev->le_resolv_list_size = rp->size;
2076
2077         return rp->status;
2078 }
2079
2080 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2081                                                struct sk_buff *skb)
2082 {
2083         struct hci_ev_status *rp = data;
2084         __u8 *sent;
2085
2086         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2087
2088         if (rp->status)
2089                 return rp->status;
2090
2091         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2092         if (!sent)
2093                 return rp->status;
2094
2095         hci_dev_lock(hdev);
2096
2097         if (*sent)
2098                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2099         else
2100                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2101
2102         hci_dev_unlock(hdev);
2103
2104         return rp->status;
2105 }
2106
2107 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2108                                       struct sk_buff *skb)
2109 {
2110         struct hci_rp_le_read_max_data_len *rp = data;
2111
2112         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2113
2114         if (rp->status)
2115                 return rp->status;
2116
2117         hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2118         hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2119         hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2120         hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2121
2122         return rp->status;
2123 }
2124
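/* Reflect a successful Write LE Host Supported command in the local host
 * feature bits (page 1) and in the HCI_LE_ENABLED/HCI_ADVERTISING flags.
 */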
2125 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2126                                          struct sk_buff *skb)
2127 {
2128         struct hci_cp_write_le_host_supported *sent;
2129         struct hci_ev_status *rp = data;
2130
2131         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2132
2133         if (rp->status)
2134                 return rp->status;
2135
2136         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2137         if (!sent)
2138                 return rp->status;
2139
2140         hci_dev_lock(hdev);
2141
2142         if (sent->le) {
2143                 hdev->features[1][0] |= LMP_HOST_LE;
2144                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2145         } else {
2146                 hdev->features[1][0] &= ~LMP_HOST_LE;
2147                 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2148                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2149         }
2150
2151         if (sent->simul)
2152                 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2153         else
2154                 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2155
2156         hci_dev_unlock(hdev);
2157
2158         return rp->status;
2159 }
2160
2161 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2162                                struct sk_buff *skb)
2163 {
2164         struct hci_cp_le_set_adv_param *cp;
2165         struct hci_ev_status *rp = data;
2166
2167         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2168
2169         if (rp->status)
2170                 return rp->status;
2171
2172         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2173         if (!cp)
2174                 return rp->status;
2175
2176         hci_dev_lock(hdev);
2177         hdev->adv_addr_type = cp->own_address_type;
2178         hci_dev_unlock(hdev);
2179
2180         return rp->status;
2181 }
2182
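/* For extended advertising the controller reports the selected TX power in
 * the command response. Store it either in hdev (instance 0) or in the
 * matching adv instance, then refresh the advertising data since the TX
 * power can now be included in it.
 */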
2183 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2184                                    struct sk_buff *skb)
2185 {
2186         struct hci_rp_le_set_ext_adv_params *rp = data;
2187         struct hci_cp_le_set_ext_adv_params *cp;
2188         struct adv_info *adv_instance;
2189
2190         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2191
2192         if (rp->status)
2193                 return rp->status;
2194
2195         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2196         if (!cp)
2197                 return rp->status;
2198
2199         hci_dev_lock(hdev);
2200         hdev->adv_addr_type = cp->own_addr_type;
2201         if (!cp->handle) {
2202                 /* Store in hdev for instance 0 */
2203                 hdev->adv_tx_power = rp->tx_power;
2204         } else {
2205                 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2206                 if (adv_instance)
2207                         adv_instance->tx_power = rp->tx_power;
2208         }
2209         /* Update adv data as the tx power is now known */
2210         hci_update_adv_data(hdev, cp->handle);
2211
2212         hci_dev_unlock(hdev);
2213
2214         return rp->status;
2215 }
2216
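/* Tizen-specific (TIZEN_BT) completions for the vendor RSSI monitoring
 * commands: the controller's response is simply forwarded to the management
 * interface.
 */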
2217 #ifdef TIZEN_BT
2218 static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
2219                              struct sk_buff *skb)
2220 {
2221         struct hci_cc_rsp_enable_rssi *rp = data;
2222
2223         BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
2224                hdev->name, rp->status, rp->le_ext_opcode);
2225
2226         mgmt_enable_rssi_cc(hdev, rp, rp->status);
2227
2228         return rp->status;
2229 }
2230
2231 static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
2232                               struct sk_buff *skb)
2233 {
2234         struct hci_cc_rp_get_raw_rssi *rp = data;
2235
2236         BT_DBG("hci_cc_get_raw_rssi - %s Get Raw RSSI Response[%2.2x %4.4x %2.2X]",
2237                hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
2238
2239         mgmt_raw_rssi_response(hdev, rp, rp->status);
2240
2241         return rp->status;
2242 }
2243 #endif
2244
2245 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2246                            struct sk_buff *skb)
2247 {
2248         struct hci_rp_read_rssi *rp = data;
2249         struct hci_conn *conn;
2250
2251         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2252
2253         if (rp->status)
2254                 return rp->status;
2255
2256         hci_dev_lock(hdev);
2257
2258         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2259         if (conn)
2260                 conn->rssi = rp->rssi;
2261
2262         hci_dev_unlock(hdev);
2263
2264         return rp->status;
2265 }
2266
2267 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2268                                struct sk_buff *skb)
2269 {
2270         struct hci_cp_read_tx_power *sent;
2271         struct hci_rp_read_tx_power *rp = data;
2272         struct hci_conn *conn;
2273
2274         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2275
2276         if (rp->status)
2277                 return rp->status;
2278
2279         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2280         if (!sent)
2281                 return rp->status;
2282
2283         hci_dev_lock(hdev);
2284
2285         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2286         if (!conn)
2287                 goto unlock;
2288
2289         switch (sent->type) {
2290         case 0x00:
2291                 conn->tx_power = rp->tx_power;
2292                 break;
2293         case 0x01:
2294                 conn->max_tx_power = rp->tx_power;
2295                 break;
2296         }
2297
2298 unlock:
2299         hci_dev_unlock(hdev);
2300         return rp->status;
2301 }
2302
2303 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2304                                       struct sk_buff *skb)
2305 {
2306         struct hci_ev_status *rp = data;
2307         u8 *mode;
2308
2309         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2310
2311         if (rp->status)
2312                 return rp->status;
2313
2314         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2315         if (mode)
2316                 hdev->ssp_debug_mode = *mode;
2317
2318         return rp->status;
2319 }
2320
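/* The hci_cs_* handlers below deal with Command Status events: the
 * controller has only accepted (or rejected) the command, and the actual
 * outcome arrives later in a dedicated event. For example:
 *
 *   HCI_OP_CREATE_CONN  ->  Command Status   (hci_cs_create_conn)
 *                       ->  Connection Complete (hci_conn_complete_evt)
 *
 * which is why these handlers mostly just record state or clean up after
 * an early failure instead of finishing the operation themselves.
 */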
2321 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2322 {
2323         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2324
2325         if (status) {
2326                 hci_conn_check_pending(hdev);
2327                 return;
2328         }
2329
2330         set_bit(HCI_INQUIRY, &hdev->flags);
2331 }
2332
2333 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2334 {
2335         struct hci_cp_create_conn *cp;
2336         struct hci_conn *conn;
2337
2338         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2339
2340         cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2341         if (!cp)
2342                 return;
2343
2344         hci_dev_lock(hdev);
2345
2346         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2347
2348         bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2349
2350         if (status) {
2351                 if (conn && conn->state == BT_CONNECT) {
2352                         if (status != 0x0c || conn->attempt > 2) {
2353                                 conn->state = BT_CLOSED;
2354                                 hci_connect_cfm(conn, status);
2355                                 hci_conn_del(conn);
2356                         } else
2357                                 conn->state = BT_CONNECT2;
2358                 }
2359         } else {
2360                 if (!conn) {
2361                         conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2362                                             HCI_ROLE_MASTER);
2363                         if (!conn)
2364                                 bt_dev_err(hdev, "no memory for new connection");
2365                 }
2366         }
2367
2368         hci_dev_unlock(hdev);
2369 }
2370
2371 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2372 {
2373         struct hci_cp_add_sco *cp;
2374         struct hci_conn *acl, *sco;
2375         __u16 handle;
2376
2377         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2378
2379         if (!status)
2380                 return;
2381
2382         cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2383         if (!cp)
2384                 return;
2385
2386         handle = __le16_to_cpu(cp->handle);
2387
2388         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2389
2390         hci_dev_lock(hdev);
2391
2392         acl = hci_conn_hash_lookup_handle(hdev, handle);
2393         if (acl) {
2394                 sco = acl->link;
2395                 if (sco) {
2396                         sco->state = BT_CLOSED;
2397
2398                         hci_connect_cfm(sco, status);
2399                         hci_conn_del(sco);
2400                 }
2401         }
2402
2403         hci_dev_unlock(hdev);
2404 }
2405
2406 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2407 {
2408         struct hci_cp_auth_requested *cp;
2409         struct hci_conn *conn;
2410
2411         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2412
2413         if (!status)
2414                 return;
2415
2416         cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2417         if (!cp)
2418                 return;
2419
2420         hci_dev_lock(hdev);
2421
2422         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2423         if (conn) {
2424                 if (conn->state == BT_CONFIG) {
2425                         hci_connect_cfm(conn, status);
2426                         hci_conn_drop(conn);
2427                 }
2428         }
2429
2430         hci_dev_unlock(hdev);
2431 }
2432
2433 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2434 {
2435         struct hci_cp_set_conn_encrypt *cp;
2436         struct hci_conn *conn;
2437
2438         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2439
2440         if (!status)
2441                 return;
2442
2443         cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2444         if (!cp)
2445                 return;
2446
2447         hci_dev_lock(hdev);
2448
2449         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2450         if (conn) {
2451                 if (conn->state == BT_CONFIG) {
2452                         hci_connect_cfm(conn, status);
2453                         hci_conn_drop(conn);
2454                 }
2455         }
2456
2457         hci_dev_unlock(hdev);
2458 }
2459
2460 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2461                                     struct hci_conn *conn)
2462 {
2463         if (conn->state != BT_CONFIG || !conn->out)
2464                 return 0;
2465
2466         if (conn->pending_sec_level == BT_SECURITY_SDP)
2467                 return 0;
2468
2469         /* Only request authentication for SSP connections or non-SSP
2470          * devices with sec_level MEDIUM or HIGH or if MITM protection
2471          * is requested.
2472          */
2473         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2474             conn->pending_sec_level != BT_SECURITY_FIPS &&
2475             conn->pending_sec_level != BT_SECURITY_HIGH &&
2476             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2477                 return 0;
2478
2479         return 1;
2480 }
2481
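/* Name resolution after inquiry: entries whose names are still unknown sit
 * on hdev->discovery.resolve. hci_resolve_name() issues a Remote Name
 * Request for one entry, hci_resolve_next_name() walks the list (bounded by
 * name_resolve_timeout) and hci_check_pending_name() reports the result via
 * mgmt and decides whether to keep resolving or finish discovery.
 */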
2482 static int hci_resolve_name(struct hci_dev *hdev,
2483                                    struct inquiry_entry *e)
2484 {
2485         struct hci_cp_remote_name_req cp;
2486
2487         memset(&cp, 0, sizeof(cp));
2488
2489         bacpy(&cp.bdaddr, &e->data.bdaddr);
2490         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2491         cp.pscan_mode = e->data.pscan_mode;
2492         cp.clock_offset = e->data.clock_offset;
2493
2494         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2495 }
2496
2497 static bool hci_resolve_next_name(struct hci_dev *hdev)
2498 {
2499         struct discovery_state *discov = &hdev->discovery;
2500         struct inquiry_entry *e;
2501
2502         if (list_empty(&discov->resolve))
2503                 return false;
2504
2505         /* We should stop if we already spent too much time resolving names. */
2506         if (time_after(jiffies, discov->name_resolve_timeout)) {
2507                 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2508                 return false;
2509         }
2510
2511         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2512         if (!e)
2513                 return false;
2514
2515         if (hci_resolve_name(hdev, e) == 0) {
2516                 e->name_state = NAME_PENDING;
2517                 return true;
2518         }
2519
2520         return false;
2521 }
2522
2523 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2524                                    bdaddr_t *bdaddr, u8 *name, u8 name_len)
2525 {
2526         struct discovery_state *discov = &hdev->discovery;
2527         struct inquiry_entry *e;
2528
2529 #ifdef TIZEN_BT
2530         /* Update the mgmt connected state if necessary. Be careful, however,
2531          * with conn objects that exist but are not (yet) connected.
2532          * Only those in BT_CONFIG or BT_CONNECTED states can be
2533          * considered connected.
2534          */
2535         if (conn &&
2536             (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2537                 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2538                         mgmt_device_connected(hdev, conn, 0, name, name_len);
2539                 else
2540                         mgmt_device_name_update(hdev, bdaddr, name, name_len);
2541         }
2542 #else
2543         if (conn &&
2544             (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2545             !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2546                 mgmt_device_connected(hdev, conn, name, name_len);
2547 #endif
2548
2549         if (discov->state == DISCOVERY_STOPPED)
2550                 return;
2551
2552         if (discov->state == DISCOVERY_STOPPING)
2553                 goto discov_complete;
2554
2555         if (discov->state != DISCOVERY_RESOLVING)
2556                 return;
2557
2558         e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2559         /* If the device was not found in the list of found devices whose names
2560          * are pending, there is no need to continue resolving the next name, as
2561          * that will be done upon receiving another Remote Name Request Complete
2562          * event. */
2563         if (!e)
2564                 return;
2565
2566         list_del(&e->list);
2567
2568         e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2569         mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2570                          name, name_len);
2571
2572         if (hci_resolve_next_name(hdev))
2573                 return;
2574
2575 discov_complete:
2576         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2577 }
2578
2579 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2580 {
2581         struct hci_cp_remote_name_req *cp;
2582         struct hci_conn *conn;
2583
2584         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2585
2586         /* If successful, wait for the Remote Name Request Complete event
2587          * before checking whether authentication is needed. */
2588         if (!status)
2589                 return;
2590
2591         cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2592         if (!cp)
2593                 return;
2594
2595         hci_dev_lock(hdev);
2596
2597         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2598
2599         if (hci_dev_test_flag(hdev, HCI_MGMT))
2600                 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2601
2602         if (!conn)
2603                 goto unlock;
2604
2605         if (!hci_outgoing_auth_needed(hdev, conn))
2606                 goto unlock;
2607
2608         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2609                 struct hci_cp_auth_requested auth_cp;
2610
2611                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2612
2613                 auth_cp.handle = __cpu_to_le16(conn->handle);
2614                 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2615                              sizeof(auth_cp), &auth_cp);
2616         }
2617
2618 unlock:
2619         hci_dev_unlock(hdev);
2620 }
2621
2622 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2623 {
2624         struct hci_cp_read_remote_features *cp;
2625         struct hci_conn *conn;
2626
2627         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2628
2629         if (!status)
2630                 return;
2631
2632         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2633         if (!cp)
2634                 return;
2635
2636         hci_dev_lock(hdev);
2637
2638         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2639         if (conn) {
2640                 if (conn->state == BT_CONFIG) {
2641                         hci_connect_cfm(conn, status);
2642                         hci_conn_drop(conn);
2643                 }
2644         }
2645
2646         hci_dev_unlock(hdev);
2647 }
2648
2649 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2650 {
2651         struct hci_cp_read_remote_ext_features *cp;
2652         struct hci_conn *conn;
2653
2654         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2655
2656         if (!status)
2657                 return;
2658
2659         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2660         if (!cp)
2661                 return;
2662
2663         hci_dev_lock(hdev);
2664
2665         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2666         if (conn) {
2667                 if (conn->state == BT_CONFIG) {
2668                         hci_connect_cfm(conn, status);
2669                         hci_conn_drop(conn);
2670                 }
2671         }
2672
2673         hci_dev_unlock(hdev);
2674 }
2675
2676 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2677 {
2678         struct hci_cp_setup_sync_conn *cp;
2679         struct hci_conn *acl, *sco;
2680         __u16 handle;
2681
2682         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2683
2684         if (!status)
2685                 return;
2686
2687         cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2688         if (!cp)
2689                 return;
2690
2691         handle = __le16_to_cpu(cp->handle);
2692
2693         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2694
2695         hci_dev_lock(hdev);
2696
2697         acl = hci_conn_hash_lookup_handle(hdev, handle);
2698         if (acl) {
2699                 sco = acl->link;
2700                 if (sco) {
2701                         sco->state = BT_CLOSED;
2702
2703                         hci_connect_cfm(sco, status);
2704                         hci_conn_del(sco);
2705                 }
2706         }
2707
2708         hci_dev_unlock(hdev);
2709 }
2710
2711 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2712 {
2713         struct hci_cp_enhanced_setup_sync_conn *cp;
2714         struct hci_conn *acl, *sco;
2715         __u16 handle;
2716
2717         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2718
2719         if (!status)
2720                 return;
2721
2722         cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2723         if (!cp)
2724                 return;
2725
2726         handle = __le16_to_cpu(cp->handle);
2727
2728         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2729
2730         hci_dev_lock(hdev);
2731
2732         acl = hci_conn_hash_lookup_handle(hdev, handle);
2733         if (acl) {
2734                 sco = acl->link;
2735                 if (sco) {
2736                         sco->state = BT_CLOSED;
2737
2738                         hci_connect_cfm(sco, status);
2739                         hci_conn_del(sco);
2740                 }
2741         }
2742
2743         hci_dev_unlock(hdev);
2744 }
2745
2746 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2747 {
2748         struct hci_cp_sniff_mode *cp;
2749         struct hci_conn *conn;
2750
2751         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2752
2753         if (!status)
2754                 return;
2755
2756         cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2757         if (!cp)
2758                 return;
2759
2760         hci_dev_lock(hdev);
2761
2762         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2763         if (conn) {
2764                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2765
2766                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2767                         hci_sco_setup(conn, status);
2768         }
2769
2770         hci_dev_unlock(hdev);
2771 }
2772
2773 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2774 {
2775         struct hci_cp_exit_sniff_mode *cp;
2776         struct hci_conn *conn;
2777
2778         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2779
2780         if (!status)
2781                 return;
2782
2783         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2784         if (!cp)
2785                 return;
2786
2787         hci_dev_lock(hdev);
2788
2789         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2790         if (conn) {
2791                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2792
2793                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2794                         hci_sco_setup(conn, status);
2795         }
2796
2797         hci_dev_unlock(hdev);
2798 }
2799
2800 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2801 {
2802         struct hci_cp_disconnect *cp;
2803         struct hci_conn_params *params;
2804         struct hci_conn *conn;
2805         bool mgmt_conn;
2806
2807         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2808
2809         /* Wait for HCI_EV_DISCONN_COMPLETE if the status is 0x00 and we are not
2810          * suspended; otherwise clean up the connection immediately.
2811          */
2812         if (!status && !hdev->suspended)
2813                 return;
2814
2815         cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2816         if (!cp)
2817                 return;
2818
2819         hci_dev_lock(hdev);
2820
2821         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2822         if (!conn)
2823                 goto unlock;
2824
2825         if (status) {
2826                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2827                                        conn->dst_type, status);
2828
2829                 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2830                         hdev->cur_adv_instance = conn->adv_instance;
2831                         hci_enable_advertising(hdev);
2832                 }
2833
2834                 goto done;
2835         }
2836
2837         mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2838
2839         if (conn->type == ACL_LINK) {
2840                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2841                         hci_remove_link_key(hdev, &conn->dst);
2842         }
2843
2844         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2845         if (params) {
2846                 switch (params->auto_connect) {
2847                 case HCI_AUTO_CONN_LINK_LOSS:
2848                         if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2849                                 break;
2850                         fallthrough;
2851
2852                 case HCI_AUTO_CONN_DIRECT:
2853                 case HCI_AUTO_CONN_ALWAYS:
2854                         list_del_init(&params->action);
2855                         list_add(&params->action, &hdev->pend_le_conns);
2856                         break;
2857
2858                 default:
2859                         break;
2860                 }
2861         }
2862
2863         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2864                                  cp->reason, mgmt_conn);
2865
2866         hci_disconn_cfm(conn, cp->reason);
2867
2868 done:
2869         /* If the disconnection failed for any reason, the upper layer
2870          * does not retry the disconnect in the current implementation.
2871          * Hence, we need to do some basic cleanup here and re-enable
2872          * advertising if necessary.
2873          */
2874         hci_conn_del(conn);
2875 unlock:
2876         hci_dev_unlock(hdev);
2877 }
2878
2879 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2880 {
2881         /* When using controller-based address resolution, the new address
2882          * types 0x02 and 0x03 are used. These types need to be converted
2883          * back into either the public or the random address type.
2884          */
2885         switch (type) {
2886         case ADDR_LE_DEV_PUBLIC_RESOLVED:
2887                 if (resolved)
2888                         *resolved = true;
2889                 return ADDR_LE_DEV_PUBLIC;
2890         case ADDR_LE_DEV_RANDOM_RESOLVED:
2891                 if (resolved)
2892                         *resolved = true;
2893                 return ADDR_LE_DEV_RANDOM;
2894         }
2895
2896         if (resolved)
2897                 *resolved = false;
2898         return type;
2899 }
2900
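/* Shared by the legacy and extended Create Connection command status paths:
 * record the initiator and responder address information on the hci_conn so
 * that SMP can use it later.
 */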
2901 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2902                               u8 peer_addr_type, u8 own_address_type,
2903                               u8 filter_policy)
2904 {
2905         struct hci_conn *conn;
2906
2907         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2908                                        peer_addr_type);
2909         if (!conn)
2910                 return;
2911
2912         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2913
2914         /* Store the initiator and responder address information which
2915          * is needed for SMP. These values will not change during the
2916          * lifetime of the connection.
2917          */
2918         conn->init_addr_type = own_address_type;
2919         if (own_address_type == ADDR_LE_DEV_RANDOM)
2920                 bacpy(&conn->init_addr, &hdev->random_addr);
2921         else
2922                 bacpy(&conn->init_addr, &hdev->bdaddr);
2923
2924         conn->resp_addr_type = peer_addr_type;
2925         bacpy(&conn->resp_addr, peer_addr);
2926 }
2927
2928 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2929 {
2930         struct hci_cp_le_create_conn *cp;
2931
2932         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2933
2934         /* All connection failure handling is taken care of by the
2935          * hci_conn_failed function which is triggered by the HCI
2936          * request completion callbacks used for connecting.
2937          */
2938         if (status)
2939                 return;
2940
2941         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2942         if (!cp)
2943                 return;
2944
2945         hci_dev_lock(hdev);
2946
2947         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2948                           cp->own_address_type, cp->filter_policy);
2949
2950         hci_dev_unlock(hdev);
2951 }
2952
2953 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2954 {
2955         struct hci_cp_le_ext_create_conn *cp;
2956
2957         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2958
2959         /* All connection failure handling is taken care of by the
2960          * hci_conn_failed function which is triggered by the HCI
2961          * request completion callbacks used for connecting.
2962          */
2963         if (status)
2964                 return;
2965
2966         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2967         if (!cp)
2968                 return;
2969
2970         hci_dev_lock(hdev);
2971
2972         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2973                           cp->own_addr_type, cp->filter_policy);
2974
2975         hci_dev_unlock(hdev);
2976 }
2977
2978 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2979 {
2980         struct hci_cp_le_read_remote_features *cp;
2981         struct hci_conn *conn;
2982
2983         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2984
2985         if (!status)
2986                 return;
2987
2988         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2989         if (!cp)
2990                 return;
2991
2992         hci_dev_lock(hdev);
2993
2994         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2995         if (conn) {
2996                 if (conn->state == BT_CONFIG) {
2997                         hci_connect_cfm(conn, status);
2998                         hci_conn_drop(conn);
2999                 }
3000         }
3001
3002         hci_dev_unlock(hdev);
3003 }
3004
3005 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3006 {
3007         struct hci_cp_le_start_enc *cp;
3008         struct hci_conn *conn;
3009
3010         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3011
3012         if (!status)
3013                 return;
3014
3015         hci_dev_lock(hdev);
3016
3017         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3018         if (!cp)
3019                 goto unlock;
3020
3021         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3022         if (!conn)
3023                 goto unlock;
3024
3025         if (conn->state != BT_CONNECTED)
3026                 goto unlock;
3027
3028         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3029         hci_conn_drop(conn);
3030
3031 unlock:
3032         hci_dev_unlock(hdev);
3033 }
3034
3035 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3036 {
3037         struct hci_cp_switch_role *cp;
3038         struct hci_conn *conn;
3039
3040         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3041
3042         if (!status)
3043                 return;
3044
3045         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3046         if (!cp)
3047                 return;
3048
3049         hci_dev_lock(hdev);
3050
3051         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3052         if (conn)
3053                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3054
3055         hci_dev_unlock(hdev);
3056 }
3057
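/* From here on the handlers process asynchronous HCI events rather than
 * command responses. Inquiry Complete clears the HCI_INQUIRY flag, wakes up
 * any waiters and, when mgmt is in use, either finishes discovery or starts
 * resolving the names that were collected.
 */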
3058 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3059                                      struct sk_buff *skb)
3060 {
3061         struct hci_ev_status *ev = data;
3062         struct discovery_state *discov = &hdev->discovery;
3063         struct inquiry_entry *e;
3064
3065         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3066
3067         hci_conn_check_pending(hdev);
3068
3069         if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3070                 return;
3071
3072         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3073         wake_up_bit(&hdev->flags, HCI_INQUIRY);
3074
3075         if (!hci_dev_test_flag(hdev, HCI_MGMT))
3076                 return;
3077
3078         hci_dev_lock(hdev);
3079
3080         if (discov->state != DISCOVERY_FINDING)
3081                 goto unlock;
3082
3083         if (list_empty(&discov->resolve)) {
3084                 /* When BR/EDR inquiry is active and no LE scanning is in
3085                  * progress, then change discovery state to indicate completion.
3086                  *
3087                  * When running LE scanning and BR/EDR inquiry simultaneously
3088                  * and the LE scan already finished, then change the discovery
3089                  * state to indicate completion.
3090                  */
3091                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3092                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3093                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3094                 goto unlock;
3095         }
3096
3097         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3098         if (e && hci_resolve_name(hdev, e) == 0) {
3099                 e->name_state = NAME_PENDING;
3100                 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3101                 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3102         } else {
3103                 /* When BR/EDR inquiry is active and no LE scanning is in
3104                  * progress, then change discovery state to indicate completion.
3105                  *
3106                  * When running LE scanning and BR/EDR inquiry simultaneously
3107                  * and the LE scan already finished, then change the discovery
3108                  * state to indicate completion.
3109                  */
3110                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3111                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3112                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3113         }
3114
3115 unlock:
3116         hci_dev_unlock(hdev);
3117 }
3118
3119 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3120                                    struct sk_buff *skb)
3121 {
3122         struct hci_ev_inquiry_result *ev = edata;
3123         struct inquiry_data data;
3124         int i;
3125
3126         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3127                              flex_array_size(ev, info, ev->num)))
3128                 return;
3129
3130         bt_dev_dbg(hdev, "num %d", ev->num);
3131
3132         if (!ev->num)
3133                 return;
3134
3135         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3136                 return;
3137
3138         hci_dev_lock(hdev);
3139
3140         for (i = 0; i < ev->num; i++) {
3141                 struct inquiry_info *info = &ev->info[i];
3142                 u32 flags;
3143
3144                 bacpy(&data.bdaddr, &info->bdaddr);
3145                 data.pscan_rep_mode     = info->pscan_rep_mode;
3146                 data.pscan_period_mode  = info->pscan_period_mode;
3147                 data.pscan_mode         = info->pscan_mode;
3148                 memcpy(data.dev_class, info->dev_class, 3);
3149                 data.clock_offset       = info->clock_offset;
3150                 data.rssi               = HCI_RSSI_INVALID;
3151                 data.ssp_mode           = 0x00;
3152
3153                 flags = hci_inquiry_cache_update(hdev, &data, false);
3154
3155                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3156                                   info->dev_class, HCI_RSSI_INVALID,
3157                                   flags, NULL, 0, NULL, 0, 0);
3158         }
3159
3160         hci_dev_unlock(hdev);
3161 }
3162
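/* Handle the HCI Connection Complete event for BR/EDR (ACL and SCO) links:
 * look up or, for allow-listed auto-connections, create the hci_conn, guard
 * against duplicate events, validate the handle and then finish setting up
 * the connection (remote feature read, packet type, sysfs/debugfs entries)
 * or fail it if the controller reported an error.
 */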
3163 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3164                                   struct sk_buff *skb)
3165 {
3166         struct hci_ev_conn_complete *ev = data;
3167         struct hci_conn *conn;
3168         u8 status = ev->status;
3169
3170         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3171
3172         hci_dev_lock(hdev);
3173
3174         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3175         if (!conn) {
3176                 /* In case of an error status with no connection pending,
3177                  * just unlock as there is nothing to clean up.
3178                  */
3179                 if (ev->status)
3180                         goto unlock;
3181
3182                 /* The connection may not exist if it was auto-connected. Check
3183                  * the BR/EDR allowlist to see if this device is allowed to
3184                  * auto-connect. If the link is an ACL type, create the
3185                  * connection automatically.
3186                  *
3187                  * Auto-connect will only occur if the event filter is
3188                  * programmed with a given address. Right now, the event filter
3189                  * is only used during suspend.
3190                  */
3191                 if (ev->link_type == ACL_LINK &&
3192                     hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3193                                                       &ev->bdaddr,
3194                                                       BDADDR_BREDR)) {
3195                         conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3196                                             HCI_ROLE_SLAVE);
3197                         if (!conn) {
3198                                 bt_dev_err(hdev, "no memory for new conn");
3199                                 goto unlock;
3200                         }
3201                 } else {
3202                         if (ev->link_type != SCO_LINK)
3203                                 goto unlock;
3204
3205                         conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3206                                                        &ev->bdaddr);
3207                         if (!conn)
3208                                 goto unlock;
3209
3210                         conn->type = SCO_LINK;
3211                 }
3212         }
3213
3214         /* The HCI_Connection_Complete event is only sent once per connection.
3215          * Processing it more than once per connection can corrupt kernel memory.
3216          *
3217          * As the connection handle is set here for the first time, it indicates
3218          * whether the connection is already set up.
3219          */
3220         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
3221                 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3222                 goto unlock;
3223         }
3224
3225         if (!status) {
3226                 conn->handle = __le16_to_cpu(ev->handle);
3227                 if (conn->handle > HCI_CONN_HANDLE_MAX) {
3228                         bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
3229                                    conn->handle, HCI_CONN_HANDLE_MAX);
3230                         status = HCI_ERROR_INVALID_PARAMETERS;
3231                         goto done;
3232                 }
3233
3234                 if (conn->type == ACL_LINK) {
3235                         conn->state = BT_CONFIG;
3236                         hci_conn_hold(conn);
3237
3238                         if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3239                             !hci_find_link_key(hdev, &ev->bdaddr))
3240                                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3241                         else
3242                                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3243                 } else
3244                         conn->state = BT_CONNECTED;
3245
3246                 hci_debugfs_create_conn(conn);
3247                 hci_conn_add_sysfs(conn);
3248
3249                 if (test_bit(HCI_AUTH, &hdev->flags))
3250                         set_bit(HCI_CONN_AUTH, &conn->flags);
3251
3252                 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3253                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3254
3255                 /* Get remote features */
3256                 if (conn->type == ACL_LINK) {
3257                         struct hci_cp_read_remote_features cp;
3258                         cp.handle = ev->handle;
3259                         hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3260                                      sizeof(cp), &cp);
3261
3262                         hci_update_scan(hdev);
3263                 }
3264
3265                 /* Set packet type for incoming connection */
3266                 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3267                         struct hci_cp_change_conn_ptype cp;
3268                         cp.handle = ev->handle;
3269                         cp.pkt_type = cpu_to_le16(conn->pkt_type);
3270                         hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3271                                      &cp);
3272                 }
3273         }
3274
3275         if (conn->type == ACL_LINK)
3276                 hci_sco_setup(conn, ev->status);
3277
3278 done:
3279         if (status) {
3280                 hci_conn_failed(conn, status);
3281         } else if (ev->link_type == SCO_LINK) {
3282                 switch (conn->setting & SCO_AIRMODE_MASK) {
3283                 case SCO_AIRMODE_CVSD:
3284                         if (hdev->notify)
3285                                 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3286                         break;
3287                 }
3288
3289                 hci_connect_cfm(conn, status);
3290         }
3291
3292 unlock:
3293         hci_dev_unlock(hdev);
3294
3295         hci_conn_check_pending(hdev);
3296 }
3297
3298 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3299 {
3300         struct hci_cp_reject_conn_req cp;
3301
3302         bacpy(&cp.bdaddr, bdaddr);
3303         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3304         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3305 }
3306
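/* Handle an incoming connection request: give the protocol layers and the
 * reject/accept lists a chance to veto it, then either accept the ACL or
 * (e)SCO link with suitable parameters, or defer the decision to the upper
 * layer by moving the connection to BT_CONNECT2.
 */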
3307 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3308                                  struct sk_buff *skb)
3309 {
3310         struct hci_ev_conn_request *ev = data;
3311         int mask = hdev->link_mode;
3312         struct inquiry_entry *ie;
3313         struct hci_conn *conn;
3314         __u8 flags = 0;
3315
3316         bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3317
3318         mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3319                                       &flags);
3320
3321         if (!(mask & HCI_LM_ACCEPT)) {
3322                 hci_reject_conn(hdev, &ev->bdaddr);
3323                 return;
3324         }
3325
3326         hci_dev_lock(hdev);
3327
3328         if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3329                                    BDADDR_BREDR)) {
3330                 hci_reject_conn(hdev, &ev->bdaddr);
3331                 goto unlock;
3332         }
3333
3334         /* Require HCI_CONNECTABLE or an accept list entry to accept the
3335          * connection. These features are only touched through mgmt so
3336          * only do the checks if HCI_MGMT is set.
3337          */
3338         if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3339             !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3340             !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3341                                                BDADDR_BREDR)) {
3342                 hci_reject_conn(hdev, &ev->bdaddr);
3343                 goto unlock;
3344         }
3345
3346         /* Connection accepted */
3347
3348         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3349         if (ie)
3350                 memcpy(ie->data.dev_class, ev->dev_class, 3);
3351
3352         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3353                         &ev->bdaddr);
3354         if (!conn) {
3355                 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3356                                     HCI_ROLE_SLAVE);
3357                 if (!conn) {
3358                         bt_dev_err(hdev, "no memory for new connection");
3359                         goto unlock;
3360                 }
3361         }
3362
3363         memcpy(conn->dev_class, ev->dev_class, 3);
3364
3365         hci_dev_unlock(hdev);
3366
3367         if (ev->link_type == ACL_LINK ||
3368             (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3369                 struct hci_cp_accept_conn_req cp;
3370                 conn->state = BT_CONNECT;
3371
3372                 bacpy(&cp.bdaddr, &ev->bdaddr);
3373
3374                 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3375                         cp.role = 0x00; /* Become central */
3376                 else
3377                         cp.role = 0x01; /* Remain peripheral */
3378
3379                 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3380         } else if (!(flags & HCI_PROTO_DEFER)) {
3381                 struct hci_cp_accept_sync_conn_req cp;
3382                 conn->state = BT_CONNECT;
3383
3384                 bacpy(&cp.bdaddr, &ev->bdaddr);
3385                 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3386
3387                 cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3388                 cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3389                 cp.max_latency    = cpu_to_le16(0xffff);
3390                 cp.content_format = cpu_to_le16(hdev->voice_setting);
3391                 cp.retrans_effort = 0xff;
3392
3393                 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3394                              &cp);
3395         } else {
3396                 conn->state = BT_CONNECT2;
3397                 hci_connect_cfm(conn, 0);
3398         }
3399
3400         return;
3401 unlock:
3402         hci_dev_unlock(hdev);
3403 }
3404
3405 static u8 hci_to_mgmt_reason(u8 err)
3406 {
3407         switch (err) {
3408         case HCI_ERROR_CONNECTION_TIMEOUT:
3409                 return MGMT_DEV_DISCONN_TIMEOUT;
3410         case HCI_ERROR_REMOTE_USER_TERM:
3411         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3412         case HCI_ERROR_REMOTE_POWER_OFF:
3413                 return MGMT_DEV_DISCONN_REMOTE;
3414         case HCI_ERROR_LOCAL_HOST_TERM:
3415                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3416         default:
3417                 return MGMT_DEV_DISCONN_UNKNOWN;
3418         }
3419 }
3420
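/* Handle a Disconnection Complete event: report the disconnection to
 * mgmt, re-queue LE connection parameters that are configured for
 * auto-connection, re-enable advertising for peripheral LE links and
 * finally delete the connection object.
 */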
3421 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3422                                      struct sk_buff *skb)
3423 {
3424         struct hci_ev_disconn_complete *ev = data;
3425         u8 reason;
3426         struct hci_conn_params *params;
3427         struct hci_conn *conn;
3428         bool mgmt_connected;
3429
3430         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3431
3432         hci_dev_lock(hdev);
3433
3434         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3435         if (!conn)
3436                 goto unlock;
3437
3438         if (ev->status) {
3439                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3440                                        conn->dst_type, ev->status);
3441                 goto unlock;
3442         }
3443
3444         conn->state = BT_CLOSED;
3445
3446         mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3447
3448         if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3449                 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3450         else
3451                 reason = hci_to_mgmt_reason(ev->reason);
3452
3453         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3454                                 reason, mgmt_connected);
3455
3456         if (conn->type == ACL_LINK) {
3457                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3458                         hci_remove_link_key(hdev, &conn->dst);
3459
3460                 hci_update_scan(hdev);
3461         }
3462
3463         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3464         if (params) {
3465                 switch (params->auto_connect) {
3466                 case HCI_AUTO_CONN_LINK_LOSS:
3467                         if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3468                                 break;
3469                         fallthrough;
3470
3471                 case HCI_AUTO_CONN_DIRECT:
3472                 case HCI_AUTO_CONN_ALWAYS:
3473                         list_del_init(&params->action);
3474                         list_add(&params->action, &hdev->pend_le_conns);
3475                         hci_update_passive_scan(hdev);
3476                         break;
3477
3478                 default:
3479                         break;
3480                 }
3481         }
3482
3483         hci_disconn_cfm(conn, ev->reason);
3484
3485         /* Re-enable advertising if necessary, since it might
3486          * have been disabled by the connection. From the
3487          * HCI_LE_Set_Advertise_Enable command description in
3488          * the core specification (v4.0):
3489          * "The Controller shall continue advertising until the Host
3490          * issues an LE_Set_Advertise_Enable command with
3491          * Advertising_Enable set to 0x00 (Advertising is disabled)
3492          * or until a connection is created or until the Advertising
3493          * is timed out due to Directed Advertising."
3494          */
3495         if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3496                 hdev->cur_adv_instance = conn->adv_instance;
3497                 hci_enable_advertising(hdev);
3498         }
3499
3500         hci_conn_del(conn);
3501
3502 unlock:
3503         hci_dev_unlock(hdev);
3504 }
3505
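/* Handle an Authentication Complete event: update the authentication
 * flags and security level, start encryption for SSP connections that
 * are still in BT_CONFIG, and service any pending encryption request.
 */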
3506 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3507                                   struct sk_buff *skb)
3508 {
3509         struct hci_ev_auth_complete *ev = data;
3510         struct hci_conn *conn;
3511
3512         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3513
3514         hci_dev_lock(hdev);
3515
3516         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3517         if (!conn)
3518                 goto unlock;
3519
3520         if (!ev->status) {
3521                 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3522
3523                 if (!hci_conn_ssp_enabled(conn) &&
3524                     test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3525                         bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3526                 } else {
3527                         set_bit(HCI_CONN_AUTH, &conn->flags);
3528                         conn->sec_level = conn->pending_sec_level;
3529                 }
3530         } else {
3531                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3532                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3533
3534                 mgmt_auth_failed(conn, ev->status);
3535         }
3536
3537         clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3538         clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3539
3540         if (conn->state == BT_CONFIG) {
3541                 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3542                         struct hci_cp_set_conn_encrypt cp;
3543                         cp.handle  = ev->handle;
3544                         cp.encrypt = 0x01;
3545                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3546                                      &cp);
3547                 } else {
3548                         conn->state = BT_CONNECTED;
3549                         hci_connect_cfm(conn, ev->status);
3550                         hci_conn_drop(conn);
3551                 }
3552         } else {
3553                 hci_auth_cfm(conn, ev->status);
3554
3555                 hci_conn_hold(conn);
3556                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3557                 hci_conn_drop(conn);
3558         }
3559
3560         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3561                 if (!ev->status) {
3562                         struct hci_cp_set_conn_encrypt cp;
3563                         cp.handle  = ev->handle;
3564                         cp.encrypt = 0x01;
3565                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3566                                      &cp);
3567                 } else {
3568                         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3569                         hci_encrypt_cfm(conn, ev->status);
3570                 }
3571         }
3572
3573 unlock:
3574         hci_dev_unlock(hdev);
3575 }
3576
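/* Handle a Remote Name Request Complete event: hand the resolved name
 * to mgmt (when enabled) and, if the connection still requires it,
 * request authentication on the ACL link.
 */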
3577 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3578                                 struct sk_buff *skb)
3579 {
3580         struct hci_ev_remote_name *ev = data;
3581         struct hci_conn *conn;
3582
3583         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3584
3585         hci_conn_check_pending(hdev);
3586
3587         hci_dev_lock(hdev);
3588
3589         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3590
3591         if (!hci_dev_test_flag(hdev, HCI_MGMT))
3592                 goto check_auth;
3593
3594         if (ev->status == 0)
3595                 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3596                                        strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3597         else
3598                 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3599
3600 check_auth:
3601         if (!conn)
3602                 goto unlock;
3603
3604         if (!hci_outgoing_auth_needed(hdev, conn))
3605                 goto unlock;
3606
3607         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3608                 struct hci_cp_auth_requested cp;
3609
3610                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3611
3612                 cp.handle = __cpu_to_le16(conn->handle);
3613                 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3614         }
3615
3616 unlock:
3617         hci_dev_unlock(hdev);
3618 }
3619
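/* Handle an Encryption Change event: update the encryption related
 * connection flags, read the encryption key size on newly encrypted
 * ACL links and, where LMP/LE Ping and AES-CCM are available, set the
 * authenticated payload timeout before notifying the upper layers.
 */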
3620 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3621                                    struct sk_buff *skb)
3622 {
3623         struct hci_ev_encrypt_change *ev = data;
3624         struct hci_conn *conn;
3625
3626         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3627
3628         hci_dev_lock(hdev);
3629
3630         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3631         if (!conn)
3632                 goto unlock;
3633
3634         if (!ev->status) {
3635                 if (ev->encrypt) {
3636                         /* Encryption implies authentication */
3637                         set_bit(HCI_CONN_AUTH, &conn->flags);
3638                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3639                         conn->sec_level = conn->pending_sec_level;
3640
3641                         /* P-256 authentication key implies FIPS */
3642                         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3643                                 set_bit(HCI_CONN_FIPS, &conn->flags);
3644
3645                         if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3646                             conn->type == LE_LINK)
3647                                 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3648                 } else {
3649                         clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3650                         clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3651                 }
3652         }
3653
3654         /* We should disregard the current RPA and generate a new one
3655          * whenever the encryption procedure fails.
3656          */
3657         if (ev->status && conn->type == LE_LINK) {
3658                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3659                 hci_adv_instances_set_rpa_expired(hdev, true);
3660         }
3661
3662         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3663
3664         /* Check link security requirements are met */
3665         if (!hci_conn_check_link_mode(conn))
3666                 ev->status = HCI_ERROR_AUTH_FAILURE;
3667
3668         if (ev->status && conn->state == BT_CONNECTED) {
3669                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3670                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3671
3672                 /* Notify upper layers so they can clean up before
3673                  * disconnecting.
3674                  */
3675                 hci_encrypt_cfm(conn, ev->status);
3676                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3677                 hci_conn_drop(conn);
3678                 goto unlock;
3679         }
3680
3681         /* Try reading the encryption key size for encrypted ACL links */
3682         if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3683                 struct hci_cp_read_enc_key_size cp;
3684
3685                 /* Only send HCI_Read_Encryption_Key_Size if the
3686                  * controller really supports it. If it doesn't, assume
3687                  * the default size (16).
3688                  */
3689                 if (!(hdev->commands[20] & 0x10)) {
3690                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3691                         goto notify;
3692                 }
3693
3694                 cp.handle = cpu_to_le16(conn->handle);
3695                 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3696                                  sizeof(cp), &cp)) {
3697                         bt_dev_err(hdev, "sending read key size failed");
3698                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3699                         goto notify;
3700                 }
3701
3702                 goto unlock;
3703         }
3704
3705         /* Set the default Authenticated Payload Timeout after
3706          * an LE link is established. As per Core Spec v5.0, Vol 2, Part B,
3707          * Section 3.3, the WRITE_AUTH_PAYLOAD_TIMEOUT command should be
3708          * sent when the link is active and encryption is enabled. The
3709          * connection type can be either LE or ACL, and the controller
3710          * must support LMP Ping. AES-CCM encryption is required as well.
3711          */
3712         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3713             test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3714             ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3715              (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3716                 struct hci_cp_write_auth_payload_to cp;
3717
3718                 cp.handle = cpu_to_le16(conn->handle);
3719                 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3720                 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3721                              sizeof(cp), &cp);
3722         }
3723
3724 notify:
3725         hci_encrypt_cfm(conn, ev->status);
3726
3727 unlock:
3728         hci_dev_unlock(hdev);
3729 }
3730
3731 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3732                                              struct sk_buff *skb)
3733 {
3734         struct hci_ev_change_link_key_complete *ev = data;
3735         struct hci_conn *conn;
3736
3737         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3738
3739         hci_dev_lock(hdev);
3740
3741         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3742         if (conn) {
3743                 if (!ev->status)
3744                         set_bit(HCI_CONN_SECURE, &conn->flags);
3745
3746                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3747
3748                 hci_key_change_cfm(conn, ev->status);
3749         }
3750
3751         hci_dev_unlock(hdev);
3752 }
3753
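/* Handle a Read Remote Supported Features completion: store the remote
 * LMP features, follow up with an extended features or remote name
 * request while in BT_CONFIG, and finish connection setup if no
 * outgoing authentication is required.
 */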
3754 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3755                                     struct sk_buff *skb)
3756 {
3757         struct hci_ev_remote_features *ev = data;
3758         struct hci_conn *conn;
3759
3760         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3761
3762         hci_dev_lock(hdev);
3763
3764         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3765         if (!conn)
3766                 goto unlock;
3767
3768         if (!ev->status)
3769                 memcpy(conn->features[0], ev->features, 8);
3770
3771         if (conn->state != BT_CONFIG)
3772                 goto unlock;
3773
3774         if (!ev->status && lmp_ext_feat_capable(hdev) &&
3775             lmp_ext_feat_capable(conn)) {
3776                 struct hci_cp_read_remote_ext_features cp;
3777                 cp.handle = ev->handle;
3778                 cp.page = 0x01;
3779                 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3780                              sizeof(cp), &cp);
3781                 goto unlock;
3782         }
3783
3784         if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3785                 struct hci_cp_remote_name_req cp;
3786                 memset(&cp, 0, sizeof(cp));
3787                 bacpy(&cp.bdaddr, &conn->dst);
3788                 cp.pscan_rep_mode = 0x02;
3789                 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3790         } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3791                 mgmt_device_connected(hdev, conn, NULL, 0);
3792
3793         if (!hci_outgoing_auth_needed(hdev, conn)) {
3794                 conn->state = BT_CONNECTED;
3795                 hci_connect_cfm(conn, ev->status);
3796                 hci_conn_drop(conn);
3797         }
3798
3799 unlock:
3800         hci_dev_unlock(hdev);
3801 }
3802
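/* A Command Complete/Status event arrived: cancel the command timeout
 * and, depending on the reported Num_HCI_Command_Packets, either
 * restore the command quota or arm the ncmd watchdog timer.
 */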
3803 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3804 {
3805         cancel_delayed_work(&hdev->cmd_timer);
3806
3807         rcu_read_lock();
3808         if (!test_bit(HCI_RESET, &hdev->flags)) {
3809                 if (ncmd) {
3810                         cancel_delayed_work(&hdev->ncmd_timer);
3811                         atomic_set(&hdev->cmd_cnt, 1);
3812                 } else {
3813                         if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3814                                 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3815                                                    HCI_NCMD_TIMEOUT);
3816                 }
3817         }
3818         rcu_read_unlock();
3819 }
3820
3821 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3822                                         struct sk_buff *skb)
3823 {
3824         struct hci_rp_le_read_buffer_size_v2 *rp = data;
3825
3826         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3827
3828         if (rp->status)
3829                 return rp->status;
3830
3831         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3832         hdev->le_pkts  = rp->acl_max_pkt;
3833         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3834         hdev->iso_pkts = rp->iso_max_pkt;
3835
3836         hdev->le_cnt  = hdev->le_pkts;
3837         hdev->iso_cnt = hdev->iso_pkts;
3838
3839         BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3840                hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3841
3842         return rp->status;
3843 }
3844
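/* Handle the LE Set CIG Parameters completion: on failure tear down
 * every CIS connection belonging to the CIG; on success assign the
 * returned handles to the pending ISO connections and create the CIS
 * for links that are already connected.
 */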
3845 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3846                                    struct sk_buff *skb)
3847 {
3848         struct hci_rp_le_set_cig_params *rp = data;
3849         struct hci_conn *conn;
3850         int i = 0;
3851
3852         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3853
3854         hci_dev_lock(hdev);
3855
3856         if (rp->status) {
3857                 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
3858                         conn->state = BT_CLOSED;
3859                         hci_connect_cfm(conn, rp->status);
3860                         hci_conn_del(conn);
3861                 }
3862                 goto unlock;
3863         }
3864
3865         rcu_read_lock();
3866
3867         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
3868                 if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
3869                     conn->state == BT_CONNECTED)
3870                         continue;
3871
3872                 conn->handle = __le16_to_cpu(rp->handle[i++]);
3873
3874                 bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
3875                            conn->handle, conn->link);
3876
3877                 /* Create CIS if LE is already connected */
3878                 if (conn->link && conn->link->state == BT_CONNECTED) {
3879                         rcu_read_unlock();
3880                         hci_le_create_cis(conn->link);
3881                         rcu_read_lock();
3882                 }
3883
3884                 if (i == rp->num_handles)
3885                         break;
3886         }
3887
3888         rcu_read_unlock();
3889
3890 unlock:
3891         hci_dev_unlock(hdev);
3892
3893         return rp->status;
3894 }
3895
3896 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3897                                    struct sk_buff *skb)
3898 {
3899         struct hci_rp_le_setup_iso_path *rp = data;
3900         struct hci_cp_le_setup_iso_path *cp;
3901         struct hci_conn *conn;
3902
3903         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3904
3905         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3906         if (!cp)
3907                 return rp->status;
3908
3909         hci_dev_lock(hdev);
3910
3911         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3912         if (!conn)
3913                 goto unlock;
3914
3915         if (rp->status) {
3916                 hci_connect_cfm(conn, rp->status);
3917                 hci_conn_del(conn);
3918                 goto unlock;
3919         }
3920
3921         switch (cp->direction) {
3922         /* Input (Host to Controller) */
3923         case 0x00:
3924                 /* Only confirm connection if output only */
3925                 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
3926                         hci_connect_cfm(conn, rp->status);
3927                 break;
3928         /* Output (Controller to Host) */
3929         case 0x01:
3930                 /* Confirm connection since conn->iso_qos is always configured
3931                  * last.
3932                  */
3933                 hci_connect_cfm(conn, rp->status);
3934                 break;
3935         }
3936
3937 unlock:
3938         hci_dev_unlock(hdev);
3939         return rp->status;
3940 }
3941
3942 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3943 {
3944         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3945 }
3946
3947 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3948                                    struct sk_buff *skb)
3949 {
3950         struct hci_ev_status *rp = data;
3951         struct hci_cp_le_set_per_adv_params *cp;
3952
3953         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3954
3955         if (rp->status)
3956                 return rp->status;
3957
3958         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3959         if (!cp)
3960                 return rp->status;
3961
3962         /* TODO: set the conn state */
3963         return rp->status;
3964 }
3965
3966 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3967                                        struct sk_buff *skb)
3968 {
3969         struct hci_ev_status *rp = data;
3970         __u8 *sent;
3971
3972         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3973
3974         if (rp->status)
3975                 return rp->status;
3976
3977         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3978         if (!sent)
3979                 return rp->status;
3980
3981         hci_dev_lock(hdev);
3982
3983         if (*sent)
3984                 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3985         else
3986                 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3987
3988         hci_dev_unlock(hdev);
3989
3990         return rp->status;
3991 }
3992
3993 #define HCI_CC_VL(_op, _func, _min, _max) \
3994 { \
3995         .op = _op, \
3996         .func = _func, \
3997         .min_len = _min, \
3998         .max_len = _max, \
3999 }
4000
4001 #define HCI_CC(_op, _func, _len) \
4002         HCI_CC_VL(_op, _func, _len, _len)
4003
4004 #define HCI_CC_STATUS(_op, _func) \
4005         HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4006
4007 static const struct hci_cc {
4008         u16  op;
4009         u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4010         u16  min_len;
4011         u16  max_len;
4012 } hci_cc_table[] = {
4013         HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4014         HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4015         HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4016         HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4017                       hci_cc_remote_name_req_cancel),
4018         HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4019                sizeof(struct hci_rp_role_discovery)),
4020         HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4021                sizeof(struct hci_rp_read_link_policy)),
4022         HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4023                sizeof(struct hci_rp_write_link_policy)),
4024         HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4025                sizeof(struct hci_rp_read_def_link_policy)),
4026         HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4027                       hci_cc_write_def_link_policy),
4028         HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4029         HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4030                sizeof(struct hci_rp_read_stored_link_key)),
4031         HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4032                sizeof(struct hci_rp_delete_stored_link_key)),
4033         HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4034         HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4035                sizeof(struct hci_rp_read_local_name)),
4036         HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4037         HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4038         HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4039         HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4040         HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4041                sizeof(struct hci_rp_read_class_of_dev)),
4042         HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4043         HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4044                sizeof(struct hci_rp_read_voice_setting)),
4045         HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4046         HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4047                sizeof(struct hci_rp_read_num_supported_iac)),
4048         HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4049         HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4050         HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4051                sizeof(struct hci_rp_read_auth_payload_to)),
4052         HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4053                sizeof(struct hci_rp_write_auth_payload_to)),
4054         HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4055                sizeof(struct hci_rp_read_local_version)),
4056         HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4057                sizeof(struct hci_rp_read_local_commands)),
4058         HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4059                sizeof(struct hci_rp_read_local_features)),
4060         HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4061                sizeof(struct hci_rp_read_local_ext_features)),
4062         HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4063                sizeof(struct hci_rp_read_buffer_size)),
4064         HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4065                sizeof(struct hci_rp_read_bd_addr)),
4066         HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4067                sizeof(struct hci_rp_read_local_pairing_opts)),
4068         HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4069                sizeof(struct hci_rp_read_page_scan_activity)),
4070         HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4071                       hci_cc_write_page_scan_activity),
4072         HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4073                sizeof(struct hci_rp_read_page_scan_type)),
4074         HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4075         HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4076                sizeof(struct hci_rp_read_data_block_size)),
4077         HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4078                sizeof(struct hci_rp_read_flow_control_mode)),
4079         HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4080                sizeof(struct hci_rp_read_local_amp_info)),
4081         HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4082                sizeof(struct hci_rp_read_clock)),
4083         HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4084                sizeof(struct hci_rp_read_enc_key_size)),
4085         HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4086                sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4087         HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4088                hci_cc_read_def_err_data_reporting,
4089                sizeof(struct hci_rp_read_def_err_data_reporting)),
4090         HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4091                       hci_cc_write_def_err_data_reporting),
4092         HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4093                sizeof(struct hci_rp_pin_code_reply)),
4094         HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4095                sizeof(struct hci_rp_pin_code_neg_reply)),
4096         HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4097                sizeof(struct hci_rp_read_local_oob_data)),
4098         HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4099                sizeof(struct hci_rp_read_local_oob_ext_data)),
4100         HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4101                sizeof(struct hci_rp_le_read_buffer_size)),
4102         HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4103                sizeof(struct hci_rp_le_read_local_features)),
4104         HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4105                sizeof(struct hci_rp_le_read_adv_tx_power)),
4106         HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4107                sizeof(struct hci_rp_user_confirm_reply)),
4108         HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4109                sizeof(struct hci_rp_user_confirm_reply)),
4110         HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4111                sizeof(struct hci_rp_user_confirm_reply)),
4112         HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4113                sizeof(struct hci_rp_user_confirm_reply)),
4114         HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4115         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4116         HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4117         HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4118         HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4119                hci_cc_le_read_accept_list_size,
4120                sizeof(struct hci_rp_le_read_accept_list_size)),
4121         HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4122         HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4123                       hci_cc_le_add_to_accept_list),
4124         HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4125                       hci_cc_le_del_from_accept_list),
4126         HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4127                sizeof(struct hci_rp_le_read_supported_states)),
4128         HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4129                sizeof(struct hci_rp_le_read_def_data_len)),
4130         HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4131                       hci_cc_le_write_def_data_len),
4132         HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4133                       hci_cc_le_add_to_resolv_list),
4134         HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4135                       hci_cc_le_del_from_resolv_list),
4136         HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4137                       hci_cc_le_clear_resolv_list),
4138         HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4139                sizeof(struct hci_rp_le_read_resolv_list_size)),
4140         HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4141                       hci_cc_le_set_addr_resolution_enable),
4142         HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4143                sizeof(struct hci_rp_le_read_max_data_len)),
4144         HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4145                       hci_cc_write_le_host_supported),
4146         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4147         HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4148                sizeof(struct hci_rp_read_rssi)),
4149         HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4150                sizeof(struct hci_rp_read_tx_power)),
4151         HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4152         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4153                       hci_cc_le_set_ext_scan_param),
4154         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4155                       hci_cc_le_set_ext_scan_enable),
4156         HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4157         HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4158                hci_cc_le_read_num_adv_sets,
4159                sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4160         HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4161                sizeof(struct hci_rp_le_set_ext_adv_params)),
4162         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4163                       hci_cc_le_set_ext_adv_enable),
4164         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4165                       hci_cc_le_set_adv_set_random_addr),
4166         HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4167         HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4168         HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4169         HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4170                       hci_cc_le_set_per_adv_enable),
4171         HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4172                sizeof(struct hci_rp_le_read_transmit_power)),
4173 #ifdef TIZEN_BT
4174         HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
4175                sizeof(struct hci_cc_rsp_enable_rssi)),
4176         HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
4177                sizeof(struct hci_cc_rp_get_raw_rssi)),
4178 #endif
4179         HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4180         HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4181                sizeof(struct hci_rp_le_read_buffer_size_v2)),
4182         HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4183                   sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4184         HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4185                sizeof(struct hci_rp_le_setup_iso_path)),
4186 };
4187
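/* Validate the Command Complete payload length against the table entry
 * before dispatching it to the matching handler.
 */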
4188 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4189                       struct sk_buff *skb)
4190 {
4191         void *data;
4192
4193         if (skb->len < cc->min_len) {
4194                 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4195                            cc->op, skb->len, cc->min_len);
4196                 return HCI_ERROR_UNSPECIFIED;
4197         }
4198
4199         /* Just warn if the length is over max_len, since it may still be
4200          * possible to partially parse the cc; leave it to the callback to
4201          * decide if that is acceptable.
4202          */
4203         if (skb->len > cc->max_len)
4204                 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4205                             cc->op, skb->len, cc->max_len);
4206
4207         data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4208         if (!data)
4209                 return HCI_ERROR_UNSPECIFIED;
4210
4211         return cc->func(hdev, data, skb);
4212 }
4213
4214 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4215                                  struct sk_buff *skb, u16 *opcode, u8 *status,
4216                                  hci_req_complete_t *req_complete,
4217                                  hci_req_complete_skb_t *req_complete_skb)
4218 {
4219         struct hci_ev_cmd_complete *ev = data;
4220         int i;
4221
4222         *opcode = __le16_to_cpu(ev->opcode);
4223
4224         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4225
4226         for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4227                 if (hci_cc_table[i].op == *opcode) {
4228                         *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4229                         break;
4230                 }
4231         }
4232
4233         if (i == ARRAY_SIZE(hci_cc_table)) {
4234                 /* Unknown opcode, assume byte 0 contains the status, so
4235                  * that e.g. __hci_cmd_sync() properly returns errors
4236                  * for vendor specific commands sent by HCI drivers.
4237                  * If a vendor doesn't actually follow this convention, we may
4238                  * need to introduce a vendor CC table in order to properly set
4239                  * the status.
4240                  */
4241                 *status = skb->data[0];
4242         }
4243
4244         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4245
4246         hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4247                              req_complete_skb);
4248
4249         if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4250                 bt_dev_err(hdev,
4251                            "unexpected event for opcode 0x%4.4x", *opcode);
4252                 return;
4253         }
4254
4255         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4256                 queue_work(hdev->workqueue, &hdev->cmd_work);
4257 }
4258
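/* LE Create CIS command status: if the command failed, close and
 * remove every CIS connection referenced by the sent command.
 */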
4259 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4260 {
4261         struct hci_cp_le_create_cis *cp;
4262         int i;
4263
4264         bt_dev_dbg(hdev, "status 0x%2.2x", status);
4265
4266         if (!status)
4267                 return;
4268
4269         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4270         if (!cp)
4271                 return;
4272
4273         hci_dev_lock(hdev);
4274
4275         /* Remove connection if command failed */
4276         for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4277                 struct hci_conn *conn;
4278                 u16 handle;
4279
4280                 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4281
4282                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4283                 if (conn) {
4284                         conn->state = BT_CLOSED;
4285                         hci_connect_cfm(conn, status);
4286                         hci_conn_del(conn);
4287                 }
4288         }
4289
4290         hci_dev_unlock(hdev);
4291 }
4292
4293 #define HCI_CS(_op, _func) \
4294 { \
4295         .op = _op, \
4296         .func = _func, \
4297 }
4298
4299 static const struct hci_cs {
4300         u16  op;
4301         void (*func)(struct hci_dev *hdev, __u8 status);
4302 } hci_cs_table[] = {
4303         HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4304         HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4305         HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4306         HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4307         HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4308         HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4309         HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4310         HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4311         HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4312                hci_cs_read_remote_ext_features),
4313         HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4314         HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4315                hci_cs_enhanced_setup_sync_conn),
4316         HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4317         HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4318         HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4319         HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4320         HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4321         HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4322         HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4323         HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4324         HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4325 };
4326
4327 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4328                                struct sk_buff *skb, u16 *opcode, u8 *status,
4329                                hci_req_complete_t *req_complete,
4330                                hci_req_complete_skb_t *req_complete_skb)
4331 {
4332         struct hci_ev_cmd_status *ev = data;
4333         int i;
4334
4335         *opcode = __le16_to_cpu(ev->opcode);
4336         *status = ev->status;
4337
4338         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4339
4340         for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4341                 if (hci_cs_table[i].op == *opcode) {
4342                         hci_cs_table[i].func(hdev, ev->status);
4343                         break;
4344                 }
4345         }
4346
4347         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4348
4349         /* Indicate request completion if the command failed. Also, if
4350          * we're not waiting for a special event and we get a successful
4351          * command status, we should try to flag the request as completed
4352          * (since for this kind of command there will not be a Command
4353          * Complete event).
4354          */
4355         if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
4356                 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4357                                      req_complete_skb);
4358                 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4359                         bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4360                                    *opcode);
4361                         return;
4362                 }
4363         }
4364
4365         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4366                 queue_work(hdev->workqueue, &hdev->cmd_work);
4367 }
4368
4369 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4370                                    struct sk_buff *skb)
4371 {
4372         struct hci_ev_hardware_error *ev = data;
4373
4374         bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4375
4376         hdev->hw_error_code = ev->code;
4377
4378         queue_work(hdev->req_workqueue, &hdev->error_reset);
4379 }
4380
4381 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4382                                 struct sk_buff *skb)
4383 {
4384         struct hci_ev_role_change *ev = data;
4385         struct hci_conn *conn;
4386
4387         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4388
4389         hci_dev_lock(hdev);
4390
4391         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4392         if (conn) {
4393                 if (!ev->status)
4394                         conn->role = ev->role;
4395
4396                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4397
4398                 hci_role_switch_cfm(conn, ev->status, ev->role);
4399         }
4400
4401         hci_dev_unlock(hdev);
4402 }
4403
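/* Handle a Number Of Completed Packets event: return the completed
 * packet credits to the per-link-type counters (ACL, LE, SCO, ISO)
 * and kick the TX work queue.
 */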
4404 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4405                                   struct sk_buff *skb)
4406 {
4407         struct hci_ev_num_comp_pkts *ev = data;
4408         int i;
4409
4410         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4411                              flex_array_size(ev, handles, ev->num)))
4412                 return;
4413
4414         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4415                 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4416                 return;
4417         }
4418
4419         bt_dev_dbg(hdev, "num %d", ev->num);
4420
4421         for (i = 0; i < ev->num; i++) {
4422                 struct hci_comp_pkts_info *info = &ev->handles[i];
4423                 struct hci_conn *conn;
4424                 __u16  handle, count;
4425
4426                 handle = __le16_to_cpu(info->handle);
4427                 count  = __le16_to_cpu(info->count);
4428
4429                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4430                 if (!conn)
4431                         continue;
4432
4433                 conn->sent -= count;
4434
4435                 switch (conn->type) {
4436                 case ACL_LINK:
4437                         hdev->acl_cnt += count;
4438                         if (hdev->acl_cnt > hdev->acl_pkts)
4439                                 hdev->acl_cnt = hdev->acl_pkts;
4440                         break;
4441
4442                 case LE_LINK:
4443                         if (hdev->le_pkts) {
4444                                 hdev->le_cnt += count;
4445                                 if (hdev->le_cnt > hdev->le_pkts)
4446                                         hdev->le_cnt = hdev->le_pkts;
4447                         } else {
4448                                 hdev->acl_cnt += count;
4449                                 if (hdev->acl_cnt > hdev->acl_pkts)
4450                                         hdev->acl_cnt = hdev->acl_pkts;
4451                         }
4452                         break;
4453
4454                 case SCO_LINK:
4455                         hdev->sco_cnt += count;
4456                         if (hdev->sco_cnt > hdev->sco_pkts)
4457                                 hdev->sco_cnt = hdev->sco_pkts;
4458                         break;
4459
4460                 case ISO_LINK:
4461                         if (hdev->iso_pkts) {
4462                                 hdev->iso_cnt += count;
4463                                 if (hdev->iso_cnt > hdev->iso_pkts)
4464                                         hdev->iso_cnt = hdev->iso_pkts;
4465                         } else if (hdev->le_pkts) {
4466                                 hdev->le_cnt += count;
4467                                 if (hdev->le_cnt > hdev->le_pkts)
4468                                         hdev->le_cnt = hdev->le_pkts;
4469                         } else {
4470                                 hdev->acl_cnt += count;
4471                                 if (hdev->acl_cnt > hdev->acl_pkts)
4472                                         hdev->acl_cnt = hdev->acl_pkts;
4473                         }
4474                         break;
4475
4476                 default:
4477                         bt_dev_err(hdev, "unknown type %d conn %p",
4478                                    conn->type, conn);
4479                         break;
4480                 }
4481         }
4482
4483         queue_work(hdev->workqueue, &hdev->tx_work);
4484 }
4485
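/* Look up a connection by handle; on AMP controllers the handle
 * identifies a logical channel, so resolve it through the channel.
 */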
4486 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4487                                                  __u16 handle)
4488 {
4489         struct hci_chan *chan;
4490
4491         switch (hdev->dev_type) {
4492         case HCI_PRIMARY:
4493                 return hci_conn_hash_lookup_handle(hdev, handle);
4494         case HCI_AMP:
4495                 chan = hci_chan_lookup_handle(hdev, handle);
4496                 if (chan)
4497                         return chan->conn;
4498                 break;
4499         default:
4500                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4501                 break;
4502         }
4503
4504         return NULL;
4505 }
4506
4507 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4508                                     struct sk_buff *skb)
4509 {
4510         struct hci_ev_num_comp_blocks *ev = data;
4511         int i;
4512
4513         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4514                              flex_array_size(ev, handles, ev->num_hndl)))
4515                 return;
4516
4517         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4518                 bt_dev_err(hdev, "wrong event for mode %d",
4519                            hdev->flow_ctl_mode);
4520                 return;
4521         }
4522
4523         bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4524                    ev->num_hndl);
4525
4526         for (i = 0; i < ev->num_hndl; i++) {
4527                 struct hci_comp_blocks_info *info = &ev->handles[i];
4528                 struct hci_conn *conn = NULL;
4529                 __u16  handle, block_count;
4530
4531                 handle = __le16_to_cpu(info->handle);
4532                 block_count = __le16_to_cpu(info->blocks);
4533
4534                 conn = __hci_conn_lookup_handle(hdev, handle);
4535                 if (!conn)
4536                         continue;
4537
4538                 conn->sent -= block_count;
4539
4540                 switch (conn->type) {
4541                 case ACL_LINK:
4542                 case AMP_LINK:
4543                         hdev->block_cnt += block_count;
4544                         if (hdev->block_cnt > hdev->num_blocks)
4545                                 hdev->block_cnt = hdev->num_blocks;
4546                         break;
4547
4548                 default:
4549                         bt_dev_err(hdev, "unknown type %d conn %p",
4550                                    conn->type, conn);
4551                         break;
4552                 }
4553         }
4554
4555         queue_work(hdev->workqueue, &hdev->tx_work);
4556 }
4557
4558 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4559                                 struct sk_buff *skb)
4560 {
4561         struct hci_ev_mode_change *ev = data;
4562         struct hci_conn *conn;
4563
4564         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4565
4566         hci_dev_lock(hdev);
4567
4568         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4569         if (conn) {
4570                 conn->mode = ev->mode;
4571
4572                 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4573                                         &conn->flags)) {
4574                         if (conn->mode == HCI_CM_ACTIVE)
4575                                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4576                         else
4577                                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4578                 }
4579
4580                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4581                         hci_sco_setup(conn, ev->status);
4582         }
4583
4584         hci_dev_unlock(hdev);
4585 }
4586
4587 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4588                                      struct sk_buff *skb)
4589 {
4590         struct hci_ev_pin_code_req *ev = data;
4591         struct hci_conn *conn;
4592
4593         bt_dev_dbg(hdev, "");
4594
4595         hci_dev_lock(hdev);
4596
4597         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4598         if (!conn)
4599                 goto unlock;
4600
4601         if (conn->state == BT_CONNECTED) {
4602                 hci_conn_hold(conn);
4603                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4604                 hci_conn_drop(conn);
4605         }
4606
4607         if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4608             !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4609                 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4610                              sizeof(ev->bdaddr), &ev->bdaddr);
4611         } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4612                 u8 secure;
4613
4614                 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4615                         secure = 1;
4616                 else
4617                         secure = 0;
4618
4619                 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4620         }
4621
4622 unlock:
4623         hci_dev_unlock(hdev);
4624 }
4625
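/* Record the link key type and PIN length on the connection and derive
 * the pending security level from the key type.
 */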
4626 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4627 {
4628         if (key_type == HCI_LK_CHANGED_COMBINATION)
4629                 return;
4630
4631         conn->pin_length = pin_len;
4632         conn->key_type = key_type;
4633
4634         switch (key_type) {
4635         case HCI_LK_LOCAL_UNIT:
4636         case HCI_LK_REMOTE_UNIT:
4637         case HCI_LK_DEBUG_COMBINATION:
4638                 return;
4639         case HCI_LK_COMBINATION:
4640                 if (pin_len == 16)
4641                         conn->pending_sec_level = BT_SECURITY_HIGH;
4642                 else
4643                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4644                 break;
4645         case HCI_LK_UNAUTH_COMBINATION_P192:
4646         case HCI_LK_UNAUTH_COMBINATION_P256:
4647                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4648                 break;
4649         case HCI_LK_AUTH_COMBINATION_P192:
4650                 conn->pending_sec_level = BT_SECURITY_HIGH;
4651                 break;
4652         case HCI_LK_AUTH_COMBINATION_P256:
4653                 conn->pending_sec_level = BT_SECURITY_FIPS;
4654                 break;
4655         }
4656 }
4657
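/* Handle a Link Key Request: reply with a stored key if it is strong
 * enough for the pending security level, otherwise send a negative
 * reply so that pairing can take place instead.
 */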
4658 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4659                                      struct sk_buff *skb)
4660 {
4661         struct hci_ev_link_key_req *ev = data;
4662         struct hci_cp_link_key_reply cp;
4663         struct hci_conn *conn;
4664         struct link_key *key;
4665
4666         bt_dev_dbg(hdev, "");
4667
4668         if (!hci_dev_test_flag(hdev, HCI_MGMT))
4669                 return;
4670
4671         hci_dev_lock(hdev);
4672
4673         key = hci_find_link_key(hdev, &ev->bdaddr);
4674         if (!key) {
4675                 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4676                 goto not_found;
4677         }
4678
4679         bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4680
4681         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4682         if (conn) {
4683                 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4684
4685                 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4686                      key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4687                     conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4688                         bt_dev_dbg(hdev, "ignoring unauthenticated key");
4689                         goto not_found;
4690                 }
4691
4692                 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4693                     (conn->pending_sec_level == BT_SECURITY_HIGH ||
4694                      conn->pending_sec_level == BT_SECURITY_FIPS)) {
4695                         bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4696                         goto not_found;
4697                 }
4698
4699                 conn_set_key(conn, key->type, key->pin_len);
4700         }
4701
4702         bacpy(&cp.bdaddr, &ev->bdaddr);
4703         memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4704
4705         hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4706
4707         hci_dev_unlock(hdev);
4708
4709         return;
4710
4711 not_found:
4712         hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4713         hci_dev_unlock(hdev);
4714 }
4715
4716 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4717                                     struct sk_buff *skb)
4718 {
4719         struct hci_ev_link_key_notify *ev = data;
4720         struct hci_conn *conn;
4721         struct link_key *key;
4722         bool persistent;
4723         u8 pin_len = 0;
4724
4725         bt_dev_dbg(hdev, "");
4726
4727         hci_dev_lock(hdev);
4728
4729         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4730         if (!conn)
4731                 goto unlock;
4732
4733         hci_conn_hold(conn);
4734         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4735         hci_conn_drop(conn);
4736
4737         set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4738         conn_set_key(conn, ev->key_type, conn->pin_length);
4739
4740         if (!hci_dev_test_flag(hdev, HCI_MGMT))
4741                 goto unlock;
4742
4743         key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4744                                 ev->key_type, pin_len, &persistent);
4745         if (!key)
4746                 goto unlock;
4747
4748         /* Update connection information since adding the key will have
4749          * fixed up the type in the case of changed combination keys.
4750          */
4751         if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4752                 conn_set_key(conn, key->type, key->pin_len);
4753
4754         mgmt_new_link_key(hdev, key, persistent);
4755
4756         /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4757          * is set. If it's not set, simply remove the key from the kernel
4758          * list (we've still notified user space about it but with
4759          * store_hint being 0).
4760          */
4761         if (key->type == HCI_LK_DEBUG_COMBINATION &&
4762             !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4763                 list_del_rcu(&key->list);
4764                 kfree_rcu(key, rcu);
4765                 goto unlock;
4766         }
4767
4768         if (persistent)
4769                 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4770         else
4771                 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4772
4773 unlock:
4774         hci_dev_unlock(hdev);
4775 }
4776
4777 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4778                                  struct sk_buff *skb)
4779 {
4780         struct hci_ev_clock_offset *ev = data;
4781         struct hci_conn *conn;
4782
4783         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4784
4785         hci_dev_lock(hdev);
4786
4787         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4788         if (conn && !ev->status) {
4789                 struct inquiry_entry *ie;
4790
4791                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4792                 if (ie) {
4793                         ie->data.clock_offset = ev->clock_offset;
4794                         ie->timestamp = jiffies;
4795                 }
4796         }
4797
4798         hci_dev_unlock(hdev);
4799 }
4800
4801 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4802                                     struct sk_buff *skb)
4803 {
4804         struct hci_ev_pkt_type_change *ev = data;
4805         struct hci_conn *conn;
4806
4807         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4808
4809         hci_dev_lock(hdev);
4810
4811         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4812         if (conn && !ev->status)
4813                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4814
4815         hci_dev_unlock(hdev);
4816 }
4817
4818 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4819                                    struct sk_buff *skb)
4820 {
4821         struct hci_ev_pscan_rep_mode *ev = data;
4822         struct inquiry_entry *ie;
4823
4824         bt_dev_dbg(hdev, "");
4825
4826         hci_dev_lock(hdev);
4827
4828         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4829         if (ie) {
4830                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4831                 ie->timestamp = jiffies;
4832         }
4833
4834         hci_dev_unlock(hdev);
4835 }
4836
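     /* Inquiry Result with RSSI events come in two variants that can only be
      * told apart by their length: one carrying a pscan_mode field
      * (struct inquiry_info_rssi_pscan) and one without it
      * (struct inquiry_info_rssi).
      */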
4837 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4838                                              struct sk_buff *skb)
4839 {
4840         struct hci_ev_inquiry_result_rssi *ev = edata;
4841         struct inquiry_data data;
4842         int i;
4843
4844         bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4845
4846         if (!ev->num)
4847                 return;
4848
4849         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4850                 return;
4851
4852         hci_dev_lock(hdev);
4853
4854         if (skb->len == array_size(ev->num,
4855                                    sizeof(struct inquiry_info_rssi_pscan))) {
4856                 struct inquiry_info_rssi_pscan *info;
4857
4858                 for (i = 0; i < ev->num; i++) {
4859                         u32 flags;
4860
4861                         info = hci_ev_skb_pull(hdev, skb,
4862                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4863                                                sizeof(*info));
4864                         if (!info) {
4865                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4866                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4867                                 goto unlock;
4868                         }
4869
4870                         bacpy(&data.bdaddr, &info->bdaddr);
4871                         data.pscan_rep_mode     = info->pscan_rep_mode;
4872                         data.pscan_period_mode  = info->pscan_period_mode;
4873                         data.pscan_mode         = info->pscan_mode;
4874                         memcpy(data.dev_class, info->dev_class, 3);
4875                         data.clock_offset       = info->clock_offset;
4876                         data.rssi               = info->rssi;
4877                         data.ssp_mode           = 0x00;
4878
4879                         flags = hci_inquiry_cache_update(hdev, &data, false);
4880
4881                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4882                                           info->dev_class, info->rssi,
4883                                           flags, NULL, 0, NULL, 0, 0);
4884                 }
4885         } else if (skb->len == array_size(ev->num,
4886                                           sizeof(struct inquiry_info_rssi))) {
4887                 struct inquiry_info_rssi *info;
4888
4889                 for (i = 0; i < ev->num; i++) {
4890                         u32 flags;
4891
4892                         info = hci_ev_skb_pull(hdev, skb,
4893                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4894                                                sizeof(*info));
4895                         if (!info) {
4896                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4897                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4898                                 goto unlock;
4899                         }
4900
4901                         bacpy(&data.bdaddr, &info->bdaddr);
4902                         data.pscan_rep_mode     = info->pscan_rep_mode;
4903                         data.pscan_period_mode  = info->pscan_period_mode;
4904                         data.pscan_mode         = 0x00;
4905                         memcpy(data.dev_class, info->dev_class, 3);
4906                         data.clock_offset       = info->clock_offset;
4907                         data.rssi               = info->rssi;
4908                         data.ssp_mode           = 0x00;
4909
4910                         flags = hci_inquiry_cache_update(hdev, &data, false);
4911
4912                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4913                                           info->dev_class, info->rssi,
4914                                           flags, NULL, 0, NULL, 0, 0);
4915                 }
4916         } else {
4917                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4918                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4919         }
4920 unlock:
4921         hci_dev_unlock(hdev);
4922 }
4923
4924 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4925                                         struct sk_buff *skb)
4926 {
4927         struct hci_ev_remote_ext_features *ev = data;
4928         struct hci_conn *conn;
4929
4930         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4931
4932         hci_dev_lock(hdev);
4933
4934         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4935         if (!conn)
4936                 goto unlock;
4937
4938         if (ev->page < HCI_MAX_PAGES)
4939                 memcpy(conn->features[ev->page], ev->features, 8);
4940
4941         if (!ev->status && ev->page == 0x01) {
4942                 struct inquiry_entry *ie;
4943
4944                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4945                 if (ie)
4946                         ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4947
4948                 if (ev->features[0] & LMP_HOST_SSP) {
4949                         set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4950                 } else {
4951                         /* It is mandated by the Bluetooth specification that
4952                          * Extended Inquiry Results are only used when Secure
4953                          * Simple Pairing is enabled, but some devices violate
4954                          * this.
4955                          *
4956                          * To make these devices work, the internal SSP
4957                          * enabled flag needs to be cleared if the remote host
4958                          * features do not indicate SSP support */
4959                         clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4960                 }
4961
4962                 if (ev->features[0] & LMP_HOST_SC)
4963                         set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4964         }
4965
4966         if (conn->state != BT_CONFIG)
4967                 goto unlock;
4968
4969         if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4970                 struct hci_cp_remote_name_req cp;
4971                 memset(&cp, 0, sizeof(cp));
4972                 bacpy(&cp.bdaddr, &conn->dst);
4973                 cp.pscan_rep_mode = 0x02;
4974                 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4975         } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4976                 mgmt_device_connected(hdev, conn, NULL, 0);
4977
4978         if (!hci_outgoing_auth_needed(hdev, conn)) {
4979                 conn->state = BT_CONNECTED;
4980                 hci_connect_cfm(conn, ev->status);
4981                 hci_conn_drop(conn);
4982         }
4983
4984 unlock:
4985         hci_dev_unlock(hdev);
4986 }
4987
4988 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4989                                        struct sk_buff *skb)
4990 {
4991         struct hci_ev_sync_conn_complete *ev = data;
4992         struct hci_conn *conn;
4993         u8 status = ev->status;
4994
4995         switch (ev->link_type) {
4996         case SCO_LINK:
4997         case ESCO_LINK:
4998                 break;
4999         default:
5000                 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5001                  * for HCI_Synchronous_Connection_Complete is limited to
5002                  * either SCO or eSCO
5003                  */
5004                 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5005                 return;
5006         }
5007
5008         bt_dev_dbg(hdev, "status 0x%2.2x", status);
5009
5010         hci_dev_lock(hdev);
5011
5012         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5013         if (!conn) {
5014                 if (ev->link_type == ESCO_LINK)
5015                         goto unlock;
5016
5017                 /* When the link type in the event indicates a SCO connection
5018                  * and lookup of the connection object fails, then check
5019                  * if an eSCO connection object exists.
5020                  *
5021                  * The core limits the synchronous connections to either
5022                  * SCO or eSCO. The eSCO connection is preferred and is
5023                  * attempted first; until it has been successfully established,
5024                  * the link type will be hinted as eSCO.
5025                  */
5026                 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5027                 if (!conn)
5028                         goto unlock;
5029         }
5030
5031         /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5032          * Processing it more than once per connection can corrupt kernel memory.
5033          *
5034          * As the connection handle is set here for the first time, it indicates
5035          * whether the connection is already set up.
5036          */
5037         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5038                 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5039                 goto unlock;
5040         }
5041
5042         switch (status) {
5043         case 0x00:
5044                 conn->handle = __le16_to_cpu(ev->handle);
5045                 if (conn->handle > HCI_CONN_HANDLE_MAX) {
5046                         bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
5047                                    conn->handle, HCI_CONN_HANDLE_MAX);
5048                         status = HCI_ERROR_INVALID_PARAMETERS;
5049                         conn->state = BT_CLOSED;
5050                         break;
5051                 }
5052
5053                 conn->state  = BT_CONNECTED;
5054                 conn->type   = ev->link_type;
5055
5056                 hci_debugfs_create_conn(conn);
5057                 hci_conn_add_sysfs(conn);
5058                 break;
5059
5060         case 0x10:      /* Connection Accept Timeout */
5061         case 0x0d:      /* Connection Rejected due to Limited Resources */
5062         case 0x11:      /* Unsupported Feature or Parameter Value */
5063         case 0x1c:      /* SCO interval rejected */
5064         case 0x1a:      /* Unsupported Remote Feature */
5065         case 0x1e:      /* Invalid LMP Parameters */
5066         case 0x1f:      /* Unspecified error */
5067         case 0x20:      /* Unsupported LMP Parameter value */
5068                 if (conn->out) {
5069                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5070                                         (hdev->esco_type & EDR_ESCO_MASK);
5071                         if (hci_setup_sync(conn, conn->link->handle))
5072                                 goto unlock;
5073                 }
5074                 fallthrough;
5075
5076         default:
5077                 conn->state = BT_CLOSED;
5078                 break;
5079         }
5080
5081         bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5082         /* Notify only for SCO over the HCI transport data path (data_path
5083          * of zero); a non-zero value means a non-HCI transport data path.
5084          */
5085         if (conn->codec.data_path == 0 && hdev->notify) {
5086                 switch (ev->air_mode) {
5087                 case 0x02:
5088                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5089                         break;
5090                 case 0x03:
5091                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5092                         break;
5093                 }
5094         }
5095
5096         hci_connect_cfm(conn, status);
5097         if (status)
5098                 hci_conn_del(conn);
5099
5100 unlock:
5101         hci_dev_unlock(hdev);
5102 }
5103
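     /* Return the length of the significant part of an EIR buffer, i.e.
      * everything up to (but not including) the first zero-length field, or
      * eir_len if no terminating zero-length field is found.
      */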
5104 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5105 {
5106         size_t parsed = 0;
5107
5108         while (parsed < eir_len) {
5109                 u8 field_len = eir[0];
5110
5111                 if (field_len == 0)
5112                         return parsed;
5113
5114                 parsed += field_len + 1;
5115                 eir += field_len + 1;
5116         }
5117
5118         return eir_len;
5119 }
5120
5121 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5122                                             struct sk_buff *skb)
5123 {
5124         struct hci_ev_ext_inquiry_result *ev = edata;
5125         struct inquiry_data data;
5126         size_t eir_len;
5127         int i;
5128
5129         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5130                              flex_array_size(ev, info, ev->num)))
5131                 return;
5132
5133         bt_dev_dbg(hdev, "num %d", ev->num);
5134
5135         if (!ev->num)
5136                 return;
5137
5138         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5139                 return;
5140
5141         hci_dev_lock(hdev);
5142
5143         for (i = 0; i < ev->num; i++) {
5144                 struct extended_inquiry_info *info = &ev->info[i];
5145                 u32 flags;
5146                 bool name_known;
5147
5148                 bacpy(&data.bdaddr, &info->bdaddr);
5149                 data.pscan_rep_mode     = info->pscan_rep_mode;
5150                 data.pscan_period_mode  = info->pscan_period_mode;
5151                 data.pscan_mode         = 0x00;
5152                 memcpy(data.dev_class, info->dev_class, 3);
5153                 data.clock_offset       = info->clock_offset;
5154                 data.rssi               = info->rssi;
5155                 data.ssp_mode           = 0x01;
5156
5157                 if (hci_dev_test_flag(hdev, HCI_MGMT))
5158                         name_known = eir_get_data(info->data,
5159                                                   sizeof(info->data),
5160                                                   EIR_NAME_COMPLETE, NULL);
5161                 else
5162                         name_known = true;
5163
5164                 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5165
5166                 eir_len = eir_get_length(info->data, sizeof(info->data));
5167
5168                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5169                                   info->dev_class, info->rssi,
5170                                   flags, info->data, eir_len, NULL, 0, 0);
5171         }
5172
5173         hci_dev_unlock(hdev);
5174 }
5175
5176 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5177                                          struct sk_buff *skb)
5178 {
5179         struct hci_ev_key_refresh_complete *ev = data;
5180         struct hci_conn *conn;
5181
5182         bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5183                    __le16_to_cpu(ev->handle));
5184
5185         hci_dev_lock(hdev);
5186
5187         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5188         if (!conn)
5189                 goto unlock;
5190
5191         /* For BR/EDR the necessary steps are taken through the
5192          * auth_complete event.
5193          */
5194         if (conn->type != LE_LINK)
5195                 goto unlock;
5196
5197         if (!ev->status)
5198                 conn->sec_level = conn->pending_sec_level;
5199
5200         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5201
5202         if (ev->status && conn->state == BT_CONNECTED) {
5203                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5204                 hci_conn_drop(conn);
5205                 goto unlock;
5206         }
5207
5208         if (conn->state == BT_CONFIG) {
5209                 if (!ev->status)
5210                         conn->state = BT_CONNECTED;
5211
5212                 hci_connect_cfm(conn, ev->status);
5213                 hci_conn_drop(conn);
5214         } else {
5215                 hci_auth_cfm(conn, ev->status);
5216
5217                 hci_conn_hold(conn);
5218                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5219                 hci_conn_drop(conn);
5220         }
5221
5222 unlock:
5223         hci_dev_unlock(hdev);
5224 }
5225
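     /* Work out the Authentication_Requirements value to use in the IO
      * Capability Request Reply by combining the remote requirements with our
      * own bonding and MITM needs; e.g. if the remote asked for No Bonding and
      * our auth_type has the MITM bit set, the result is No Bonding with MITM
      * protection.
      */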
5226 static u8 hci_get_auth_req(struct hci_conn *conn)
5227 {
5228         /* If the remote requests no-bonding, follow that lead */
5229         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5230             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5231                 return conn->remote_auth | (conn->auth_type & 0x01);
5232
5233         /* If both remote and local have enough IO capabilities, require
5234          * MITM protection
5235          */
5236         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5237             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5238                 return conn->remote_auth | 0x01;
5239
5240         /* No MITM protection possible so ignore remote requirement */
5241         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5242 }
5243
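     /* Work out the OOB_Data_Present value for the IO Capability Request
      * Reply based on the OOB data stored for the peer and on whether Secure
      * Connections and Secure Connections Only mode are in use.
      */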
5244 static u8 bredr_oob_data_present(struct hci_conn *conn)
5245 {
5246         struct hci_dev *hdev = conn->hdev;
5247         struct oob_data *data;
5248
5249         data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5250         if (!data)
5251                 return 0x00;
5252
5253         if (bredr_sc_enabled(hdev)) {
5254                 /* When Secure Connections is enabled, just return
5255                  * the present value stored with the OOB data. The
5256                  * stored value contains the right present
5257                  * information. However, it can only be trusted when
5258                  * not in Secure Connections Only mode.
5259                  */
5260                 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5261                         return data->present;
5262
5263                 /* When Secure Connections Only mode is enabled, then
5264                  * the P-256 values are required. If they are not
5265                  * available, then do not declare that OOB data is
5266                  * present.
5267                  */
5268                 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5269                     !memcmp(data->hash256, ZERO_KEY, 16))
5270                         return 0x00;
5271
5272                 return 0x02;
5273         }
5274
5275         /* When Secure Connections is not enabled or actually
5276          * not supported by the hardware, then check whether the
5277          * P-192 data values are present.
5278          */
5279         if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5280             !memcmp(data->hash192, ZERO_KEY, 16))
5281                 return 0x00;
5282
5283         return 0x01;
5284 }
5285
5286 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5287                                     struct sk_buff *skb)
5288 {
5289         struct hci_ev_io_capa_request *ev = data;
5290         struct hci_conn *conn;
5291
5292         bt_dev_dbg(hdev, "");
5293
5294         hci_dev_lock(hdev);
5295
5296         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5297         if (!conn)
5298                 goto unlock;
5299
5300         hci_conn_hold(conn);
5301
5302         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5303                 goto unlock;
5304
5305         /* Allow pairing if we're bondable, if we are the initiators of
5306          * the pairing, or if the remote is not requesting bonding.
5307          */
5308         if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5309             test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5310             (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5311                 struct hci_cp_io_capability_reply cp;
5312
5313                 bacpy(&cp.bdaddr, &ev->bdaddr);
5314                 /* Change the IO capability from KeyboardDisplay
5315                  * to DisplayYesNo, as the former is not supported by the BT spec. */
5316                 cp.capability = (conn->io_capability == 0x04) ?
5317                                 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5318
5319                 /* If we are initiators, there is no remote information yet */
5320                 if (conn->remote_auth == 0xff) {
5321                         /* Request MITM protection if our IO caps allow it
5322                          * except for the no-bonding case.
5323                          */
5324                         if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5325                             conn->auth_type != HCI_AT_NO_BONDING)
5326                                 conn->auth_type |= 0x01;
5327                 } else {
5328                         conn->auth_type = hci_get_auth_req(conn);
5329                 }
5330
5331                 /* If we're not bondable, force one of the non-bondable
5332                  * authentication requirement values.
5333                  */
5334                 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5335                         conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5336
5337                 cp.authentication = conn->auth_type;
5338                 cp.oob_data = bredr_oob_data_present(conn);
5339
5340                 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5341                              sizeof(cp), &cp);
5342         } else {
5343                 struct hci_cp_io_capability_neg_reply cp;
5344
5345                 bacpy(&cp.bdaddr, &ev->bdaddr);
5346                 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5347
5348                 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5349                              sizeof(cp), &cp);
5350         }
5351
5352 unlock:
5353         hci_dev_unlock(hdev);
5354 }
5355
5356 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5357                                   struct sk_buff *skb)
5358 {
5359         struct hci_ev_io_capa_reply *ev = data;
5360         struct hci_conn *conn;
5361
5362         bt_dev_dbg(hdev, "");
5363
5364         hci_dev_lock(hdev);
5365
5366         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5367         if (!conn)
5368                 goto unlock;
5369
5370         conn->remote_cap = ev->capability;
5371         conn->remote_auth = ev->authentication;
5372
5373 unlock:
5374         hci_dev_unlock(hdev);
5375 }
5376
5377 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5378                                          struct sk_buff *skb)
5379 {
5380         struct hci_ev_user_confirm_req *ev = data;
5381         int loc_mitm, rem_mitm, confirm_hint = 0;
5382         struct hci_conn *conn;
5383
5384         bt_dev_dbg(hdev, "");
5385
5386         hci_dev_lock(hdev);
5387
5388         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5389                 goto unlock;
5390
5391         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5392         if (!conn)
5393                 goto unlock;
5394
5395         loc_mitm = (conn->auth_type & 0x01);
5396         rem_mitm = (conn->remote_auth & 0x01);
5397
5398         /* If we require MITM but the remote device can't provide that
5399          * (it has NoInputNoOutput) then reject the confirmation
5400          * request. We check the security level here since it doesn't
5401          * necessarily match conn->auth_type.
5402          */
5403         if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5404             conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5405                 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5406                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5407                              sizeof(ev->bdaddr), &ev->bdaddr);
5408                 goto unlock;
5409         }
5410
5411         /* If neither side requires MITM protection, auto-accept */
5412         if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5413             (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5414
5415                 /* If we're not the initiators, request authorization to
5416                  * proceed from user space (mgmt_user_confirm with
5417                  * confirm_hint set to 1). The exception is if neither
5418                  * side requires MITM or if the local IO capability is
5419                  * NoInputNoOutput, in which case we auto-accept.
5420                  */
5421                 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5422                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5423                     (loc_mitm || rem_mitm)) {
5424                         bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5425                         confirm_hint = 1;
5426                         goto confirm;
5427                 }
5428
5429                 /* If a link key already exists on the local host, leave the
5430                  * decision to user space since the remote device could be
5431                  * legitimate or malicious.
5432                  */
5433                 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5434                         bt_dev_dbg(hdev, "Local host already has link key");
5435                         confirm_hint = 1;
5436                         goto confirm;
5437                 }
5438
5439                 BT_DBG("Auto-accept of user confirmation with %ums delay",
5440                        hdev->auto_accept_delay);
5441
5442                 if (hdev->auto_accept_delay > 0) {
5443                         int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5444                         queue_delayed_work(conn->hdev->workqueue,
5445                                            &conn->auto_accept_work, delay);
5446                         goto unlock;
5447                 }
5448
5449                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5450                              sizeof(ev->bdaddr), &ev->bdaddr);
5451                 goto unlock;
5452         }
5453
5454 confirm:
5455         mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5456                                   le32_to_cpu(ev->passkey), confirm_hint);
5457
5458 unlock:
5459         hci_dev_unlock(hdev);
5460 }
5461
5462 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5463                                          struct sk_buff *skb)
5464 {
5465         struct hci_ev_user_passkey_req *ev = data;
5466
5467         bt_dev_dbg(hdev, "");
5468
5469         if (hci_dev_test_flag(hdev, HCI_MGMT))
5470                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5471 }
5472
5473 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5474                                         struct sk_buff *skb)
5475 {
5476         struct hci_ev_user_passkey_notify *ev = data;
5477         struct hci_conn *conn;
5478
5479         bt_dev_dbg(hdev, "");
5480
5481         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5482         if (!conn)
5483                 return;
5484
5485         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5486         conn->passkey_entered = 0;
5487
5488         if (hci_dev_test_flag(hdev, HCI_MGMT))
5489                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5490                                          conn->dst_type, conn->passkey_notify,
5491                                          conn->passkey_entered);
5492 }
5493
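     /* Track the progress of remote passkey entry (started, digit entered,
      * digit erased, cleared, completed) and, except for the start and
      * completion keypresses, forward the current count to user space via
      * mgmt_user_passkey_notify().
      */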
5494 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5495                                     struct sk_buff *skb)
5496 {
5497         struct hci_ev_keypress_notify *ev = data;
5498         struct hci_conn *conn;
5499
5500         bt_dev_dbg(hdev, "");
5501
5502         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5503         if (!conn)
5504                 return;
5505
5506         switch (ev->type) {
5507         case HCI_KEYPRESS_STARTED:
5508                 conn->passkey_entered = 0;
5509                 return;
5510
5511         case HCI_KEYPRESS_ENTERED:
5512                 conn->passkey_entered++;
5513                 break;
5514
5515         case HCI_KEYPRESS_ERASED:
5516                 conn->passkey_entered--;
5517                 break;
5518
5519         case HCI_KEYPRESS_CLEARED:
5520                 conn->passkey_entered = 0;
5521                 break;
5522
5523         case HCI_KEYPRESS_COMPLETED:
5524                 return;
5525         }
5526
5527         if (hci_dev_test_flag(hdev, HCI_MGMT))
5528                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5529                                          conn->dst_type, conn->passkey_notify,
5530                                          conn->passkey_entered);
5531 }
5532
5533 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5534                                          struct sk_buff *skb)
5535 {
5536         struct hci_ev_simple_pair_complete *ev = data;
5537         struct hci_conn *conn;
5538
5539         bt_dev_dbg(hdev, "");
5540
5541         hci_dev_lock(hdev);
5542
5543         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5544         if (!conn)
5545                 goto unlock;
5546
5547         /* Reset the authentication requirement to unknown */
5548         conn->remote_auth = 0xff;
5549
5550         /* To avoid duplicate auth_failed events to user space we check
5551          * the HCI_CONN_AUTH_PEND flag which will be set if we
5552          * initiated the authentication. A traditional auth_complete
5553          * event is always produced as initiator and is also mapped to
5554          * the mgmt_auth_failed event. */
5555         if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5556                 mgmt_auth_failed(conn, ev->status);
5557
5558         hci_conn_drop(conn);
5559
5560 unlock:
5561         hci_dev_unlock(hdev);
5562 }
5563
5564 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5565                                          struct sk_buff *skb)
5566 {
5567         struct hci_ev_remote_host_features *ev = data;
5568         struct inquiry_entry *ie;
5569         struct hci_conn *conn;
5570
5571         bt_dev_dbg(hdev, "");
5572
5573         hci_dev_lock(hdev);
5574
5575         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5576         if (conn)
5577                 memcpy(conn->features[1], ev->features, 8);
5578
5579         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5580         if (ie)
5581                 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5582
5583         hci_dev_unlock(hdev);
5584 }
5585
5586 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5587                                             struct sk_buff *skb)
5588 {
5589         struct hci_ev_remote_oob_data_request *ev = edata;
5590         struct oob_data *data;
5591
5592         bt_dev_dbg(hdev, "");
5593
5594         hci_dev_lock(hdev);
5595
5596         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5597                 goto unlock;
5598
5599         data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5600         if (!data) {
5601                 struct hci_cp_remote_oob_data_neg_reply cp;
5602
5603                 bacpy(&cp.bdaddr, &ev->bdaddr);
5604                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5605                              sizeof(cp), &cp);
5606                 goto unlock;
5607         }
5608
5609         if (bredr_sc_enabled(hdev)) {
5610                 struct hci_cp_remote_oob_ext_data_reply cp;
5611
5612                 bacpy(&cp.bdaddr, &ev->bdaddr);
5613                 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5614                         memset(cp.hash192, 0, sizeof(cp.hash192));
5615                         memset(cp.rand192, 0, sizeof(cp.rand192));
5616                 } else {
5617                         memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5618                         memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5619                 }
5620                 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5621                 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5622
5623                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5624                              sizeof(cp), &cp);
5625         } else {
5626                 struct hci_cp_remote_oob_data_reply cp;
5627
5628                 bacpy(&cp.bdaddr, &ev->bdaddr);
5629                 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5630                 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5631
5632                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5633                              sizeof(cp), &cp);
5634         }
5635
5636 unlock:
5637         hci_dev_unlock(hdev);
5638 }
5639
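     /* Handlers for AMP (Bluetooth High Speed) physical and logical link
      * events, only compiled in when CONFIG_BT_HS is enabled.
      */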
5640 #if IS_ENABLED(CONFIG_BT_HS)
5641 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5642                                   struct sk_buff *skb)
5643 {
5644         struct hci_ev_channel_selected *ev = data;
5645         struct hci_conn *hcon;
5646
5647         bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5648
5649         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5650         if (!hcon)
5651                 return;
5652
5653         amp_read_loc_assoc_final_data(hdev, hcon);
5654 }
5655
5656 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5657                                       struct sk_buff *skb)
5658 {
5659         struct hci_ev_phy_link_complete *ev = data;
5660         struct hci_conn *hcon, *bredr_hcon;
5661
5662         bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5663                    ev->status);
5664
5665         hci_dev_lock(hdev);
5666
5667         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5668         if (!hcon)
5669                 goto unlock;
5670
5671         if (!hcon->amp_mgr)
5672                 goto unlock;
5673
5674         if (ev->status) {
5675                 hci_conn_del(hcon);
5676                 goto unlock;
5677         }
5678
5679         bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5680
5681         hcon->state = BT_CONNECTED;
5682         bacpy(&hcon->dst, &bredr_hcon->dst);
5683
5684         hci_conn_hold(hcon);
5685         hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5686         hci_conn_drop(hcon);
5687
5688         hci_debugfs_create_conn(hcon);
5689         hci_conn_add_sysfs(hcon);
5690
5691         amp_physical_cfm(bredr_hcon, hcon);
5692
5693 unlock:
5694         hci_dev_unlock(hdev);
5695 }
5696
5697 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5698                                      struct sk_buff *skb)
5699 {
5700         struct hci_ev_logical_link_complete *ev = data;
5701         struct hci_conn *hcon;
5702         struct hci_chan *hchan;
5703         struct amp_mgr *mgr;
5704
5705         bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5706                    le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5707
5708         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5709         if (!hcon)
5710                 return;
5711
5712         /* Create AMP hchan */
5713         hchan = hci_chan_create(hcon);
5714         if (!hchan)
5715                 return;
5716
5717         hchan->handle = le16_to_cpu(ev->handle);
5718         hchan->amp = true;
5719
5720         BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5721
5722         mgr = hcon->amp_mgr;
5723         if (mgr && mgr->bredr_chan) {
5724                 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5725
5726                 l2cap_chan_lock(bredr_chan);
5727
5728                 bredr_chan->conn->mtu = hdev->block_mtu;
5729                 l2cap_logical_cfm(bredr_chan, hchan, 0);
5730                 hci_conn_hold(hcon);
5731
5732                 l2cap_chan_unlock(bredr_chan);
5733         }
5734 }
5735
5736 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5737                                              struct sk_buff *skb)
5738 {
5739         struct hci_ev_disconn_logical_link_complete *ev = data;
5740         struct hci_chan *hchan;
5741
5742         bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5743                    le16_to_cpu(ev->handle), ev->status);
5744
5745         if (ev->status)
5746                 return;
5747
5748         hci_dev_lock(hdev);
5749
5750         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5751         if (!hchan || !hchan->amp)
5752                 goto unlock;
5753
5754         amp_destroy_logical_link(hchan, ev->reason);
5755
5756 unlock:
5757         hci_dev_unlock(hdev);
5758 }
5759
5760 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5761                                              struct sk_buff *skb)
5762 {
5763         struct hci_ev_disconn_phy_link_complete *ev = data;
5764         struct hci_conn *hcon;
5765
5766         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5767
5768         if (ev->status)
5769                 return;
5770
5771         hci_dev_lock(hdev);
5772
5773         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5774         if (hcon && hcon->type == AMP_LINK) {
5775                 hcon->state = BT_CLOSED;
5776                 hci_disconn_cfm(hcon, ev->reason);
5777                 hci_conn_del(hcon);
5778         }
5779
5780         hci_dev_unlock(hdev);
5781 }
5782 #endif
5783
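     /* Fill in the initiator and responder address information of an LE
      * connection based on our role, the peer address reported in the event
      * and, if the controller provided one, the Local RPA it used.
      */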
5784 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5785                                 u8 bdaddr_type, bdaddr_t *local_rpa)
5786 {
5787         if (conn->out) {
5788                 conn->dst_type = bdaddr_type;
5789                 conn->resp_addr_type = bdaddr_type;
5790                 bacpy(&conn->resp_addr, bdaddr);
5791
5792                 /* If the controller has set a Local RPA, then it must be
5793                  * used instead of hdev->rpa.
5794                  */
5795                 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5796                         conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5797                         bacpy(&conn->init_addr, local_rpa);
5798                 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5799                         conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5800                         bacpy(&conn->init_addr, &conn->hdev->rpa);
5801                 } else {
5802                         hci_copy_identity_address(conn->hdev, &conn->init_addr,
5803                                                   &conn->init_addr_type);
5804                 }
5805         } else {
5806                 conn->resp_addr_type = conn->hdev->adv_addr_type;
5807                 /* If the controller has set a Local RPA, then it must be
5808                  * used instead of hdev->rpa.
5809                  */
5810                 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5811                         conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5812                         bacpy(&conn->resp_addr, local_rpa);
5813                 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5814                         /* In case of ext adv, resp_addr will be updated in
5815                          * Adv Terminated event.
5816                          */
5817                         if (!ext_adv_capable(conn->hdev))
5818                                 bacpy(&conn->resp_addr,
5819                                       &conn->hdev->random_addr);
5820                 } else {
5821                         bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5822                 }
5823
5824                 conn->init_addr_type = bdaddr_type;
5825                 bacpy(&conn->init_addr, bdaddr);
5826
5827                 /* For incoming connections, set the default minimum
5828                  * and maximum connection interval. They will be used
5829                  * to check if the parameters are in range and if not
5830                  * to check if the parameters are in range and, if not,
5831                  * to trigger the connection update procedure.
5832                 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5833                 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5834         }
5835 }
5836
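     /* Common handler for the LE Connection Complete and LE Enhanced
      * Connection Complete events; local_rpa is NULL for the legacy event,
      * which does not report the local resolvable private address.
      */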
5837 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5838                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5839                                  bdaddr_t *local_rpa, u8 role, u16 handle,
5840                                  u16 interval, u16 latency,
5841                                  u16 supervision_timeout)
5842 {
5843         struct hci_conn_params *params;
5844         struct hci_conn *conn;
5845         struct smp_irk *irk;
5846         u8 addr_type;
5847
5848         hci_dev_lock(hdev);
5849
5850         /* All controllers implicitly stop advertising in the event of a
5851          * connection, so ensure that the state bit is cleared.
5852          */
5853         hci_dev_clear_flag(hdev, HCI_LE_ADV);
5854
5855         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5856         if (!conn) {
5857                 /* In case of an error status when there is no pending connection,
5858                  * just unlock as there is nothing to clean up.
5859                  */
5860                 if (status)
5861                         goto unlock;
5862
5863                 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5864                 if (!conn) {
5865                         bt_dev_err(hdev, "no memory for new connection");
5866                         goto unlock;
5867                 }
5868
5869                 conn->dst_type = bdaddr_type;
5870
5871                 /* If we didn't have a hci_conn object previously
5872                  * but we're in central role this must be something
5873                  * initiated using an accept list. Since accept list based
5874                  * connections are not "first class citizens" we don't
5875                  * have full tracking of them. Therefore, we go ahead
5876                  * with a "best effort" approach of determining the
5877                  * initiator address based on the HCI_PRIVACY flag.
5878                  */
5879                 if (conn->out) {
5880                         conn->resp_addr_type = bdaddr_type;
5881                         bacpy(&conn->resp_addr, bdaddr);
5882                         if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5883                                 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5884                                 bacpy(&conn->init_addr, &hdev->rpa);
5885                         } else {
5886                                 hci_copy_identity_address(hdev,
5887                                                           &conn->init_addr,
5888                                                           &conn->init_addr_type);
5889                         }
5890                 }
5891         } else {
5892                 cancel_delayed_work(&conn->le_conn_timeout);
5893         }
5894
5895         /* The HCI_LE_Connection_Complete event is only sent once per connection.
5896          * Processing it more than once per connection can corrupt kernel memory.
5897          *
5898          * As the connection handle is set here for the first time, it indicates
5899          * whether the connection is already set up.
5900          */
5901         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5902                 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5903                 goto unlock;
5904         }
5905
5906         le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5907
5908         /* Lookup the identity address from the stored connection
5909          * address and address type.
5910          *
5911          * When establishing connections to an identity address, the
5912          * connection procedure will store the resolvable random
5913          * address first. Now if it can be converted back into the
5914          * identity address, start using the identity address from
5915          * now on.
5916          */
5917         irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5918         if (irk) {
5919                 bacpy(&conn->dst, &irk->bdaddr);
5920                 conn->dst_type = irk->addr_type;
5921         }
5922
5923         conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5924
5925         if (handle > HCI_CONN_HANDLE_MAX) {
5926                 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
5927                            HCI_CONN_HANDLE_MAX);
5928                 status = HCI_ERROR_INVALID_PARAMETERS;
5929         }
5930
5931         /* All connection failure handling is taken care of by the
5932          * hci_conn_failed function which is triggered by the HCI
5933          * request completion callbacks used for connecting.
5934          */
5935         if (status)
5936                 goto unlock;
5937
5938         /* Drop the connection if it has been aborted */
5939         if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5940                 hci_conn_drop(conn);
5941                 goto unlock;
5942         }
5943
5944         if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5945                 addr_type = BDADDR_LE_PUBLIC;
5946         else
5947                 addr_type = BDADDR_LE_RANDOM;
5948
5949         /* Drop the connection if the device is blocked */
5950         if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5951                 hci_conn_drop(conn);
5952                 goto unlock;
5953         }
5954
5955         if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5956                 mgmt_device_connected(hdev, conn, NULL, 0);
5957
5958         conn->sec_level = BT_SECURITY_LOW;
5959         conn->handle = handle;
5960         conn->state = BT_CONFIG;
5961
5962         /* Store current advertising instance as connection advertising instance
5963          * when software rotation is in use so it can be re-enabled when
5964          * disconnected.
5965          */
5966         if (!ext_adv_capable(hdev))
5967                 conn->adv_instance = hdev->cur_adv_instance;
5968
5969         conn->le_conn_interval = interval;
5970         conn->le_conn_latency = latency;
5971         conn->le_supv_timeout = supervision_timeout;
5972
5973         hci_debugfs_create_conn(conn);
5974         hci_conn_add_sysfs(conn);
5975
5976         /* The remote features procedure is defined for central
5977          * role only. So only in case of an initiated connection
5978          * request the remote features.
5979          *
5980          * If the local controller supports peripheral-initiated features
5981          * exchange, then requesting the remote features in peripheral
5982          * role is possible. Otherwise just transition into the
5983          * connected state without requesting the remote features.
5984          */
5985         if (conn->out ||
5986             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5987                 struct hci_cp_le_read_remote_features cp;
5988
5989                 cp.handle = __cpu_to_le16(conn->handle);
5990
5991                 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5992                              sizeof(cp), &cp);
5993
5994                 hci_conn_hold(conn);
5995         } else {
5996                 conn->state = BT_CONNECTED;
5997                 hci_connect_cfm(conn, status);
5998         }
5999
6000         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6001                                            conn->dst_type);
6002         if (params) {
6003                 list_del_init(&params->action);
6004                 if (params->conn) {
6005                         hci_conn_drop(params->conn);
6006                         hci_conn_put(params->conn);
6007                         params->conn = NULL;
6008                 }
6009         }
6010
6011 unlock:
6012         hci_update_passive_scan(hdev);
6013         hci_dev_unlock(hdev);
6014 }
6015
6016 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6017                                      struct sk_buff *skb)
6018 {
6019         struct hci_ev_le_conn_complete *ev = data;
6020
6021         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6022
6023         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6024                              NULL, ev->role, le16_to_cpu(ev->handle),
6025                              le16_to_cpu(ev->interval),
6026                              le16_to_cpu(ev->latency),
6027                              le16_to_cpu(ev->supervision_timeout));
6028 }
6029
6030 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6031                                          struct sk_buff *skb)
6032 {
6033         struct hci_ev_le_enh_conn_complete *ev = data;
6034
6035         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6036
6037         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6038                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6039                              le16_to_cpu(ev->interval),
6040                              le16_to_cpu(ev->latency),
6041                              le16_to_cpu(ev->supervision_timeout));
6042 }
6043
6044 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6045                                     struct sk_buff *skb)
6046 {
6047         struct hci_evt_le_ext_adv_set_term *ev = data;
6048         struct hci_conn *conn;
6049         struct adv_info *adv, *n;
6050
6051         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6052
6053         /* The Bluetooth Core 5.3 specification clearly states that this event
6054          * shall not be sent when the Host disables the advertising set. So in
6055          * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6056          *
6057          * When the Host disables an advertising set, all cleanup is done via
6058          * its command callback and does not need to be duplicated here.
6059          */
6060         if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6061                 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6062                 return;
6063         }
6064
6065         hci_dev_lock(hdev);
6066
6067         adv = hci_find_adv_instance(hdev, ev->handle);
6068
6069         if (ev->status) {
6070                 if (!adv)
6071                         goto unlock;
6072
6073                 /* Remove advertising as it has been terminated */
6074                 hci_remove_adv_instance(hdev, ev->handle);
6075                 mgmt_advertising_removed(NULL, hdev, ev->handle);
6076
6077                 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6078                         if (adv->enabled)
6079                                 goto unlock;
6080                 }
6081
6082                 /* We are no longer advertising, clear HCI_LE_ADV */
6083                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
6084                 goto unlock;
6085         }
6086
6087         if (adv)
6088                 adv->enabled = false;
6089
6090         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6091         if (conn) {
6092                 /* Store handle in the connection so the correct advertising
6093                  * instance can be re-enabled when disconnected.
6094                  */
6095                 conn->adv_instance = ev->handle;
6096
6097                 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6098                     bacmp(&conn->resp_addr, BDADDR_ANY))
6099                         goto unlock;
6100
6101                 if (!ev->handle) {
6102                         bacpy(&conn->resp_addr, &hdev->random_addr);
6103                         goto unlock;
6104                 }
6105
6106                 if (adv)
6107                         bacpy(&conn->resp_addr, &adv->random_addr);
6108         }
6109
6110 unlock:
6111         hci_dev_unlock(hdev);
6112 }
6113
6114 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6115                                             struct sk_buff *skb)
6116 {
6117         struct hci_ev_le_conn_update_complete *ev = data;
6118         struct hci_conn *conn;
6119
6120         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6121
6122         if (ev->status)
6123                 return;
6124
6125         hci_dev_lock(hdev);
6126
6127         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6128         if (conn) {
6129                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6130                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6131                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6132         }
6133
6134         hci_dev_unlock(hdev);
6135 }
6136
6137 /* This function requires the caller holds hdev->lock */
6138 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6139                                               bdaddr_t *addr,
6140                                               u8 addr_type, bool addr_resolved,
6141                                               u8 adv_type)
6142 {
6143         struct hci_conn *conn;
6144         struct hci_conn_params *params;
6145
6146         /* If the event is not connectable don't proceed further */
6147         if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6148                 return NULL;
6149
6150         /* Ignore if the device is blocked or hdev is suspended */
6151         if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6152             hdev->suspended)
6153                 return NULL;
6154
6155         /* Most controllers will fail if we try to create new connections
6156          * while we have an existing one in peripheral role.
6157          */
6158         if (hdev->conn_hash.le_num_peripheral > 0 &&
6159             (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6160              !(hdev->le_states[3] & 0x10)))
6161                 return NULL;
6162
6163         /* If we're not connectable only connect devices that we have in
6164          * our pend_le_conns list.
6165          */
6166         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6167                                            addr_type);
6168         if (!params)
6169                 return NULL;
6170
6171         if (!params->explicit_connect) {
6172                 switch (params->auto_connect) {
6173                 case HCI_AUTO_CONN_DIRECT:
6174                         /* Only devices advertising with ADV_DIRECT_IND
6175                          * trigger a connection attempt. This allows
6176                          * incoming connections from peripheral devices.
6177                          */
6178                         if (adv_type != LE_ADV_DIRECT_IND)
6179                                 return NULL;
6180                         break;
6181                 case HCI_AUTO_CONN_ALWAYS:
6182                         /* Devices advertising with ADV_IND or ADV_DIRECT_IND
6183                          * trigger a connection attempt. This means that
6184                          * incoming connections from peripheral devices are
6185                          * accepted and also outgoing connections to peripheral
6186                          * devices are established when found.
6187                          */
6188                         break;
6189                 default:
6190                         return NULL;
6191                 }
6192         }
6193
6194         conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6195                               BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6196                               HCI_ROLE_MASTER);
6197         if (!IS_ERR(conn)) {
6198                 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6199                  * by the higher layer that tried to connect; if not, store
6200                  * the pointer since we don't really have any other owner
6201                  * of the object besides the params that triggered it. This
6202                  * way we can abort the connection if the parameters get
6203                  * removed and keep the reference count consistent once the
6204                  * connection is established.
6205                  */
6206
6207                 if (!params->explicit_connect)
6208                         params->conn = hci_conn_get(conn);
6209
6210                 return conn;
6211         }
6212
6213         switch (PTR_ERR(conn)) {
6214         case -EBUSY:
6215                 /* If hci_connect() returns -EBUSY it means there is already
6216                  * an LE connection attempt going on. Since controllers don't
6217                  * support more than one connection attempt at a time, we
6218                  * don't consider this an error case.
6219                  */
6220                 break;
6221         default:
6222                 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6223                 return NULL;
6224         }
6225
6226         return NULL;
6227 }
6228
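/* Common entry point for legacy, extended and direct advertising reports:
 * resolve the advertiser address, kick off any pending LE connection to it
 * and forward the (possibly merged) report to the management interface.
 */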
6229 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6230                                u8 bdaddr_type, bdaddr_t *direct_addr,
6231                                u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6232                                bool ext_adv, bool ctl_time, u64 instant)
6233 {
6234         struct discovery_state *d = &hdev->discovery;
6235         struct smp_irk *irk;
6236         struct hci_conn *conn;
6237         bool match, bdaddr_resolved;
6238         u32 flags;
6239         u8 *ptr;
6240
6241         switch (type) {
6242         case LE_ADV_IND:
6243         case LE_ADV_DIRECT_IND:
6244         case LE_ADV_SCAN_IND:
6245         case LE_ADV_NONCONN_IND:
6246         case LE_ADV_SCAN_RSP:
6247                 break;
6248         default:
6249                 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6250                                        "type: 0x%02x", type);
6251                 return;
6252         }
6253
6254         if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
6255                 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
6256                 return;
6257         }
6258
6259         /* Find the end of the data in case the report contains padded zero
6260          * bytes at the end causing an invalid length value.
6261          *
6262          * When data is NULL, len is 0 so there is no need for extra ptr
6263          * check as 'ptr < data + 0' is already false in such case.
6264          * check as 'ptr < data + 0' is already false in such a case.
6265         for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6266                 if (ptr + 1 + *ptr > data + len)
6267                         break;
6268         }
6269
6270         /* Adjust for actual length. This handles the case when the
6271          * remote device is advertising with an incorrect data length.
6272          */
6273         len = ptr - data;
6274
6275         /* If the direct address is present, then this report is from
6276          * an LE Direct Advertising Report event. In that case it is
6277          * important to see if the address matches the local
6278          * controller address.
6279          */
6280         if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6281                 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6282                                                   &bdaddr_resolved);
6283
6284                 /* Only resolvable random addresses are valid for this
6285                  * kind of report and others can be ignored.
6286                  */
6287                 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6288                         return;
6289
6290                 /* If the controller is not using resolvable random
6291                  * addresses, then this report can be ignored.
6292                  */
6293                 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6294                         return;
6295
6296                 /* If the local IRK of the controller does not match the
6297                  * resolvable random address provided, then
6298                  * this report can be ignored.
6299                  */
6300                 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6301                         return;
6302         }
6303
6304         /* Check if we need to convert to identity address */
6305         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6306         if (irk) {
6307                 bdaddr = &irk->bdaddr;
6308                 bdaddr_type = irk->addr_type;
6309         }
6310
6311         bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6312
6313         /* Check if we have been requested to connect to this device.
6314          *
6315          * direct_addr is set only for directed advertising reports (it is NULL
6316          * for advertising reports) and is already verified to be RPA above.
6317          */
6318         conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6319                                      type);
6320         if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
6321                 /* Store report for later inclusion by
6322                  * mgmt_device_connected
6323                  */
6324                 memcpy(conn->le_adv_data, data, len);
6325                 conn->le_adv_data_len = len;
6326         }
6327
6328         if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6329                 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6330         else
6331                 flags = 0;
6332
6333         /* All scan results should be sent up for Mesh systems */
6334         if (hci_dev_test_flag(hdev, HCI_MESH)) {
6335                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6336                                   rssi, flags, data, len, NULL, 0, instant);
6337                 return;
6338         }
6339
6340         /* Passive scanning shouldn't trigger any device found events,
6341          * except for devices marked as CONN_REPORT for which we do send
6342          * device found events, or when advertisement monitoring is requested.
6343          */
6344         if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6345                 if (type == LE_ADV_DIRECT_IND)
6346                         return;
6347
6348                 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6349                                                bdaddr, bdaddr_type) &&
6350                     idr_is_empty(&hdev->adv_monitors_idr))
6351                         return;
6352
6353                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6354                                   rssi, flags, data, len, NULL, 0, 0);
6355                 return;
6356         }
6357
6358         /* When receiving a scan response, there is no way to
6359          * know if the remote device is connectable or not. However,
6360          * since scan responses are merged with a previously seen
6361          * advertising report, the flags field from that report
6362          * will be used.
6363          *
6364          * In the unlikely case that a controller just sends a scan
6365          * response event that doesn't match the pending report,
6366          * it is marked as a standalone SCAN_RSP.
6367          */
6368         if (type == LE_ADV_SCAN_RSP)
6369                 flags = MGMT_DEV_FOUND_SCAN_RSP;
6370
6371         /* If there's nothing pending, either store the data from this
6372          * event or send an immediate device found event if the data
6373          * should not be stored for later.
6374          */
6375         if (!ext_adv && !has_pending_adv_report(hdev)) {
6376                 /* If the report will trigger a SCAN_REQ store it for
6377                  * later merging.
6378                  */
6379                 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6380                         store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6381                                                  rssi, flags, data, len);
6382                         return;
6383                 }
6384
6385                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6386                                   rssi, flags, data, len, NULL, 0, 0);
6387                 return;
6388         }
6389
6390         /* Check if the pending report is for the same device as the new one */
6391         match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6392                  bdaddr_type == d->last_adv_addr_type);
6393
6394         /* If the pending data doesn't match this report or this isn't a
6395          * scan response (e.g. we got a duplicate ADV_IND) then force
6396          * sending of the pending data.
6397          */
6398         if (type != LE_ADV_SCAN_RSP || !match) {
6399                 /* Send out whatever is in the cache, but skip duplicates */
6400                 if (!match)
6401                         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6402                                           d->last_adv_addr_type, NULL,
6403                                           d->last_adv_rssi, d->last_adv_flags,
6404                                           d->last_adv_data,
6405                                           d->last_adv_data_len, NULL, 0, 0);
6406
6407                 /* If the new report will trigger a SCAN_REQ store it for
6408                  * later merging.
6409                  */
6410                 if (!ext_adv && (type == LE_ADV_IND ||
6411                                  type == LE_ADV_SCAN_IND)) {
6412                         store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6413                                                  rssi, flags, data, len);
6414                         return;
6415                 }
6416
6417                 /* The advertising reports cannot be merged, so clear
6418                  * the pending report and send out a device found event.
6419                  */
6420                 clear_pending_adv_report(hdev);
6421                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6422                                   rssi, flags, data, len, NULL, 0, 0);
6423                 return;
6424         }
6425
6426         /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6427          * the new event is a SCAN_RSP. We can therefore proceed with
6428          * sending a merged device found event.
6429          */
6430         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6431                           d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6432                           d->last_adv_data, d->last_adv_data_len, data, len, 0);
6433         clear_pending_adv_report(hdev);
6434 }
6435
6436 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6437                                   struct sk_buff *skb)
6438 {
6439         struct hci_ev_le_advertising_report *ev = data;
6440         u64 instant = jiffies;
6441
6442         if (!ev->num)
6443                 return;
6444
6445         hci_dev_lock(hdev);
6446
6447         while (ev->num--) {
6448                 struct hci_ev_le_advertising_info *info;
6449                 s8 rssi;
6450
6451                 info = hci_le_ev_skb_pull(hdev, skb,
6452                                           HCI_EV_LE_ADVERTISING_REPORT,
6453                                           sizeof(*info));
6454                 if (!info)
6455                         break;
6456
6457                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6458                                         info->length + 1))
6459                         break;
6460
6461                 if (info->length <= HCI_MAX_AD_LENGTH) {
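                        /* The RSSI byte immediately follows the data */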
6462                         rssi = info->data[info->length];
6463                         process_adv_report(hdev, info->type, &info->bdaddr,
6464                                            info->bdaddr_type, NULL, 0, rssi,
6465                                            info->data, info->length, false,
6466                                            false, instant);
6467                 } else {
6468                         bt_dev_err(hdev, "Dropping invalid advertising data");
6469                 }
6470         }
6471
6472         hci_dev_unlock(hdev);
6473 }
6474
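/* Translate an extended advertising report event type into the legacy PDU
 * type used by process_adv_report(). Legacy PDUs are matched on the exact
 * event type value; extended PDUs are mapped from their connectable,
 * scannable and directed property bits.
 */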
6475 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6476 {
6477         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6478                 switch (evt_type) {
6479                 case LE_LEGACY_ADV_IND:
6480                         return LE_ADV_IND;
6481                 case LE_LEGACY_ADV_DIRECT_IND:
6482                         return LE_ADV_DIRECT_IND;
6483                 case LE_LEGACY_ADV_SCAN_IND:
6484                         return LE_ADV_SCAN_IND;
6485                 case LE_LEGACY_NONCONN_IND:
6486                         return LE_ADV_NONCONN_IND;
6487                 case LE_LEGACY_SCAN_RSP_ADV:
6488                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6489                         return LE_ADV_SCAN_RSP;
6490                 }
6491
6492                 goto invalid;
6493         }
6494
6495         if (evt_type & LE_EXT_ADV_CONN_IND) {
6496                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6497                         return LE_ADV_DIRECT_IND;
6498
6499                 return LE_ADV_IND;
6500         }
6501
6502         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6503                 return LE_ADV_SCAN_RSP;
6504
6505         if (evt_type & LE_EXT_ADV_SCAN_IND)
6506                 return LE_ADV_SCAN_IND;
6507
6508         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6509             evt_type & LE_EXT_ADV_DIRECT_IND)
6510                 return LE_ADV_NONCONN_IND;
6511
6512 invalid:
6513         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6514                                evt_type);
6515
6516         return LE_ADV_INVALID;
6517 }
6518
6519 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6520                                       struct sk_buff *skb)
6521 {
6522         struct hci_ev_le_ext_adv_report *ev = data;
6523         u64 instant = jiffies;
6524
6525         if (!ev->num)
6526                 return;
6527
6528         hci_dev_lock(hdev);
6529
6530         while (ev->num--) {
6531                 struct hci_ev_le_ext_adv_info *info;
6532                 u8 legacy_evt_type;
6533                 u16 evt_type;
6534
6535                 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6536                                           sizeof(*info));
6537                 if (!info)
6538                         break;
6539
6540                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6541                                         info->length))
6542                         break;
6543
6544                 evt_type = __le16_to_cpu(info->type);
6545                 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6546                 if (legacy_evt_type != LE_ADV_INVALID) {
6547                         process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6548                                            info->bdaddr_type, NULL, 0,
6549                                            info->rssi, info->data, info->length,
6550                                            !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6551                                            false, instant);
6552                 }
6553         }
6554
6555         hci_dev_unlock(hdev);
6556 }
6557
6558 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6559 {
6560         struct hci_cp_le_pa_term_sync cp;
6561
6562         memset(&cp, 0, sizeof(cp));
6563         cp.handle = handle;
6564
6565         return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6566 }
6567
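/* A periodic advertising sync has been established; if the ISO layer is not
 * willing to accept it, terminate the sync again right away.
 */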
6568 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6569                                             struct sk_buff *skb)
6570 {
6571         struct hci_ev_le_pa_sync_established *ev = data;
6572         int mask = hdev->link_mode;
6573         __u8 flags = 0;
6574
6575         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6576
6577         if (ev->status)
6578                 return;
6579
6580         hci_dev_lock(hdev);
6581
6582         hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6583
6584         mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6585         if (!(mask & HCI_LM_ACCEPT))
6586                 hci_le_pa_term_sync(hdev, ev->handle);
6587
6588         hci_dev_unlock(hdev);
6589 }
6590
6591 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6592                                             struct sk_buff *skb)
6593 {
6594         struct hci_ev_le_remote_feat_complete *ev = data;
6595         struct hci_conn *conn;
6596
6597         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6598
6599         hci_dev_lock(hdev);
6600
6601         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6602         if (conn) {
6603                 if (!ev->status)
6604                         memcpy(conn->features[0], ev->features, 8);
6605
6606                 if (conn->state == BT_CONFIG) {
6607                         __u8 status;
6608
6609                         /* If the local controller supports peripheral-initiated
6610                          * features exchange, but the remote controller does
6611                          * not, then it is possible that the error code 0x1a
6612                          * for unsupported remote feature gets returned.
6613                          *
6614                          * In this specific case, allow the connection to
6615                          * transition into connected state and mark it as
6616                          * successful.
6617                          */
6618                         if (!conn->out && ev->status == 0x1a &&
6619                             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6620                                 status = 0x00;
6621                         else
6622                                 status = ev->status;
6623
6624                         conn->state = BT_CONNECTED;
6625                         hci_connect_cfm(conn, status);
6626                         hci_conn_drop(conn);
6627                 }
6628         }
6629
6630         hci_dev_unlock(hdev);
6631 }
6632
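/* The controller asks for the LTK of an LE connection: look up a matching
 * key (checking EDiv/Rand for non-SC keys) and reply with it, or send a
 * negative reply if none is found.
 */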
6633 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6634                                    struct sk_buff *skb)
6635 {
6636         struct hci_ev_le_ltk_req *ev = data;
6637         struct hci_cp_le_ltk_reply cp;
6638         struct hci_cp_le_ltk_neg_reply neg;
6639         struct hci_conn *conn;
6640         struct smp_ltk *ltk;
6641
6642         bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6643
6644         hci_dev_lock(hdev);
6645
6646         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6647         if (!conn)
6648                 goto not_found;
6649
6650         ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6651         if (!ltk)
6652                 goto not_found;
6653
6654         if (smp_ltk_is_sc(ltk)) {
6655                 /* With SC both EDiv and Rand are set to zero */
6656                 if (ev->ediv || ev->rand)
6657                         goto not_found;
6658         } else {
6659                 /* For non-SC keys check that EDiv and Rand match */
6660                 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6661                         goto not_found;
6662         }
6663
6664         memcpy(cp.ltk, ltk->val, ltk->enc_size);
6665         memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6666         cp.handle = cpu_to_le16(conn->handle);
6667
6668         conn->pending_sec_level = smp_ltk_sec_level(ltk);
6669
6670         conn->enc_key_size = ltk->enc_size;
6671
6672         hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6673
6674         /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6675          * temporary key used to encrypt a connection following
6676          * pairing. It is used during the Encrypted Session Setup to
6677          * distribute the keys. Later, security can be re-established
6678          * using a distributed LTK.
6679          */
6680         if (ltk->type == SMP_STK) {
6681                 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6682                 list_del_rcu(&ltk->list);
6683                 kfree_rcu(ltk, rcu);
6684         } else {
6685                 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6686         }
6687
6688         hci_dev_unlock(hdev);
6689
6690         return;
6691
6692 not_found:
6693         neg.handle = ev->handle;
6694         hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6695         hci_dev_unlock(hdev);
6696 }
6697
6698 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6699                                       u8 reason)
6700 {
6701         struct hci_cp_le_conn_param_req_neg_reply cp;
6702
6703         cp.handle = cpu_to_le16(handle);
6704         cp.reason = reason;
6705
6706         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6707                      &cp);
6708 }
6709
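/* The remote device requested new connection parameters: reject requests for
 * unknown connections or invalid parameters, otherwise accept them and, when
 * acting as central, forward the parameters to userspace via mgmt so they
 * can be stored.
 */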
6710 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6711                                              struct sk_buff *skb)
6712 {
6713         struct hci_ev_le_remote_conn_param_req *ev = data;
6714         struct hci_cp_le_conn_param_req_reply cp;
6715         struct hci_conn *hcon;
6716         u16 handle, min, max, latency, timeout;
6717
6718         bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6719
6720         handle = le16_to_cpu(ev->handle);
6721         min = le16_to_cpu(ev->interval_min);
6722         max = le16_to_cpu(ev->interval_max);
6723         latency = le16_to_cpu(ev->latency);
6724         timeout = le16_to_cpu(ev->timeout);
6725
6726         hcon = hci_conn_hash_lookup_handle(hdev, handle);
6727         if (!hcon || hcon->state != BT_CONNECTED)
6728                 return send_conn_param_neg_reply(hdev, handle,
6729                                                  HCI_ERROR_UNKNOWN_CONN_ID);
6730
6731         if (hci_check_conn_params(min, max, latency, timeout))
6732                 return send_conn_param_neg_reply(hdev, handle,
6733                                                  HCI_ERROR_INVALID_LL_PARAMS);
6734
6735         if (hcon->role == HCI_ROLE_MASTER) {
6736                 struct hci_conn_params *params;
6737                 u8 store_hint;
6738
6739                 hci_dev_lock(hdev);
6740
6741                 params = hci_conn_params_lookup(hdev, &hcon->dst,
6742                                                 hcon->dst_type);
6743                 if (params) {
6744                         params->conn_min_interval = min;
6745                         params->conn_max_interval = max;
6746                         params->conn_latency = latency;
6747                         params->supervision_timeout = timeout;
6748                         store_hint = 0x01;
6749                 } else {
6750                         store_hint = 0x00;
6751                 }
6752
6753                 hci_dev_unlock(hdev);
6754
6755                 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6756                                     store_hint, min, max, latency, timeout);
6757         }
6758
6759         cp.handle = ev->handle;
6760         cp.interval_min = ev->interval_min;
6761         cp.interval_max = ev->interval_max;
6762         cp.latency = ev->latency;
6763         cp.timeout = ev->timeout;
6764         cp.min_ce_len = 0;
6765         cp.max_ce_len = 0;
6766
6767         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6768 }
6769
6770 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6771                                          struct sk_buff *skb)
6772 {
6773         struct hci_ev_le_direct_adv_report *ev = data;
6774         u64 instant = jiffies;
6775         int i;
6776
6777         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6778                                 flex_array_size(ev, info, ev->num)))
6779                 return;
6780
6781         if (!ev->num)
6782                 return;
6783
6784         hci_dev_lock(hdev);
6785
6786         for (i = 0; i < ev->num; i++) {
6787                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6788
6789                 process_adv_report(hdev, info->type, &info->bdaddr,
6790                                    info->bdaddr_type, &info->direct_addr,
6791                                    info->direct_addr_type, info->rssi, NULL, 0,
6792                                    false, false, instant);
6793         }
6794
6795         hci_dev_unlock(hdev);
6796 }
6797
6798 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6799                                   struct sk_buff *skb)
6800 {
6801         struct hci_ev_le_phy_update_complete *ev = data;
6802         struct hci_conn *conn;
6803
6804         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6805
6806         if (ev->status)
6807                 return;
6808
6809         hci_dev_lock(hdev);
6810
6811         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6812         if (!conn)
6813                 goto unlock;
6814
6815         conn->le_tx_phy = ev->tx_phy;
6816         conn->le_rx_phy = ev->rx_phy;
6817
6818 unlock:
6819         hci_dev_unlock(hdev);
6820 }
6821
6822 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6823                                         struct sk_buff *skb)
6824 {
6825         struct hci_evt_le_cis_established *ev = data;
6826         struct hci_conn *conn;
6827         u16 handle = __le16_to_cpu(ev->handle);
6828
6829         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6830
6831         hci_dev_lock(hdev);
6832
6833         conn = hci_conn_hash_lookup_handle(hdev, handle);
6834         if (!conn) {
6835                 bt_dev_err(hdev,
6836                            "Unable to find connection with handle 0x%4.4x",
6837                            handle);
6838                 goto unlock;
6839         }
6840
6841         if (conn->type != ISO_LINK) {
6842                 bt_dev_err(hdev,
6843                            "Invalid connection link type handle 0x%4.4x",
6844                            handle);
6845                 goto unlock;
6846         }
6847
6848         if (conn->role == HCI_ROLE_SLAVE) {
6849                 __le32 interval;
6850
6851                 memset(&interval, 0, sizeof(interval));
6852
6853                 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
6854                 conn->iso_qos.in.interval = le32_to_cpu(interval);
6855                 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
6856                 conn->iso_qos.out.interval = le32_to_cpu(interval);
6857                 conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
6858                 conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
6859                 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
6860                 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
6861                 conn->iso_qos.in.phy = ev->c_phy;
6862                 conn->iso_qos.out.phy = ev->p_phy;
6863         }
6864
6865         if (!ev->status) {
6866                 conn->state = BT_CONNECTED;
6867                 hci_debugfs_create_conn(conn);
6868                 hci_conn_add_sysfs(conn);
6869                 hci_iso_setup_path(conn);
6870                 goto unlock;
6871         }
6872
6873         hci_connect_cfm(conn, ev->status);
6874         hci_conn_del(conn);
6875
6876 unlock:
6877         hci_dev_unlock(hdev);
6878 }
6879
6880 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6881 {
6882         struct hci_cp_le_reject_cis cp;
6883
6884         memset(&cp, 0, sizeof(cp));
6885         cp.handle = handle;
6886         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6887         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6888 }
6889
6890 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6891 {
6892         struct hci_cp_le_accept_cis cp;
6893
6894         memset(&cp, 0, sizeof(cp));
6895         cp.handle = handle;
6896         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6897 }
6898
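/* The central requested creation of a CIS: let the ISO layer decide via
 * hci_proto_connect_ind() whether to reject it, accept it immediately or
 * defer acceptance to the socket owner.
 */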
6899 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6900                                struct sk_buff *skb)
6901 {
6902         struct hci_evt_le_cis_req *ev = data;
6903         u16 acl_handle, cis_handle;
6904         struct hci_conn *acl, *cis;
6905         int mask;
6906         __u8 flags = 0;
6907
6908         acl_handle = __le16_to_cpu(ev->acl_handle);
6909         cis_handle = __le16_to_cpu(ev->cis_handle);
6910
6911         bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6912                    acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6913
6914         hci_dev_lock(hdev);
6915
6916         acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6917         if (!acl)
6918                 goto unlock;
6919
6920         mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6921         if (!(mask & HCI_LM_ACCEPT)) {
6922                 hci_le_reject_cis(hdev, ev->cis_handle);
6923                 goto unlock;
6924         }
6925
6926         cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6927         if (!cis) {
6928                 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
6929                 if (!cis) {
6930                         hci_le_reject_cis(hdev, ev->cis_handle);
6931                         goto unlock;
6932                 }
6933                 cis->handle = cis_handle;
6934         }
6935
6936         cis->iso_qos.cig = ev->cig_id;
6937         cis->iso_qos.cis = ev->cis_id;
6938
6939         if (!(flags & HCI_PROTO_DEFER)) {
6940                 hci_le_accept_cis(hdev, ev->cis_handle);
6941         } else {
6942                 cis->state = BT_CONNECT2;
6943                 hci_connect_cfm(cis, 0);
6944         }
6945
6946 unlock:
6947         hci_dev_unlock(hdev);
6948 }
6949
6950 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6951                                            struct sk_buff *skb)
6952 {
6953         struct hci_evt_le_create_big_complete *ev = data;
6954         struct hci_conn *conn;
6955
6956         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6957
6958         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6959                                 flex_array_size(ev, bis_handle, ev->num_bis)))
6960                 return;
6961
6962         hci_dev_lock(hdev);
6963
6964         conn = hci_conn_hash_lookup_big(hdev, ev->handle);
6965         if (!conn)
6966                 goto unlock;
6967
6968         if (conn->type != ISO_LINK) {
6969                 bt_dev_err(hdev,
6970                            "Invalid connection link type handle 0x%2.2x",
6971                            ev->handle);
6972                 goto unlock;
6973         }
6974
6975         if (ev->num_bis)
6976                 conn->handle = __le16_to_cpu(ev->bis_handle[0]);
6977
6978         if (!ev->status) {
6979                 conn->state = BT_CONNECTED;
6980                 hci_debugfs_create_conn(conn);
6981                 hci_conn_add_sysfs(conn);
6982                 hci_iso_setup_path(conn);
6983                 goto unlock;
6984         }
6985
6986         hci_connect_cfm(conn, ev->status);
6987         hci_conn_del(conn);
6988
6989 unlock:
6990         hci_dev_unlock(hdev);
6991 }
6992
6993 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6994                                             struct sk_buff *skb)
6995 {
6996         struct hci_evt_le_big_sync_estabilished *ev = data;
6997         struct hci_conn *bis;
6998         int i;
6999
7000         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7001
7002         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7003                                 flex_array_size(ev, bis, ev->num_bis)))
7004                 return;
7005
7006         if (ev->status)
7007                 return;
7008
7009         hci_dev_lock(hdev);
7010
7011         for (i = 0; i < ev->num_bis; i++) {
7012                 u16 handle = le16_to_cpu(ev->bis[i]);
7013                 __le32 interval;
7014
7015                 bis = hci_conn_hash_lookup_handle(hdev, handle);
7016                 if (!bis) {
7017                         bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7018                                            HCI_ROLE_SLAVE);
7019                         if (!bis)
7020                                 continue;
7021                         bis->handle = handle;
7022                 }
7023
7024                 bis->iso_qos.big = ev->handle;
7025                 memset(&interval, 0, sizeof(interval));
7026                 memcpy(&interval, ev->latency, sizeof(ev->latency));
7027                 bis->iso_qos.in.interval = le32_to_cpu(interval);
7028                 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7029                 bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7030                 bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
7031
7032                 hci_iso_setup_path(bis);
7033         }
7034
7035         hci_dev_unlock(hdev);
7036 }
7037
7038 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7039                                            struct sk_buff *skb)
7040 {
7041         struct hci_evt_le_big_info_adv_report *ev = data;
7042         int mask = hdev->link_mode;
7043         __u8 flags = 0;
7044
7045         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7046
7047         hci_dev_lock(hdev);
7048
7049         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7050         if (!(mask & HCI_LM_ACCEPT))
7051                 hci_le_pa_term_sync(hdev, ev->sync_handle);
7052
7053         hci_dev_unlock(hdev);
7054 }
7055
7056 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7057 [_op] = { \
7058         .func = _func, \
7059         .min_len = _min_len, \
7060         .max_len = _max_len, \
7061 }
7062
7063 #define HCI_LE_EV(_op, _func, _len) \
7064         HCI_LE_EV_VL(_op, _func, _len, _len)
7065
7066 #define HCI_LE_EV_STATUS(_op, _func) \
7067         HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7068
7069 /* Entries in this table shall be positioned according to the subevent
7070  * opcode they handle, so the use of the macros above is recommended since
7071  * they initialize each entry at its proper index using designated
7072  * initializers; that way events without a callback function can be omitted.
7073  */
7074 static const struct hci_le_ev {
7075         void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7076         u16  min_len;
7077         u16  max_len;
7078 } hci_le_ev_table[U8_MAX + 1] = {
7079         /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7080         HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7081                   sizeof(struct hci_ev_le_conn_complete)),
7082         /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7083         HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7084                      sizeof(struct hci_ev_le_advertising_report),
7085                      HCI_MAX_EVENT_SIZE),
7086         /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7087         HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7088                   hci_le_conn_update_complete_evt,
7089                   sizeof(struct hci_ev_le_conn_update_complete)),
7090         /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7091         HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7092                   hci_le_remote_feat_complete_evt,
7093                   sizeof(struct hci_ev_le_remote_feat_complete)),
7094         /* [0x05 = HCI_EV_LE_LTK_REQ] */
7095         HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7096                   sizeof(struct hci_ev_le_ltk_req)),
7097         /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7098         HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7099                   hci_le_remote_conn_param_req_evt,
7100                   sizeof(struct hci_ev_le_remote_conn_param_req)),
7101         /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7102         HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7103                   hci_le_enh_conn_complete_evt,
7104                   sizeof(struct hci_ev_le_enh_conn_complete)),
7105         /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7106         HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7107                      sizeof(struct hci_ev_le_direct_adv_report),
7108                      HCI_MAX_EVENT_SIZE),
7109         /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7110         HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7111                   sizeof(struct hci_ev_le_phy_update_complete)),
7112         /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7113         HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7114                      sizeof(struct hci_ev_le_ext_adv_report),
7115                      HCI_MAX_EVENT_SIZE),
7116         /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7117         HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7118                   hci_le_pa_sync_estabilished_evt,
7119                   sizeof(struct hci_ev_le_pa_sync_established)),
7120         /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7121         HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7122                   sizeof(struct hci_evt_le_ext_adv_set_term)),
7123         /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7124         HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7125                   sizeof(struct hci_evt_le_cis_established)),
7126         /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7127         HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7128                   sizeof(struct hci_evt_le_cis_req)),
7129         /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7130         HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7131                      hci_le_create_big_complete_evt,
7132                      sizeof(struct hci_evt_le_create_big_complete),
7133                      HCI_MAX_EVENT_SIZE),
7134         /* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7135         HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7136                      hci_le_big_sync_established_evt,
7137                      sizeof(struct hci_evt_le_big_sync_estabilished),
7138                      HCI_MAX_EVENT_SIZE),
7139         /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7140         HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7141                      hci_le_big_info_adv_report_evt,
7142                      sizeof(struct hci_evt_le_big_info_adv_report),
7143                      HCI_MAX_EVENT_SIZE),
7144 };
7145
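/* Dispatch an LE meta event: complete any pending LE command that was waiting
 * for this subevent, then validate the length against hci_le_ev_table and
 * call the registered subevent handler.
 */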
7146 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7147                             struct sk_buff *skb, u16 *opcode, u8 *status,
7148                             hci_req_complete_t *req_complete,
7149                             hci_req_complete_skb_t *req_complete_skb)
7150 {
7151         struct hci_ev_le_meta *ev = data;
7152         const struct hci_le_ev *subev;
7153
7154         bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7155
7156         /* Only match event if command OGF is for LE */
7157         if (hdev->sent_cmd &&
7158             hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
7159             hci_skb_event(hdev->sent_cmd) == ev->subevent) {
7160                 *opcode = hci_skb_opcode(hdev->sent_cmd);
7161                 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7162                                      req_complete_skb);
7163         }
7164
7165         subev = &hci_le_ev_table[ev->subevent];
7166         if (!subev->func)
7167                 return;
7168
7169         if (skb->len < subev->min_len) {
7170                 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7171                            ev->subevent, skb->len, subev->min_len);
7172                 return;
7173         }
7174
7175         /* Just warn if the length is over max_len; it may still be
7176          * possible to partially parse the event, so leave it to the
7177          * callback to decide if that is acceptable.
7178          */
7179         if (skb->len > subev->max_len)
7180                 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7181                             ev->subevent, skb->len, subev->max_len);
7182         data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7183         if (!data)
7184                 return;
7185
7186         subev->func(hdev, data, skb);
7187 }
7188
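/* Check whether the skb carries the event a request was waiting for: either
 * the specific event when one was given, or otherwise a Command Complete
 * matching the given opcode.
 */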
7189 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7190                                  u8 event, struct sk_buff *skb)
7191 {
7192         struct hci_ev_cmd_complete *ev;
7193         struct hci_event_hdr *hdr;
7194
7195         if (!skb)
7196                 return false;
7197
7198         hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7199         if (!hdr)
7200                 return false;
7201
7202         if (event) {
7203                 if (hdr->evt != event)
7204                         return false;
7205                 return true;
7206         }
7207
7208         /* Check if request ended in Command Status - no way to retrieve
7209          * any extra parameters in this case.
7210          */
7211         if (hdr->evt == HCI_EV_CMD_STATUS)
7212                 return false;
7213
7214         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7215                 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7216                            hdr->evt);
7217                 return false;
7218         }
7219
7220         ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7221         if (!ev)
7222                 return false;
7223
7224         if (opcode != __le16_to_cpu(ev->opcode)) {
7225                 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7226                        __le16_to_cpu(ev->opcode));
7227                 return false;
7228         }
7229
7230         return true;
7231 }
7232
7233 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7234                                   struct sk_buff *skb)
7235 {
7236         struct hci_ev_le_advertising_info *adv;
7237         struct hci_ev_le_direct_adv_info *direct_adv;
7238         struct hci_ev_le_ext_adv_info *ext_adv;
7239         const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7240         const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7241
7242         hci_dev_lock(hdev);
7243
7244         /* If we are currently suspended and this is the first BT event seen,
7245          * save the wake reason associated with the event.
7246          */
7247         if (!hdev->suspended || hdev->wake_reason)
7248                 goto unlock;
7249
7250         /* Default to remote wake. Values for wake_reason are documented in the
7251          * BlueZ mgmt API docs.
7252          */
7253         hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7254
7255         /* Once configured for remote wakeup, we should only wake up for
7256          * reconnections. It's useful to see which device is waking us up so
7257          * keep track of the bdaddr of the connection event that woke us up.
7258          */
7259         if (event == HCI_EV_CONN_REQUEST) {
7260                 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7261                 hdev->wake_addr_type = BDADDR_BREDR;
7262         } else if (event == HCI_EV_CONN_COMPLETE) {
7263                 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7264                 hdev->wake_addr_type = BDADDR_BREDR;
7265         } else if (event == HCI_EV_LE_META) {
7266                 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7267                 u8 subevent = le_ev->subevent;
7268                 u8 *ptr = &skb->data[sizeof(*le_ev)];
7269                 u8 num_reports = *ptr;
7270
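                /* Each of these subevents starts with a num_reports byte, so
                 * ptr + 1 points at the first report for any of them; the
                 * report layout itself differs, hence the per-subevent casts.
                 */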
7271                 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7272                      subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7273                      subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7274                     num_reports) {
7275                         adv = (void *)(ptr + 1);
7276                         direct_adv = (void *)(ptr + 1);
7277                         ext_adv = (void *)(ptr + 1);
7278
7279                         switch (subevent) {
7280                         case HCI_EV_LE_ADVERTISING_REPORT:
7281                                 bacpy(&hdev->wake_addr, &adv->bdaddr);
7282                                 hdev->wake_addr_type = adv->bdaddr_type;
7283                                 break;
7284                         case HCI_EV_LE_DIRECT_ADV_REPORT:
7285                                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7286                                 hdev->wake_addr_type = direct_adv->bdaddr_type;
7287                                 break;
7288                         case HCI_EV_LE_EXT_ADV_REPORT:
7289                                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7290                                 hdev->wake_addr_type = ext_adv->bdaddr_type;
7291                                 break;
7292                         }
7293                 }
7294         } else {
7295                 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7296         }
7297
7298 unlock:
7299         hci_dev_unlock(hdev);
7300 }
7301
7302 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7303 [_op] = { \
7304         .req = false, \
7305         .func = _func, \
7306         .min_len = _min_len, \
7307         .max_len = _max_len, \
7308 }
7309
7310 #define HCI_EV(_op, _func, _len) \
7311         HCI_EV_VL(_op, _func, _len, _len)
7312
7313 #define HCI_EV_STATUS(_op, _func) \
7314         HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7315
7316 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7317 [_op] = { \
7318         .req = true, \
7319         .func_req = _func, \
7320         .min_len = _min_len, \
7321         .max_len = _max_len, \
7322 }
7323
7324 #define HCI_EV_REQ(_op, _func, _len) \
7325         HCI_EV_REQ_VL(_op, _func, _len, _len)
7326
7327 /* Entries in this table shall be positioned according to the event opcode
7328  * they handle, so the use of the macros above is recommended since they
7329  * attempt to initialize each entry at its proper index using designated
7330  * initializers; that way events without a callback function don't need one.
7331  */
7332 static const struct hci_ev {
7333         bool req;
7334         union {
7335                 void (*func)(struct hci_dev *hdev, void *data,
7336                              struct sk_buff *skb);
7337                 void (*func_req)(struct hci_dev *hdev, void *data,
7338                                  struct sk_buff *skb, u16 *opcode, u8 *status,
7339                                  hci_req_complete_t *req_complete,
7340                                  hci_req_complete_skb_t *req_complete_skb);
7341         };
7342         u16  min_len;
7343         u16  max_len;
7344 } hci_ev_table[U8_MAX + 1] = {
7345         /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7346         HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7347         /* [0x02 = HCI_EV_INQUIRY_RESULT] */
7348         HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7349                   sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7350         /* [0x03 = HCI_EV_CONN_COMPLETE] */
7351         HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7352                sizeof(struct hci_ev_conn_complete)),
7353         /* [0x04 = HCI_EV_CONN_REQUEST] */
7354         HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7355                sizeof(struct hci_ev_conn_request)),
7356         /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7357         HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7358                sizeof(struct hci_ev_disconn_complete)),
7359         /* [0x06 = HCI_EV_AUTH_COMPLETE] */
7360         HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7361                sizeof(struct hci_ev_auth_complete)),
7362         /* [0x07 = HCI_EV_REMOTE_NAME] */
7363         HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7364                sizeof(struct hci_ev_remote_name)),
7365         /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7366         HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7367                sizeof(struct hci_ev_encrypt_change)),
7368         /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7369         HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7370                hci_change_link_key_complete_evt,
7371                sizeof(struct hci_ev_change_link_key_complete)),
7372         /* [0x0b = HCI_EV_REMOTE_FEATURES] */
7373         HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7374                sizeof(struct hci_ev_remote_features)),
7375         /* [0x0e = HCI_EV_CMD_COMPLETE] */
7376         HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7377                       sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7378         /* [0x0f = HCI_EV_CMD_STATUS] */
7379         HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7380                    sizeof(struct hci_ev_cmd_status)),
7381         /* [0x10 = HCI_EV_HARDWARE_ERROR] */
7382         HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7383                sizeof(struct hci_ev_hardware_error)),
        /* [0x12 = HCI_EV_ROLE_CHANGE] */
        HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
               sizeof(struct hci_ev_role_change)),
        /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
        HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
                  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
        /* [0x14 = HCI_EV_MODE_CHANGE] */
        HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
               sizeof(struct hci_ev_mode_change)),
        /* [0x16 = HCI_EV_PIN_CODE_REQ] */
        HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
               sizeof(struct hci_ev_pin_code_req)),
        /* [0x17 = HCI_EV_LINK_KEY_REQ] */
        HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
               sizeof(struct hci_ev_link_key_req)),
        /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
        HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
               sizeof(struct hci_ev_link_key_notify)),
        /* [0x1c = HCI_EV_CLOCK_OFFSET] */
        HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
               sizeof(struct hci_ev_clock_offset)),
        /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
        HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
               sizeof(struct hci_ev_pkt_type_change)),
        /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
        HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
               sizeof(struct hci_ev_pscan_rep_mode)),
        /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
        HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
                  hci_inquiry_result_with_rssi_evt,
                  sizeof(struct hci_ev_inquiry_result_rssi),
                  HCI_MAX_EVENT_SIZE),
        /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
        HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
               sizeof(struct hci_ev_remote_ext_features)),
        /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
        HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
               sizeof(struct hci_ev_sync_conn_complete)),
        /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
        HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
                  hci_extended_inquiry_result_evt,
                  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
        /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
        HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
               sizeof(struct hci_ev_key_refresh_complete)),
        /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
        HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
               sizeof(struct hci_ev_io_capa_request)),
        /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
        HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
               sizeof(struct hci_ev_io_capa_reply)),
        /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
        HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
               sizeof(struct hci_ev_user_confirm_req)),
        /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
        HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
               sizeof(struct hci_ev_user_passkey_req)),
        /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
        HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
               sizeof(struct hci_ev_remote_oob_data_request)),
        /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
        HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
               sizeof(struct hci_ev_simple_pair_complete)),
        /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
        HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
               sizeof(struct hci_ev_user_passkey_notify)),
        /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
        HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
               sizeof(struct hci_ev_keypress_notify)),
        /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
        HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
               sizeof(struct hci_ev_remote_host_features)),
        /* [0x3e = HCI_EV_LE_META] */
        HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
                      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
        /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
        HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
               sizeof(struct hci_ev_phy_link_complete)),
        /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
        HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
               sizeof(struct hci_ev_channel_selected)),
        /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
        HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
               hci_disconn_phylink_complete_evt,
               sizeof(struct hci_ev_disconn_phy_link_complete)),
        /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
        HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
               sizeof(struct hci_ev_logical_link_complete)),
        /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
        HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
               hci_disconn_loglink_complete_evt,
               sizeof(struct hci_ev_disconn_logical_link_complete)),
#endif
        /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
        HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
               sizeof(struct hci_ev_num_comp_blocks)),
        /* [0xff = HCI_EV_VENDOR] */
        HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};
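
/* Note on the table layout: each HCI_EV*() entry above is a C99 designated
 * initializer keyed by the event code, so unlisted codes are left zero-filled
 * and later rejected by the !ev->func check in hci_event_func() below.  As an
 * illustrative sketch only (the real macros are defined earlier in this file
 * and also cover the req and variable-length variants), an entry such as
 *
 *      HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
 *             sizeof(struct hci_ev_conn_complete))
 *
 * behaves roughly like
 *
 *      [HCI_EV_CONN_COMPLETE] = {
 *              .func    = hci_conn_complete_evt,
 *              .min_len = sizeof(struct hci_ev_conn_complete),
 *              .max_len = sizeof(struct hci_ev_conn_complete),
 *      },
 *
 * which is also why entries may appear in any order without changing the
 * resulting dispatch table.
 */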

static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
                           u16 *opcode, u8 *status,
                           hci_req_complete_t *req_complete,
                           hci_req_complete_skb_t *req_complete_skb)
{
        const struct hci_ev *ev = &hci_ev_table[event];
        void *data;

        if (!ev->func)
                return;

        if (skb->len < ev->min_len) {
                bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
                           event, skb->len, ev->min_len);
                return;
        }

        /* Just warn if the length is over max_len; it may still be possible
         * to partially parse the event, so leave it to the callback to
         * decide whether that is acceptable.
         */
        if (skb->len > ev->max_len)
                bt_dev_warn_ratelimited(hdev,
                                        "unexpected event 0x%2.2x length: %u > %u",
                                        event, skb->len, ev->max_len);

        data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
        if (!data)
                return;

        if (ev->req)
                ev->func_req(hdev, data, skb, opcode, status, req_complete,
                             req_complete_skb);
        else
                ev->func(hdev, data, skb);
}
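
/* Handlers dispatched above receive @data pointing at the fixed-size header
 * that hci_ev_skb_pull() just consumed, while any variable-length tail is
 * still left in @skb.  As a hypothetical sketch (hci_example_evt,
 * struct hci_ev_example, struct hci_example_info and HCI_EV_EXAMPLE are
 * illustrative names, not part of this file), a variable-length handler
 * would therefore walk the remainder roughly like this:
 *
 *      static void hci_example_evt(struct hci_dev *hdev, void *data,
 *                                  struct sk_buff *skb)
 *      {
 *              struct hci_ev_example *ev = data;
 *              int i;
 *
 *              for (i = 0; i < ev->num; i++) {
 *                      struct hci_example_info *info;
 *
 *                      info = hci_ev_skb_pull(hdev, skb, HCI_EV_EXAMPLE,
 *                                             sizeof(*info));
 *                      if (!info)
 *                              break;
 *
 *                      ... process one report ...
 *              }
 *      }
 */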

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_event_hdr *hdr = (void *) skb->data;
        hci_req_complete_t req_complete = NULL;
        hci_req_complete_skb_t req_complete_skb = NULL;
        struct sk_buff *orig_skb = NULL;
        u8 status = 0, event, req_evt = 0;
        u16 opcode = HCI_OP_NOP;

        if (skb->len < sizeof(*hdr)) {
                bt_dev_err(hdev, "Malformed HCI Event");
                goto done;
        }

        kfree_skb(hdev->recv_event);
        hdev->recv_event = skb_clone(skb, GFP_KERNEL);

        event = hdr->evt;
        if (!event) {
                bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
                            event);
                goto done;
        }

        /* Only match event if command OGF is not for LE */
        if (hdev->sent_cmd &&
            hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
            hci_skb_event(hdev->sent_cmd) == event) {
                hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
                                     status, &req_complete, &req_complete_skb);
                req_evt = event;
        }

        /* If it looks like we might end up having to call
         * req_complete_skb, store a pristine copy of the skb since the
         * various handlers may modify the original one through
         * skb_pull() calls, etc.
         */
        if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
            event == HCI_EV_CMD_COMPLETE)
                orig_skb = skb_clone(skb, GFP_KERNEL);

        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        /* Store wake reason if we're suspended */
        hci_store_wake_reason(hdev, event, skb);

        bt_dev_dbg(hdev, "event 0x%2.2x", event);

        hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
                       &req_complete_skb);

        if (req_complete) {
                req_complete(hdev, status, opcode);
        } else if (req_complete_skb) {
                if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
                        kfree_skb(orig_skb);
                        orig_skb = NULL;
                }
                req_complete_skb(hdev, status, opcode, orig_skb);
        }

done:
        kfree_skb(orig_skb);
        kfree_skb(skb);
        hdev->stat.evt_rx++;
}
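
/* hci_event_packet() consumes @skb (it is freed on every path above) and is
 * not called directly by transport drivers.  Incoming frames are normally
 * queued with hci_recv_frame() and dispatched from the RX work item in
 * hci_core.c; as a rough sketch of that call site (not the verbatim code):
 *
 *      switch (hci_skb_pkt_type(skb)) {
 *      case HCI_EVENT_PKT:
 *              hci_event_packet(hdev, skb);
 *              break;
 *      ...
 *      }
 */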