Bluetooth: Add LE device found MGMT event
[platform/kernel/linux-starfive.git] / net/bluetooth/hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 #include "eir.h"
40
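/* A 128-bit key value of all zeroes, used when checking for blank
 * (all-zero) keys.
 */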
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42                  "\x00\x00\x00\x00\x00\x00\x00\x00"
43
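/* Convert a timeout given in seconds to jiffies, e.g. for
 * queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
 *                    secs_to_jiffies(hdev->rpa_timeout));
 */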
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45
46 /* Handle HCI Event packets */
47
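/* The helpers below pull a fixed-length parameter block from the event skb
 * and log which event, command complete or LE sub-event was malformed when
 * the packet turns out to be shorter than expected.
 */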
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
49                              u8 ev, size_t len)
50 {
51         void *data;
52
53         data = skb_pull_data(skb, len);
54         if (!data)
55                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
56
57         return data;
58 }
59
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
61                              u16 op, size_t len)
62 {
63         void *data;
64
65         data = skb_pull_data(skb, len);
66         if (!data)
67                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
68
69         return data;
70 }
71
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
73                                 u8 ev, size_t len)
74 {
75         void *data;
76
77         data = skb_pull_data(skb, len);
78         if (!data)
79                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
80
81         return data;
82 }
83
84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
85                                 struct sk_buff *skb)
86 {
87         struct hci_ev_status *rp = data;
88
89         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
90
91         /* It is possible that we receive the Inquiry Complete event right
92          * before we receive the Inquiry Cancel Command Complete event, in
93          * which case the latter event should have a status of Command
94          * Disallowed (0x0c). This should not be treated as an error, since
95          * we actually achieve what Inquiry Cancel wants to achieve,
96          * which is to end the last Inquiry session.
97          */
98         if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
99                 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
100                 rp->status = 0x00;
101         }
102
103         if (rp->status)
104                 return rp->status;
105
106         clear_bit(HCI_INQUIRY, &hdev->flags);
107         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
108         wake_up_bit(&hdev->flags, HCI_INQUIRY);
109
110         hci_dev_lock(hdev);
111         /* Set discovery state to stopped if we're not doing LE active
112          * scanning.
113          */
114         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
115             hdev->le_scan_type != LE_SCAN_ACTIVE)
116                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
117         hci_dev_unlock(hdev);
118
119         hci_conn_check_pending(hdev);
120
121         return rp->status;
122 }
123
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
125                               struct sk_buff *skb)
126 {
127         struct hci_ev_status *rp = data;
128
129         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
130
131         if (rp->status)
132                 return rp->status;
133
134         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
135
136         return rp->status;
137 }
138
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
140                                    struct sk_buff *skb)
141 {
142         struct hci_ev_status *rp = data;
143
144         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
145
146         if (rp->status)
147                 return rp->status;
148
149         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
150
151         hci_conn_check_pending(hdev);
152
153         return rp->status;
154 }
155
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
157                                         struct sk_buff *skb)
158 {
159         struct hci_ev_status *rp = data;
160
161         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
162
163         return rp->status;
164 }
165
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
167                                 struct sk_buff *skb)
168 {
169         struct hci_rp_role_discovery *rp = data;
170         struct hci_conn *conn;
171
172         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
173
174         if (rp->status)
175                 return rp->status;
176
177         hci_dev_lock(hdev);
178
179         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180         if (conn)
181                 conn->role = rp->role;
182
183         hci_dev_unlock(hdev);
184
185         return rp->status;
186 }
187
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
189                                   struct sk_buff *skb)
190 {
191         struct hci_rp_read_link_policy *rp = data;
192         struct hci_conn *conn;
193
194         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
195
196         if (rp->status)
197                 return rp->status;
198
199         hci_dev_lock(hdev);
200
201         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
202         if (conn)
203                 conn->link_policy = __le16_to_cpu(rp->policy);
204
205         hci_dev_unlock(hdev);
206
207         return rp->status;
208 }
209
210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
211                                    struct sk_buff *skb)
212 {
213         struct hci_rp_write_link_policy *rp = data;
214         struct hci_conn *conn;
215         void *sent;
216
217         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
218
219         if (rp->status)
220                 return rp->status;
221
222         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
223         if (!sent)
224                 return rp->status;
225
226         hci_dev_lock(hdev);
227
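        /* The Write_Link_Policy_Settings parameters are the connection handle
         * (2 octets) followed by the policy, hence the sent + 2 offset below.
         */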
228         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
229         if (conn)
230                 conn->link_policy = get_unaligned_le16(sent + 2);
231
232         hci_dev_unlock(hdev);
233
234         return rp->status;
235 }
236
237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
238                                       struct sk_buff *skb)
239 {
240         struct hci_rp_read_def_link_policy *rp = data;
241
242         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
243
244         if (rp->status)
245                 return rp->status;
246
247         hdev->link_policy = __le16_to_cpu(rp->policy);
248
249         return rp->status;
250 }
251
252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
253                                        struct sk_buff *skb)
254 {
255         struct hci_ev_status *rp = data;
256         void *sent;
257
258         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
259
260         if (rp->status)
261                 return rp->status;
262
263         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
264         if (!sent)
265                 return rp->status;
266
267         hdev->link_policy = get_unaligned_le16(sent);
268
269         return rp->status;
270 }
271
272 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
273 {
274         struct hci_ev_status *rp = data;
275
276         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
277
278         clear_bit(HCI_RESET, &hdev->flags);
279
280         if (rp->status)
281                 return rp->status;
282
283         /* Reset all non-persistent flags */
284         hci_dev_clear_volatile_flags(hdev);
285
286         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
287
288         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
289         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
290
291         memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
292         hdev->adv_data_len = 0;
293
294         memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
295         hdev->scan_rsp_data_len = 0;
296
297         hdev->le_scan_type = LE_SCAN_PASSIVE;
298
299         hdev->ssp_debug_mode = 0;
300
301         hci_bdaddr_list_clear(&hdev->le_accept_list);
302         hci_bdaddr_list_clear(&hdev->le_resolv_list);
303
304         return rp->status;
305 }
306
307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
308                                       struct sk_buff *skb)
309 {
310         struct hci_rp_read_stored_link_key *rp = data;
311         struct hci_cp_read_stored_link_key *sent;
312
313         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
314
315         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
316         if (!sent)
317                 return rp->status;
318
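        /* Only record the key counters when all stored keys were requested
         * (Read_All_Flag set to 0x01).
         */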
319         if (!rp->status && sent->read_all == 0x01) {
320                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
321                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
322         }
323
324         return rp->status;
325 }
326
327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
328                                         struct sk_buff *skb)
329 {
330         struct hci_rp_delete_stored_link_key *rp = data;
331         u16 num_keys;
332
333         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
334
335         if (rp->status)
336                 return rp->status;
337
338         num_keys = le16_to_cpu(rp->num_keys);
339
340         if (num_keys <= hdev->stored_num_keys)
341                 hdev->stored_num_keys -= num_keys;
342         else
343                 hdev->stored_num_keys = 0;
344
345         return rp->status;
346 }
347
348 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
349                                   struct sk_buff *skb)
350 {
351         struct hci_ev_status *rp = data;
352         void *sent;
353
354         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
355
356         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
357         if (!sent)
358                 return rp->status;
359
360         hci_dev_lock(hdev);
361
362         if (hci_dev_test_flag(hdev, HCI_MGMT))
363                 mgmt_set_local_name_complete(hdev, sent, rp->status);
364         else if (!rp->status)
365                 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
366
367         hci_dev_unlock(hdev);
368
369         return rp->status;
370 }
371
372 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
373                                  struct sk_buff *skb)
374 {
375         struct hci_rp_read_local_name *rp = data;
376
377         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
378
379         if (rp->status)
380                 return rp->status;
381
382         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
383             hci_dev_test_flag(hdev, HCI_CONFIG))
384                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
385
386         return rp->status;
387 }
388
389 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
390                                    struct sk_buff *skb)
391 {
392         struct hci_ev_status *rp = data;
393         void *sent;
394
395         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
396
397         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
398         if (!sent)
399                 return rp->status;
400
401         hci_dev_lock(hdev);
402
403         if (!rp->status) {
404                 __u8 param = *((__u8 *) sent);
405
406                 if (param == AUTH_ENABLED)
407                         set_bit(HCI_AUTH, &hdev->flags);
408                 else
409                         clear_bit(HCI_AUTH, &hdev->flags);
410         }
411
412         if (hci_dev_test_flag(hdev, HCI_MGMT))
413                 mgmt_auth_enable_complete(hdev, rp->status);
414
415         hci_dev_unlock(hdev);
416
417         return rp->status;
418 }
419
420 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
421                                     struct sk_buff *skb)
422 {
423         struct hci_ev_status *rp = data;
424         __u8 param;
425         void *sent;
426
427         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
428
429         if (rp->status)
430                 return rp->status;
431
432         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
433         if (!sent)
434                 return rp->status;
435
436         param = *((__u8 *) sent);
437
438         if (param)
439                 set_bit(HCI_ENCRYPT, &hdev->flags);
440         else
441                 clear_bit(HCI_ENCRYPT, &hdev->flags);
442
443         return rp->status;
444 }
445
446 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
447                                    struct sk_buff *skb)
448 {
449         struct hci_ev_status *rp = data;
450         __u8 param;
451         void *sent;
452
453         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
454
455         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
456         if (!sent)
457                 return rp->status;
458
459         param = *((__u8 *) sent);
460
461         hci_dev_lock(hdev);
462
463         if (rp->status) {
464                 hdev->discov_timeout = 0;
465                 goto done;
466         }
467
468         if (param & SCAN_INQUIRY)
469                 set_bit(HCI_ISCAN, &hdev->flags);
470         else
471                 clear_bit(HCI_ISCAN, &hdev->flags);
472
473         if (param & SCAN_PAGE)
474                 set_bit(HCI_PSCAN, &hdev->flags);
475         else
476                 clear_bit(HCI_PSCAN, &hdev->flags);
477
478 done:
479         hci_dev_unlock(hdev);
480
481         return rp->status;
482 }
483
484 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
485                                   struct sk_buff *skb)
486 {
487         struct hci_ev_status *rp = data;
488         struct hci_cp_set_event_filter *cp;
489         void *sent;
490
491         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
492
493         if (rp->status)
494                 return rp->status;
495
496         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
497         if (!sent)
498                 return rp->status;
499
500         cp = (struct hci_cp_set_event_filter *)sent;
501
502         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
503                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
504         else
505                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
506
507         return rp->status;
508 }
509
510 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
511                                    struct sk_buff *skb)
512 {
513         struct hci_rp_read_class_of_dev *rp = data;
514
515         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
516
517         if (rp->status)
518                 return rp->status;
519
520         memcpy(hdev->dev_class, rp->dev_class, 3);
521
522         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
523                    hdev->dev_class[1], hdev->dev_class[0]);
524
525         return rp->status;
526 }
527
528 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
529                                     struct sk_buff *skb)
530 {
531         struct hci_ev_status *rp = data;
532         void *sent;
533
534         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
535
536         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
537         if (!sent)
538                 return rp->status;
539
540         hci_dev_lock(hdev);
541
542         if (!rp->status)
543                 memcpy(hdev->dev_class, sent, 3);
544
545         if (hci_dev_test_flag(hdev, HCI_MGMT))
546                 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
547
548         hci_dev_unlock(hdev);
549
550         return rp->status;
551 }
552
553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
554                                     struct sk_buff *skb)
555 {
556         struct hci_rp_read_voice_setting *rp = data;
557         __u16 setting;
558
559         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
560
561         if (rp->status)
562                 return rp->status;
563
564         setting = __le16_to_cpu(rp->voice_setting);
565
566         if (hdev->voice_setting == setting)
567                 return rp->status;
568
569         hdev->voice_setting = setting;
570
571         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
572
573         if (hdev->notify)
574                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
575
576         return rp->status;
577 }
578
579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
580                                      struct sk_buff *skb)
581 {
582         struct hci_ev_status *rp = data;
583         __u16 setting;
584         void *sent;
585
586         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
587
588         if (rp->status)
589                 return rp->status;
590
591         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
592         if (!sent)
593                 return rp->status;
594
595         setting = get_unaligned_le16(sent);
596
597         if (hdev->voice_setting == setting)
598                 return rp->status;
599
600         hdev->voice_setting = setting;
601
602         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
603
604         if (hdev->notify)
605                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
606
607         return rp->status;
608 }
609
610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
611                                         struct sk_buff *skb)
612 {
613         struct hci_rp_read_num_supported_iac *rp = data;
614
615         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
616
617         if (rp->status)
618                 return rp->status;
619
620         hdev->num_iac = rp->num_iac;
621
622         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
623
624         return rp->status;
625 }
626
627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
628                                 struct sk_buff *skb)
629 {
630         struct hci_ev_status *rp = data;
631         struct hci_cp_write_ssp_mode *sent;
632
633         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
634
635         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
636         if (!sent)
637                 return rp->status;
638
639         hci_dev_lock(hdev);
640
641         if (!rp->status) {
642                 if (sent->mode)
643                         hdev->features[1][0] |= LMP_HOST_SSP;
644                 else
645                         hdev->features[1][0] &= ~LMP_HOST_SSP;
646         }
647
648         if (!rp->status) {
649                 if (sent->mode)
650                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
651                 else
652                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
653         }
654
655         hci_dev_unlock(hdev);
656
657         return rp->status;
658 }
659
660 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
661                                   struct sk_buff *skb)
662 {
663         struct hci_ev_status *rp = data;
664         struct hci_cp_write_sc_support *sent;
665
666         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
667
668         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
669         if (!sent)
670                 return rp->status;
671
672         hci_dev_lock(hdev);
673
674         if (!rp->status) {
675                 if (sent->support)
676                         hdev->features[1][0] |= LMP_HOST_SC;
677                 else
678                         hdev->features[1][0] &= ~LMP_HOST_SC;
679         }
680
681         if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
682                 if (sent->support)
683                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
684                 else
685                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
686         }
687
688         hci_dev_unlock(hdev);
689
690         return rp->status;
691 }
692
693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
694                                     struct sk_buff *skb)
695 {
696         struct hci_rp_read_local_version *rp = data;
697
698         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
699
700         if (rp->status)
701                 return rp->status;
702
703         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
704             hci_dev_test_flag(hdev, HCI_CONFIG)) {
705                 hdev->hci_ver = rp->hci_ver;
706                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
707                 hdev->lmp_ver = rp->lmp_ver;
708                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
709                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
710         }
711
712         return rp->status;
713 }
714
715 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
716                                    struct sk_buff *skb)
717 {
718         struct hci_rp_read_enc_key_size *rp = data;
719         struct hci_conn *conn;
720         u16 handle;
721         u8 status = rp->status;
722
723         bt_dev_dbg(hdev, "status 0x%2.2x", status);
724
725         handle = le16_to_cpu(rp->handle);
726
727         hci_dev_lock(hdev);
728
729         conn = hci_conn_hash_lookup_handle(hdev, handle);
730         if (!conn) {
731                 status = 0xFF;
732                 goto done;
733         }
734
735         /* While unexpected, the read_enc_key_size command may fail. The most
736          * secure approach is to then assume the key size is 0 to force a
737          * disconnection.
738          */
739         if (status) {
740                 bt_dev_err(hdev, "failed to read key size for handle %u",
741                            handle);
742                 conn->enc_key_size = 0;
743         } else {
744                 conn->enc_key_size = rp->key_size;
745                 status = 0;
746         }
747
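        /* Notify the upper layers so they can re-evaluate security now that
         * conn->enc_key_size is up to date.
         */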
748         hci_encrypt_cfm(conn, 0);
749
750 done:
751         hci_dev_unlock(hdev);
752
753         return status;
754 }
755
756 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
757                                      struct sk_buff *skb)
758 {
759         struct hci_rp_read_local_commands *rp = data;
760
761         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
762
763         if (rp->status)
764                 return rp->status;
765
766         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
767             hci_dev_test_flag(hdev, HCI_CONFIG))
768                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
769
770         return rp->status;
771 }
772
773 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
774                                            struct sk_buff *skb)
775 {
776         struct hci_rp_read_auth_payload_to *rp = data;
777         struct hci_conn *conn;
778
779         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
780
781         if (rp->status)
782                 return rp->status;
783
784         hci_dev_lock(hdev);
785
786         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
787         if (conn)
788                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
789
790         hci_dev_unlock(hdev);
791
792         return rp->status;
793 }
794
795 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
796                                             struct sk_buff *skb)
797 {
798         struct hci_rp_write_auth_payload_to *rp = data;
799         struct hci_conn *conn;
800         void *sent;
801
802         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
803
804         if (rp->status)
805                 return rp->status;
806
807         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
808         if (!sent)
809                 return rp->status;
810
811         hci_dev_lock(hdev);
812
813         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
814         if (conn)
815                 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
816
817         hci_dev_unlock(hdev);
818
819         return rp->status;
820 }
821
822 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
823                                      struct sk_buff *skb)
824 {
825         struct hci_rp_read_local_features *rp = data;
826
827         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
828
829         if (rp->status)
830                 return rp->status;
831
832         memcpy(hdev->features, rp->features, 8);
833
834         /* Adjust default settings according to the features
835          * supported by the device. */
836
837         if (hdev->features[0][0] & LMP_3SLOT)
838                 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
839
840         if (hdev->features[0][0] & LMP_5SLOT)
841                 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
842
843         if (hdev->features[0][1] & LMP_HV2) {
844                 hdev->pkt_type  |= (HCI_HV2);
845                 hdev->esco_type |= (ESCO_HV2);
846         }
847
848         if (hdev->features[0][1] & LMP_HV3) {
849                 hdev->pkt_type  |= (HCI_HV3);
850                 hdev->esco_type |= (ESCO_HV3);
851         }
852
853         if (lmp_esco_capable(hdev))
854                 hdev->esco_type |= (ESCO_EV3);
855
856         if (hdev->features[0][4] & LMP_EV4)
857                 hdev->esco_type |= (ESCO_EV4);
858
859         if (hdev->features[0][4] & LMP_EV5)
860                 hdev->esco_type |= (ESCO_EV5);
861
862         if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
863                 hdev->esco_type |= (ESCO_2EV3);
864
865         if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
866                 hdev->esco_type |= (ESCO_3EV3);
867
868         if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
869                 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
870
871         return rp->status;
872 }
873
874 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
875                                          struct sk_buff *skb)
876 {
877         struct hci_rp_read_local_ext_features *rp = data;
878
879         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
880
881         if (rp->status)
882                 return rp->status;
883
884         if (hdev->max_page < rp->max_page) {
885                 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
886                              &hdev->quirks))
887                         bt_dev_warn(hdev, "broken local ext features page 2");
888                 else
889                         hdev->max_page = rp->max_page;
890         }
891
892         if (rp->page < HCI_MAX_PAGES)
893                 memcpy(hdev->features[rp->page], rp->features, 8);
894
895         return rp->status;
896 }
897
898 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
899                                         struct sk_buff *skb)
900 {
901         struct hci_rp_read_flow_control_mode *rp = data;
902
903         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
904
905         if (rp->status)
906                 return rp->status;
907
908         hdev->flow_ctl_mode = rp->mode;
909
910         return rp->status;
911 }
912
913 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
914                                   struct sk_buff *skb)
915 {
916         struct hci_rp_read_buffer_size *rp = data;
917
918         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
919
920         if (rp->status)
921                 return rp->status;
922
923         hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
924         hdev->sco_mtu  = rp->sco_mtu;
925         hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
926         hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
927
928         if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
929                 hdev->sco_mtu  = 64;
930                 hdev->sco_pkts = 8;
931         }
932
933         hdev->acl_cnt = hdev->acl_pkts;
934         hdev->sco_cnt = hdev->sco_pkts;
935
936         BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
937                hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
938
939         return rp->status;
940 }
941
942 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
943                               struct sk_buff *skb)
944 {
945         struct hci_rp_read_bd_addr *rp = data;
946
947         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
948
949         if (rp->status)
950                 return rp->status;
951
952         if (test_bit(HCI_INIT, &hdev->flags))
953                 bacpy(&hdev->bdaddr, &rp->bdaddr);
954
955         if (hci_dev_test_flag(hdev, HCI_SETUP))
956                 bacpy(&hdev->setup_addr, &rp->bdaddr);
957
958         return rp->status;
959 }
960
961 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
962                                          struct sk_buff *skb)
963 {
964         struct hci_rp_read_local_pairing_opts *rp = data;
965
966         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
967
968         if (rp->status)
969                 return rp->status;
970
971         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
972             hci_dev_test_flag(hdev, HCI_CONFIG)) {
973                 hdev->pairing_opts = rp->pairing_opts;
974                 hdev->max_enc_key_size = rp->max_key_size;
975         }
976
977         return rp->status;
978 }
979
980 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
981                                          struct sk_buff *skb)
982 {
983         struct hci_rp_read_page_scan_activity *rp = data;
984
985         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
986
987         if (rp->status)
988                 return rp->status;
989
990         if (test_bit(HCI_INIT, &hdev->flags)) {
991                 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
992                 hdev->page_scan_window = __le16_to_cpu(rp->window);
993         }
994
995         return rp->status;
996 }
997
998 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
999                                           struct sk_buff *skb)
1000 {
1001         struct hci_ev_status *rp = data;
1002         struct hci_cp_write_page_scan_activity *sent;
1003
1004         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1005
1006         if (rp->status)
1007                 return rp->status;
1008
1009         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1010         if (!sent)
1011                 return rp->status;
1012
1013         hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1014         hdev->page_scan_window = __le16_to_cpu(sent->window);
1015
1016         return rp->status;
1017 }
1018
1019 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1020                                      struct sk_buff *skb)
1021 {
1022         struct hci_rp_read_page_scan_type *rp = data;
1023
1024         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1025
1026         if (rp->status)
1027                 return rp->status;
1028
1029         if (test_bit(HCI_INIT, &hdev->flags))
1030                 hdev->page_scan_type = rp->type;
1031
1032         return rp->status;
1033 }
1034
1035 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1036                                       struct sk_buff *skb)
1037 {
1038         struct hci_ev_status *rp = data;
1039         u8 *type;
1040
1041         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1042
1043         if (rp->status)
1044                 return rp->status;
1045
1046         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1047         if (type)
1048                 hdev->page_scan_type = *type;
1049
1050         return rp->status;
1051 }
1052
1053 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1054                                       struct sk_buff *skb)
1055 {
1056         struct hci_rp_read_data_block_size *rp = data;
1057
1058         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1059
1060         if (rp->status)
1061                 return rp->status;
1062
1063         hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1064         hdev->block_len = __le16_to_cpu(rp->block_len);
1065         hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1066
1067         hdev->block_cnt = hdev->num_blocks;
1068
1069         BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1070                hdev->block_cnt, hdev->block_len);
1071
1072         return rp->status;
1073 }
1074
1075 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1076                             struct sk_buff *skb)
1077 {
1078         struct hci_rp_read_clock *rp = data;
1079         struct hci_cp_read_clock *cp;
1080         struct hci_conn *conn;
1081
1082         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1083
1084         if (rp->status)
1085                 return rp->status;
1086
1087         hci_dev_lock(hdev);
1088
1089         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1090         if (!cp)
1091                 goto unlock;
1092
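        /* Which_Clock 0x00 refers to the local clock; any other value refers
         * to the piconet clock of the given connection.
         */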
1093         if (cp->which == 0x00) {
1094                 hdev->clock = le32_to_cpu(rp->clock);
1095                 goto unlock;
1096         }
1097
1098         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1099         if (conn) {
1100                 conn->clock = le32_to_cpu(rp->clock);
1101                 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1102         }
1103
1104 unlock:
1105         hci_dev_unlock(hdev);
1106         return rp->status;
1107 }
1108
1109 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1110                                      struct sk_buff *skb)
1111 {
1112         struct hci_rp_read_local_amp_info *rp = data;
1113
1114         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1115
1116         if (rp->status)
1117                 return rp->status;
1118
1119         hdev->amp_status = rp->amp_status;
1120         hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1121         hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1122         hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1123         hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1124         hdev->amp_type = rp->amp_type;
1125         hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1126         hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1127         hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1128         hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1129
1130         return rp->status;
1131 }
1132
1133 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1134                                        struct sk_buff *skb)
1135 {
1136         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1137
1138         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1139
1140         if (rp->status)
1141                 return rp->status;
1142
1143         hdev->inq_tx_power = rp->tx_power;
1144
1145         return rp->status;
1146 }
1147
1148 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1149                                              struct sk_buff *skb)
1150 {
1151         struct hci_rp_read_def_err_data_reporting *rp = data;
1152
1153         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1154
1155         if (rp->status)
1156                 return rp->status;
1157
1158         hdev->err_data_reporting = rp->err_data_reporting;
1159
1160         return rp->status;
1161 }
1162
1163 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1164                                               struct sk_buff *skb)
1165 {
1166         struct hci_ev_status *rp = data;
1167         struct hci_cp_write_def_err_data_reporting *cp;
1168
1169         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1170
1171         if (rp->status)
1172                 return rp->status;
1173
1174         cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1175         if (!cp)
1176                 return rp->status;
1177
1178         hdev->err_data_reporting = cp->err_data_reporting;
1179
1180         return rp->status;
1181 }
1182
1183 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1184                                 struct sk_buff *skb)
1185 {
1186         struct hci_rp_pin_code_reply *rp = data;
1187         struct hci_cp_pin_code_reply *cp;
1188         struct hci_conn *conn;
1189
1190         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1191
1192         hci_dev_lock(hdev);
1193
1194         if (hci_dev_test_flag(hdev, HCI_MGMT))
1195                 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1196
1197         if (rp->status)
1198                 goto unlock;
1199
1200         cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1201         if (!cp)
1202                 goto unlock;
1203
1204         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1205         if (conn)
1206                 conn->pin_length = cp->pin_len;
1207
1208 unlock:
1209         hci_dev_unlock(hdev);
1210         return rp->status;
1211 }
1212
1213 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1214                                     struct sk_buff *skb)
1215 {
1216         struct hci_rp_pin_code_neg_reply *rp = data;
1217
1218         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1219
1220         hci_dev_lock(hdev);
1221
1222         if (hci_dev_test_flag(hdev, HCI_MGMT))
1223                 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1224                                                  rp->status);
1225
1226         hci_dev_unlock(hdev);
1227
1228         return rp->status;
1229 }
1230
1231 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1232                                      struct sk_buff *skb)
1233 {
1234         struct hci_rp_le_read_buffer_size *rp = data;
1235
1236         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1237
1238         if (rp->status)
1239                 return rp->status;
1240
1241         hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1242         hdev->le_pkts = rp->le_max_pkt;
1243
1244         hdev->le_cnt = hdev->le_pkts;
1245
1246         BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1247
1248         return rp->status;
1249 }
1250
1251 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1252                                         struct sk_buff *skb)
1253 {
1254         struct hci_rp_le_read_local_features *rp = data;
1255
1256         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1257
1258         if (rp->status)
1259                 return rp->status;
1260
1261         memcpy(hdev->le_features, rp->features, 8);
1262
1263         return rp->status;
1264 }
1265
1266 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1267                                       struct sk_buff *skb)
1268 {
1269         struct hci_rp_le_read_adv_tx_power *rp = data;
1270
1271         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1272
1273         if (rp->status)
1274                 return rp->status;
1275
1276         hdev->adv_tx_power = rp->tx_power;
1277
1278         return rp->status;
1279 }
1280
1281 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1282                                     struct sk_buff *skb)
1283 {
1284         struct hci_rp_user_confirm_reply *rp = data;
1285
1286         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1287
1288         hci_dev_lock(hdev);
1289
1290         if (hci_dev_test_flag(hdev, HCI_MGMT))
1291                 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1292                                                  rp->status);
1293
1294         hci_dev_unlock(hdev);
1295
1296         return rp->status;
1297 }
1298
1299 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1300                                         struct sk_buff *skb)
1301 {
1302         struct hci_rp_user_confirm_reply *rp = data;
1303
1304         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1305
1306         hci_dev_lock(hdev);
1307
1308         if (hci_dev_test_flag(hdev, HCI_MGMT))
1309                 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1310                                                      ACL_LINK, 0, rp->status);
1311
1312         hci_dev_unlock(hdev);
1313
1314         return rp->status;
1315 }
1316
1317 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1318                                     struct sk_buff *skb)
1319 {
1320         struct hci_rp_user_confirm_reply *rp = data;
1321
1322         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1323
1324         hci_dev_lock(hdev);
1325
1326         if (hci_dev_test_flag(hdev, HCI_MGMT))
1327                 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1328                                                  0, rp->status);
1329
1330         hci_dev_unlock(hdev);
1331
1332         return rp->status;
1333 }
1334
1335 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1336                                         struct sk_buff *skb)
1337 {
1338         struct hci_rp_user_confirm_reply *rp = data;
1339
1340         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1341
1342         hci_dev_lock(hdev);
1343
1344         if (hci_dev_test_flag(hdev, HCI_MGMT))
1345                 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1346                                                      ACL_LINK, 0, rp->status);
1347
1348         hci_dev_unlock(hdev);
1349
1350         return rp->status;
1351 }
1352
1353 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1354                                      struct sk_buff *skb)
1355 {
1356         struct hci_rp_read_local_oob_data *rp = data;
1357
1358         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1359
1360         return rp->status;
1361 }
1362
1363 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1364                                          struct sk_buff *skb)
1365 {
1366         struct hci_rp_read_local_oob_ext_data *rp = data;
1367
1368         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1369
1370         return rp->status;
1371 }
1372
1373 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1374                                     struct sk_buff *skb)
1375 {
1376         struct hci_ev_status *rp = data;
1377         bdaddr_t *sent;
1378
1379         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1380
1381         if (rp->status)
1382                 return rp->status;
1383
1384         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1385         if (!sent)
1386                 return rp->status;
1387
1388         hci_dev_lock(hdev);
1389
1390         bacpy(&hdev->random_addr, sent);
1391
1392         if (!bacmp(&hdev->rpa, sent)) {
1393                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1394                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1395                                    secs_to_jiffies(hdev->rpa_timeout));
1396         }
1397
1398         hci_dev_unlock(hdev);
1399
1400         return rp->status;
1401 }
1402
1403 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1404                                     struct sk_buff *skb)
1405 {
1406         struct hci_ev_status *rp = data;
1407         struct hci_cp_le_set_default_phy *cp;
1408
1409         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1410
1411         if (rp->status)
1412                 return rp->status;
1413
1414         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1415         if (!cp)
1416                 return rp->status;
1417
1418         hci_dev_lock(hdev);
1419
1420         hdev->le_tx_def_phys = cp->tx_phys;
1421         hdev->le_rx_def_phys = cp->rx_phys;
1422
1423         hci_dev_unlock(hdev);
1424
1425         return rp->status;
1426 }
1427
1428 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1429                                             struct sk_buff *skb)
1430 {
1431         struct hci_ev_status *rp = data;
1432         struct hci_cp_le_set_adv_set_rand_addr *cp;
1433         struct adv_info *adv;
1434
1435         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1436
1437         if (rp->status)
1438                 return rp->status;
1439
1440         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1441         /* Update only if this is an adv instance, since handle 0x00 shall be
1442          * using HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
1443          * non-extended advertising.
1444          */
1445         if (!cp || !cp->handle)
1446                 return rp->status;
1447
1448         hci_dev_lock(hdev);
1449
1450         adv = hci_find_adv_instance(hdev, cp->handle);
1451         if (adv) {
1452                 bacpy(&adv->random_addr, &cp->bdaddr);
1453                 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1454                         adv->rpa_expired = false;
1455                         queue_delayed_work(hdev->workqueue,
1456                                            &adv->rpa_expired_cb,
1457                                            secs_to_jiffies(hdev->rpa_timeout));
1458                 }
1459         }
1460
1461         hci_dev_unlock(hdev);
1462
1463         return rp->status;
1464 }
1465
1466 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1467                                    struct sk_buff *skb)
1468 {
1469         struct hci_ev_status *rp = data;
1470         u8 *instance;
1471         int err;
1472
1473         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1474
1475         if (rp->status)
1476                 return rp->status;
1477
1478         instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1479         if (!instance)
1480                 return rp->status;
1481
1482         hci_dev_lock(hdev);
1483
1484         err = hci_remove_adv_instance(hdev, *instance);
1485         if (!err)
1486                 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1487                                          *instance);
1488
1489         hci_dev_unlock(hdev);
1490
1491         return rp->status;
1492 }
1493
1494 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1495                                    struct sk_buff *skb)
1496 {
1497         struct hci_ev_status *rp = data;
1498         struct adv_info *adv, *n;
1499         int err;
1500
1501         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1502
1503         if (rp->status)
1504                 return rp->status;
1505
1506         if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1507                 return rp->status;
1508
1509         hci_dev_lock(hdev);
1510
1511         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1512                 u8 instance = adv->instance;
1513
1514                 err = hci_remove_adv_instance(hdev, instance);
1515                 if (!err)
1516                         mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1517                                                  hdev, instance);
1518         }
1519
1520         hci_dev_unlock(hdev);
1521
1522         return rp->status;
1523 }
1524
1525 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1526                                         struct sk_buff *skb)
1527 {
1528         struct hci_rp_le_read_transmit_power *rp = data;
1529
1530         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1531
1532         if (rp->status)
1533                 return rp->status;
1534
1535         hdev->min_le_tx_power = rp->min_le_tx_power;
1536         hdev->max_le_tx_power = rp->max_le_tx_power;
1537
1538         return rp->status;
1539 }
1540
1541 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1542                                      struct sk_buff *skb)
1543 {
1544         struct hci_ev_status *rp = data;
1545         struct hci_cp_le_set_privacy_mode *cp;
1546         struct hci_conn_params *params;
1547
1548         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1549
1550         if (rp->status)
1551                 return rp->status;
1552
1553         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1554         if (!cp)
1555                 return rp->status;
1556
1557         hci_dev_lock(hdev);
1558
1559         params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1560         if (params)
1561                 params->privacy_mode = cp->mode;
1562
1563         hci_dev_unlock(hdev);
1564
1565         return rp->status;
1566 }
1567
1568 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1569                                    struct sk_buff *skb)
1570 {
1571         struct hci_ev_status *rp = data;
1572         __u8 *sent;
1573
1574         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1575
1576         if (rp->status)
1577                 return rp->status;
1578
1579         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1580         if (!sent)
1581                 return rp->status;
1582
1583         hci_dev_lock(hdev);
1584
1585         /* If we're doing connection initiation as peripheral, set a
1586          * timeout in case something goes wrong.
1587          */
1588         if (*sent) {
1589                 struct hci_conn *conn;
1590
1591                 hci_dev_set_flag(hdev, HCI_LE_ADV);
1592
1593                 conn = hci_lookup_le_connect(hdev);
1594                 if (conn)
1595                         queue_delayed_work(hdev->workqueue,
1596                                            &conn->le_conn_timeout,
1597                                            conn->conn_timeout);
1598         } else {
1599                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1600         }
1601
1602         hci_dev_unlock(hdev);
1603
1604         return rp->status;
1605 }
1606
1607 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1608                                        struct sk_buff *skb)
1609 {
1610         struct hci_cp_le_set_ext_adv_enable *cp;
1611         struct hci_cp_ext_adv_set *set;
1612         struct adv_info *adv = NULL, *n;
1613         struct hci_ev_status *rp = data;
1614
1615         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1616
1617         if (rp->status)
1618                 return rp->status;
1619
1620         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1621         if (!cp)
1622                 return rp->status;
1623
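        /* The enable command carries an array of advertising sets; only the
         * handle of the first set is inspected here.
         */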
1624         set = (void *)cp->data;
1625
1626         hci_dev_lock(hdev);
1627
1628         if (cp->num_of_sets)
1629                 adv = hci_find_adv_instance(hdev, set->handle);
1630
1631         if (cp->enable) {
1632                 struct hci_conn *conn;
1633
1634                 hci_dev_set_flag(hdev, HCI_LE_ADV);
1635
1636                 if (adv)
1637                         adv->enabled = true;
1638
1639                 conn = hci_lookup_le_connect(hdev);
1640                 if (conn)
1641                         queue_delayed_work(hdev->workqueue,
1642                                            &conn->le_conn_timeout,
1643                                            conn->conn_timeout);
1644         } else {
1645                 if (cp->num_of_sets) {
1646                         if (adv)
1647                                 adv->enabled = false;
1648
1649                         /* If just one instance was disabled, check if any other
1650                          * instance is still enabled before clearing HCI_LE_ADV.
1651                          */
1652                         list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1653                                                  list) {
1654                                 if (adv->enabled)
1655                                         goto unlock;
1656                         }
1657                 } else {
1658                         /* All instances shall be considered disabled */
1659                         list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1660                                                  list)
1661                                 adv->enabled = false;
1662                 }
1663
1664                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1665         }
1666
1667 unlock:
1668         hci_dev_unlock(hdev);
1669         return rp->status;
1670 }
1671
1672 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1673                                    struct sk_buff *skb)
1674 {
1675         struct hci_cp_le_set_scan_param *cp;
1676         struct hci_ev_status *rp = data;
1677
1678         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1679
1680         if (rp->status)
1681                 return rp->status;
1682
1683         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1684         if (!cp)
1685                 return rp->status;
1686
1687         hci_dev_lock(hdev);
1688
1689         hdev->le_scan_type = cp->type;
1690
1691         hci_dev_unlock(hdev);
1692
1693         return rp->status;
1694 }
1695
1696 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1697                                        struct sk_buff *skb)
1698 {
1699         struct hci_cp_le_set_ext_scan_params *cp;
1700         struct hci_ev_status *rp = data;
1701         struct hci_cp_le_scan_phy_params *phy_param;
1702
1703         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1704
1705         if (rp->status)
1706                 return rp->status;
1707
1708         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1709         if (!cp)
1710                 return rp->status;
1711
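        /* Per-PHY scan parameters follow the fixed part of the command; the
         * scan type is taken from the first configured PHY.
         */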
1712         phy_param = (void *)cp->data;
1713
1714         hci_dev_lock(hdev);
1715
1716         hdev->le_scan_type = phy_param->type;
1717
1718         hci_dev_unlock(hdev);
1719
1720         return rp->status;
1721 }
1722
1723 static bool has_pending_adv_report(struct hci_dev *hdev)
1724 {
1725         struct discovery_state *d = &hdev->discovery;
1726
1727         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1728 }
1729
1730 static void clear_pending_adv_report(struct hci_dev *hdev)
1731 {
1732         struct discovery_state *d = &hdev->discovery;
1733
1734         bacpy(&d->last_adv_addr, BDADDR_ANY);
1735         d->last_adv_data_len = 0;
1736 }
1737
1738 #ifndef TIZEN_BT
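/* During active scanning, the advertising report is buffered here until the
 * matching scan response arrives, so that both can be reported to userspace
 * as a single device found event.
 */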
1739 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1740                                      u8 bdaddr_type, s8 rssi, u32 flags,
1741                                      u8 *data, u8 len)
1742 {
1743         struct discovery_state *d = &hdev->discovery;
1744
1745         if (len > HCI_MAX_AD_LENGTH)
1746                 return;
1747
1748         bacpy(&d->last_adv_addr, bdaddr);
1749         d->last_adv_addr_type = bdaddr_type;
1750         d->last_adv_rssi = rssi;
1751         d->last_adv_flags = flags;
1752         memcpy(d->last_adv_data, data, len);
1753         d->last_adv_data_len = len;
1754 }
1755 #endif
1756
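/* Common completion handling for the legacy and extended scan enable
 * commands: update the HCI_LE_SCAN flag, flush any pending advertising
 * report on disable, and adjust the discovery state accordingly.
 */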
1757 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1758 {
1759         hci_dev_lock(hdev);
1760
1761         switch (enable) {
1762         case LE_SCAN_ENABLE:
1763                 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1764                 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1765                         clear_pending_adv_report(hdev);
1766                 if (hci_dev_test_flag(hdev, HCI_MESH))
1767                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1768                 break;
1769
1770         case LE_SCAN_DISABLE:
1771                 /* We do this here instead of when setting DISCOVERY_STOPPED
1772                  * since the latter would potentially require waiting for
1773                  * inquiry to stop too.
1774                  */
1775                 if (has_pending_adv_report(hdev)) {
1776                         struct discovery_state *d = &hdev->discovery;
1777
1778                         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1779                                           d->last_adv_addr_type, NULL,
1780                                           d->last_adv_rssi, d->last_adv_flags,
1781                                           d->last_adv_data,
1782                                           d->last_adv_data_len, NULL, 0, 0);
1783                 }
1784
1785                 /* Cancel this timer so that we don't try to disable scanning
1786                  * when it's already disabled.
1787                  */
1788                 cancel_delayed_work(&hdev->le_scan_disable);
1789
1790                 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1791
1792                 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1793                  * interrupted scanning due to a connect request. Therefore,
1794                  * mark discovery as stopped.
1795                  */
1796                 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1797 #ifndef TIZEN_BT /* The line below is a kernel bug. */
1798                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1799 #else
1800                         hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1801 #endif
1802                 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1803                          hdev->discovery.state == DISCOVERY_FINDING)
1804                         queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1805
1806                 break;
1807
1808         default:
1809                 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1810                            enable);
1811                 break;
1812         }
1813
1814         hci_dev_unlock(hdev);
1815 }
1816
1817 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1818                                     struct sk_buff *skb)
1819 {
1820         struct hci_cp_le_set_scan_enable *cp;
1821         struct hci_ev_status *rp = data;
1822
1823         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1824
1825         if (rp->status)
1826                 return rp->status;
1827
1828         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1829         if (!cp)
1830                 return rp->status;
1831
1832         le_set_scan_enable_complete(hdev, cp->enable);
1833
1834         return rp->status;
1835 }
1836
1837 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1838                                         struct sk_buff *skb)
1839 {
1840         struct hci_cp_le_set_ext_scan_enable *cp;
1841         struct hci_ev_status *rp = data;
1842
1843         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1844
1845         if (rp->status)
1846                 return rp->status;
1847
1848         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1849         if (!cp)
1850                 return rp->status;
1851
1852         le_set_scan_enable_complete(hdev, cp->enable);
1853
1854         return rp->status;
1855 }
1856
1857 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1858                                       struct sk_buff *skb)
1859 {
1860         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1861
1862         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1863                    rp->num_of_sets);
1864
1865         if (rp->status)
1866                 return rp->status;
1867
1868         hdev->le_num_of_adv_sets = rp->num_of_sets;
1869
1870         return rp->status;
1871 }
1872
1873 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1874                                           struct sk_buff *skb)
1875 {
1876         struct hci_rp_le_read_accept_list_size *rp = data;
1877
1878         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1879
1880         if (rp->status)
1881                 return rp->status;
1882
1883         hdev->le_accept_list_size = rp->size;
1884
1885         return rp->status;
1886 }
1887
1888 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1889                                       struct sk_buff *skb)
1890 {
1891         struct hci_ev_status *rp = data;
1892
1893         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1894
1895         if (rp->status)
1896                 return rp->status;
1897
1898         hci_dev_lock(hdev);
1899         hci_bdaddr_list_clear(&hdev->le_accept_list);
1900         hci_dev_unlock(hdev);
1901
1902         return rp->status;
1903 }
1904
1905 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1906                                        struct sk_buff *skb)
1907 {
1908         struct hci_cp_le_add_to_accept_list *sent;
1909         struct hci_ev_status *rp = data;
1910
1911         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1912
1913         if (rp->status)
1914                 return rp->status;
1915
1916         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1917         if (!sent)
1918                 return rp->status;
1919
1920         hci_dev_lock(hdev);
1921         hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1922                             sent->bdaddr_type);
1923         hci_dev_unlock(hdev);
1924
1925         return rp->status;
1926 }
1927
1928 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1929                                          struct sk_buff *skb)
1930 {
1931         struct hci_cp_le_del_from_accept_list *sent;
1932         struct hci_ev_status *rp = data;
1933
1934         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1935
1936         if (rp->status)
1937                 return rp->status;
1938
1939         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1940         if (!sent)
1941                 return rp->status;
1942
1943         hci_dev_lock(hdev);
1944         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1945                             sent->bdaddr_type);
1946         hci_dev_unlock(hdev);
1947
1948         return rp->status;
1949 }
1950
1951 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1952                                           struct sk_buff *skb)
1953 {
1954         struct hci_rp_le_read_supported_states *rp = data;
1955
1956         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1957
1958         if (rp->status)
1959                 return rp->status;
1960
1961         memcpy(hdev->le_states, rp->le_states, 8);
1962
1963         return rp->status;
1964 }
1965
1966 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1967                                       struct sk_buff *skb)
1968 {
1969         struct hci_rp_le_read_def_data_len *rp = data;
1970
1971         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1972
1973         if (rp->status)
1974                 return rp->status;
1975
1976         hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1977         hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1978
1979         return rp->status;
1980 }
1981
1982 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1983                                        struct sk_buff *skb)
1984 {
1985         struct hci_cp_le_write_def_data_len *sent;
1986         struct hci_ev_status *rp = data;
1987
1988         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1989
1990         if (rp->status)
1991                 return rp->status;
1992
1993         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1994         if (!sent)
1995                 return rp->status;
1996
1997         hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1998         hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1999
2000         return rp->status;
2001 }
2002
2003 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2004                                        struct sk_buff *skb)
2005 {
2006         struct hci_cp_le_add_to_resolv_list *sent;
2007         struct hci_ev_status *rp = data;
2008
2009         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2010
2011         if (rp->status)
2012                 return rp->status;
2013
2014         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2015         if (!sent)
2016                 return rp->status;
2017
2018         hci_dev_lock(hdev);
2019         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2020                                 sent->bdaddr_type, sent->peer_irk,
2021                                 sent->local_irk);
2022         hci_dev_unlock(hdev);
2023
2024         return rp->status;
2025 }
2026
2027 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2028                                          struct sk_buff *skb)
2029 {
2030         struct hci_cp_le_del_from_resolv_list *sent;
2031         struct hci_ev_status *rp = data;
2032
2033         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2034
2035         if (rp->status)
2036                 return rp->status;
2037
2038         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2039         if (!sent)
2040                 return rp->status;
2041
2042         hci_dev_lock(hdev);
2043         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2044                             sent->bdaddr_type);
2045         hci_dev_unlock(hdev);
2046
2047         return rp->status;
2048 }
2049
2050 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2051                                       struct sk_buff *skb)
2052 {
2053         struct hci_ev_status *rp = data;
2054
2055         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2056
2057         if (rp->status)
2058                 return rp->status;
2059
2060         hci_dev_lock(hdev);
2061         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2062         hci_dev_unlock(hdev);
2063
2064         return rp->status;
2065 }
2066
2067 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2068                                           struct sk_buff *skb)
2069 {
2070         struct hci_rp_le_read_resolv_list_size *rp = data;
2071
2072         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2073
2074         if (rp->status)
2075                 return rp->status;
2076
2077         hdev->le_resolv_list_size = rp->size;
2078
2079         return rp->status;
2080 }
2081
2082 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2083                                                struct sk_buff *skb)
2084 {
2085         struct hci_ev_status *rp = data;
2086         __u8 *sent;
2087
2088         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2089
2090         if (rp->status)
2091                 return rp->status;
2092
2093         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2094         if (!sent)
2095                 return rp->status;
2096
2097         hci_dev_lock(hdev);
2098
2099         if (*sent)
2100                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2101         else
2102                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2103
2104         hci_dev_unlock(hdev);
2105
2106         return rp->status;
2107 }
2108
2109 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2110                                       struct sk_buff *skb)
2111 {
2112         struct hci_rp_le_read_max_data_len *rp = data;
2113
2114         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2115
2116         if (rp->status)
2117                 return rp->status;
2118
2119         hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2120         hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2121         hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2122         hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2123
2124         return rp->status;
2125 }
2126
2127 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2128                                          struct sk_buff *skb)
2129 {
2130         struct hci_cp_write_le_host_supported *sent;
2131         struct hci_ev_status *rp = data;
2132
2133         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2134
2135         if (rp->status)
2136                 return rp->status;
2137
2138         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2139         if (!sent)
2140                 return rp->status;
2141
2142         hci_dev_lock(hdev);
2143
2144         if (sent->le) {
2145                 hdev->features[1][0] |= LMP_HOST_LE;
2146                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2147         } else {
2148                 hdev->features[1][0] &= ~LMP_HOST_LE;
2149                 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2150                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2151         }
2152
2153         if (sent->simul)
2154                 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2155         else
2156                 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2157
2158         hci_dev_unlock(hdev);
2159
2160         return rp->status;
2161 }
2162
2163 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2164                                struct sk_buff *skb)
2165 {
2166         struct hci_cp_le_set_adv_param *cp;
2167         struct hci_ev_status *rp = data;
2168
2169         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2170
2171         if (rp->status)
2172                 return rp->status;
2173
2174         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2175         if (!cp)
2176                 return rp->status;
2177
2178         hci_dev_lock(hdev);
2179         hdev->adv_addr_type = cp->own_address_type;
2180         hci_dev_unlock(hdev);
2181
2182         return rp->status;
2183 }
2184
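/* On completion of LE Set Extended Advertising Parameters, record the own
 * address type and the selected TX power (in hdev for instance 0, otherwise
 * in the matching adv_info), then refresh the advertising data.
 */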
2185 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2186                                    struct sk_buff *skb)
2187 {
2188         struct hci_rp_le_set_ext_adv_params *rp = data;
2189         struct hci_cp_le_set_ext_adv_params *cp;
2190         struct adv_info *adv_instance;
2191
2192         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2193
2194         if (rp->status)
2195                 return rp->status;
2196
2197         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2198         if (!cp)
2199                 return rp->status;
2200
2201         hci_dev_lock(hdev);
2202         hdev->adv_addr_type = cp->own_addr_type;
2203         if (!cp->handle) {
2204                 /* Store in hdev for instance 0 */
2205                 hdev->adv_tx_power = rp->tx_power;
2206         } else {
2207                 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2208                 if (adv_instance)
2209                         adv_instance->tx_power = rp->tx_power;
2210         }
2211         /* Update adv data as tx power is known now */
2212         hci_update_adv_data(hdev, cp->handle);
2213
2214         hci_dev_unlock(hdev);
2215
2216         return rp->status;
2217 }
2218
2219 #ifdef TIZEN_BT
2220 static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
2221                              struct sk_buff *skb)
2222 {
2223         struct hci_cc_rsp_enable_rssi *rp = data;
2224
2225         BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
2226                hdev->name, rp->status, rp->le_ext_opcode);
2227
2228         mgmt_enable_rssi_cc(hdev, rp, rp->status);
2229
2230         return rp->status;
2231 }
2232
2233 static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
2234                               struct sk_buff *skb)
2235 {
2236         struct hci_cc_rp_get_raw_rssi *rp = data;
2237
2238         BT_DBG("hci_cc_get_raw_rssi - %s Get Raw RSSI Response[%2.2x %4.4x %2.2X]",
2239                hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
2240
2241         mgmt_raw_rssi_response(hdev, rp, rp->status);
2242
2243         return rp->status;
2244 }
2245
2246 static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
2247                                                struct sk_buff *skb)
2248 {
2249         struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
2250
2251         BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);
2252
2253         mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
2254                             ev->rssi_dbm);
2255 }
2256
2257 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
2258                                               struct sk_buff *skb)
2259 {
2260         struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
2261         __u8 event_le_ext_sub_code;
2262
2263         BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
2264                LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
2265
2266         skb_pull(skb, sizeof(*ev));
2267         event_le_ext_sub_code = ev->event_le_ext_sub_code;
2268
2269         switch (event_le_ext_sub_code) {
2270         case LE_RSSI_LINK_ALERT:
2271                 hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
2272                 break;
2273
2274         default:
2275                 break;
2276         }
2277 }
2278
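/* Dispatch Tizen vendor-specific HCI events to the appropriate handler
 * based on the event sub-code.
 */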
2279 static void hci_vendor_specific_evt(struct hci_dev *hdev, void *data,
2280                                     struct sk_buff *skb)
2281 {
2282         struct hci_ev_vendor_specific *ev = (void *)skb->data;
2283         __u8 event_sub_code;
2284
2285         BT_DBG("hci_vendor_specific_evt");
2286
2287         skb_pull(skb, sizeof(*ev));
2288         event_sub_code = ev->event_sub_code;
2289
2290         switch (event_sub_code) {
2291         case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
2292                 hci_vendor_specific_group_ext_evt(hdev, skb);
2293                 break;
2294
2295         default:
2296                 break;
2297         }
2298 }
2299 #endif
2300
2301 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2302                            struct sk_buff *skb)
2303 {
2304         struct hci_rp_read_rssi *rp = data;
2305         struct hci_conn *conn;
2306
2307         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2308
2309         if (rp->status)
2310                 return rp->status;
2311
2312         hci_dev_lock(hdev);
2313
2314         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2315         if (conn)
2316                 conn->rssi = rp->rssi;
2317
2318         hci_dev_unlock(hdev);
2319
2320         return rp->status;
2321 }
2322
2323 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2324                                struct sk_buff *skb)
2325 {
2326         struct hci_cp_read_tx_power *sent;
2327         struct hci_rp_read_tx_power *rp = data;
2328         struct hci_conn *conn;
2329
2330         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2331
2332         if (rp->status)
2333                 return rp->status;
2334
2335         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2336         if (!sent)
2337                 return rp->status;
2338
2339         hci_dev_lock(hdev);
2340
2341         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2342         if (!conn)
2343                 goto unlock;
2344
2345         switch (sent->type) {
2346         case 0x00:
2347                 conn->tx_power = rp->tx_power;
2348                 break;
2349         case 0x01:
2350                 conn->max_tx_power = rp->tx_power;
2351                 break;
2352         }
2353
2354 unlock:
2355         hci_dev_unlock(hdev);
2356         return rp->status;
2357 }
2358
2359 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2360                                       struct sk_buff *skb)
2361 {
2362         struct hci_ev_status *rp = data;
2363         u8 *mode;
2364
2365         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2366
2367         if (rp->status)
2368                 return rp->status;
2369
2370         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2371         if (mode)
2372                 hdev->ssp_debug_mode = *mode;
2373
2374         return rp->status;
2375 }
2376
2377 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2378 {
2379         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2380
2381         if (status) {
2382                 hci_conn_check_pending(hdev);
2383                 return;
2384         }
2385
2386         set_bit(HCI_INQUIRY, &hdev->flags);
2387 }
2388
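/* Command status handler for HCI_Create_Connection. On failure the pending
 * ACL connection is torn down, except that a Command Disallowed (0x0c)
 * status is tolerated for the first couple of attempts; on success a
 * connection object is created if none exists yet.
 */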
2389 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2390 {
2391         struct hci_cp_create_conn *cp;
2392         struct hci_conn *conn;
2393
2394         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2395
2396         cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2397         if (!cp)
2398                 return;
2399
2400         hci_dev_lock(hdev);
2401
2402         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2403
2404         bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2405
2406         if (status) {
2407                 if (conn && conn->state == BT_CONNECT) {
2408                         if (status != 0x0c || conn->attempt > 2) {
2409                                 conn->state = BT_CLOSED;
2410                                 hci_connect_cfm(conn, status);
2411                                 hci_conn_del(conn);
2412                         } else
2413                                 conn->state = BT_CONNECT2;
2414                 }
2415         } else {
2416                 if (!conn) {
2417                         conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2418                                             HCI_ROLE_MASTER);
2419                         if (!conn)
2420                                 bt_dev_err(hdev, "no memory for new connection");
2421                 }
2422         }
2423
2424         hci_dev_unlock(hdev);
2425 }
2426
2427 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2428 {
2429         struct hci_cp_add_sco *cp;
2430         struct hci_conn *acl, *sco;
2431         __u16 handle;
2432
2433         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2434
2435         if (!status)
2436                 return;
2437
2438         cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2439         if (!cp)
2440                 return;
2441
2442         handle = __le16_to_cpu(cp->handle);
2443
2444         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2445
2446         hci_dev_lock(hdev);
2447
2448         acl = hci_conn_hash_lookup_handle(hdev, handle);
2449         if (acl) {
2450                 sco = acl->link;
2451                 if (sco) {
2452                         sco->state = BT_CLOSED;
2453
2454                         hci_connect_cfm(sco, status);
2455                         hci_conn_del(sco);
2456                 }
2457         }
2458
2459         hci_dev_unlock(hdev);
2460 }
2461
2462 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2463 {
2464         struct hci_cp_auth_requested *cp;
2465         struct hci_conn *conn;
2466
2467         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2468
2469         if (!status)
2470                 return;
2471
2472         cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2473         if (!cp)
2474                 return;
2475
2476         hci_dev_lock(hdev);
2477
2478         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2479         if (conn) {
2480                 if (conn->state == BT_CONFIG) {
2481                         hci_connect_cfm(conn, status);
2482                         hci_conn_drop(conn);
2483                 }
2484         }
2485
2486         hci_dev_unlock(hdev);
2487 }
2488
2489 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2490 {
2491         struct hci_cp_set_conn_encrypt *cp;
2492         struct hci_conn *conn;
2493
2494         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2495
2496         if (!status)
2497                 return;
2498
2499         cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2500         if (!cp)
2501                 return;
2502
2503         hci_dev_lock(hdev);
2504
2505         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2506         if (conn) {
2507                 if (conn->state == BT_CONFIG) {
2508                         hci_connect_cfm(conn, status);
2509                         hci_conn_drop(conn);
2510                 }
2511         }
2512
2513         hci_dev_unlock(hdev);
2514 }
2515
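/* Decide whether an outgoing connection that is still in BT_CONFIG needs
 * authentication before it can be reported as connected.
 */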
2516 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2517                                     struct hci_conn *conn)
2518 {
2519         if (conn->state != BT_CONFIG || !conn->out)
2520                 return 0;
2521
2522         if (conn->pending_sec_level == BT_SECURITY_SDP)
2523                 return 0;
2524
2525         /* Only request authentication for SSP connections or non-SSP
2526          * devices with sec_level MEDIUM or HIGH or if MITM protection
2527          * is requested.
2528          */
2529         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2530             conn->pending_sec_level != BT_SECURITY_FIPS &&
2531             conn->pending_sec_level != BT_SECURITY_HIGH &&
2532             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2533                 return 0;
2534
2535         return 1;
2536 }
2537
2538 static int hci_resolve_name(struct hci_dev *hdev,
2539                                    struct inquiry_entry *e)
2540 {
2541         struct hci_cp_remote_name_req cp;
2542
2543         memset(&cp, 0, sizeof(cp));
2544
2545         bacpy(&cp.bdaddr, &e->data.bdaddr);
2546         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2547         cp.pscan_mode = e->data.pscan_mode;
2548         cp.clock_offset = e->data.clock_offset;
2549
2550         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2551 }
2552
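/* Pick the next inquiry cache entry that still needs name resolution and
 * issue a Remote Name Request for it. Returns true if a request was sent.
 */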
2553 static bool hci_resolve_next_name(struct hci_dev *hdev)
2554 {
2555         struct discovery_state *discov = &hdev->discovery;
2556         struct inquiry_entry *e;
2557
2558         if (list_empty(&discov->resolve))
2559                 return false;
2560
2561         /* We should stop if we have already spent too much time resolving names. */
2562         if (time_after(jiffies, discov->name_resolve_timeout)) {
2563                 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2564                 return false;
2565         }
2566
2567         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2568         if (!e)
2569                 return false;
2570
2571         if (hci_resolve_name(hdev, e) == 0) {
2572                 e->name_state = NAME_PENDING;
2573                 return true;
2574         }
2575
2576         return false;
2577 }
2578
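/* Called when a remote name becomes known (or resolution fails) to update
 * the management interface and, while discovery is resolving names, either
 * continue with the next pending entry or mark discovery as stopped.
 */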
2579 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2580                                    bdaddr_t *bdaddr, u8 *name, u8 name_len)
2581 {
2582         struct discovery_state *discov = &hdev->discovery;
2583         struct inquiry_entry *e;
2584
2585 #ifdef TIZEN_BT
2586         /* Update the mgmt connected state if necessary. Be careful,
2587          * however, with conn objects that exist but are not (yet)
2588          * connected. Only those in the BT_CONFIG or BT_CONNECTED states
2589          * can be considered connected.
2590          */
2591         if (conn &&
2592             (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2593                 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2594                         mgmt_device_connected(hdev, conn, 0, name, name_len);
2595                 else
2596                         mgmt_device_name_update(hdev, bdaddr, name, name_len);
2597         }
2598 #else
2599         if (conn &&
2600             (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2601             !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2602                 mgmt_device_connected(hdev, conn, name, name_len);
2603 #endif
2604
2605         if (discov->state == DISCOVERY_STOPPED)
2606                 return;
2607
2608         if (discov->state == DISCOVERY_STOPPING)
2609                 goto discov_complete;
2610
2611         if (discov->state != DISCOVERY_RESOLVING)
2612                 return;
2613
2614         e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2615         /* If the device was not found in the list of found devices whose names
2616          * are pending, there is no need to continue resolving the next name, as
2617          * it will be done upon receiving another Remote Name Request Complete
2618          * event. */
2619         if (!e)
2620                 return;
2621
2622         list_del(&e->list);
2623
2624         e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2625         mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2626                          name, name_len);
2627
2628         if (hci_resolve_next_name(hdev))
2629                 return;
2630
2631 discov_complete:
2632         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2633 }
2634
2635 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2636 {
2637         struct hci_cp_remote_name_req *cp;
2638         struct hci_conn *conn;
2639
2640         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2641
2642         /* If successful, wait for the remote name request complete event
2643          * before checking whether authentication is needed. */
2644         if (!status)
2645                 return;
2646
2647         cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2648         if (!cp)
2649                 return;
2650
2651         hci_dev_lock(hdev);
2652
2653         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2654
2655         if (hci_dev_test_flag(hdev, HCI_MGMT))
2656                 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2657
2658         if (!conn)
2659                 goto unlock;
2660
2661         if (!hci_outgoing_auth_needed(hdev, conn))
2662                 goto unlock;
2663
2664         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2665                 struct hci_cp_auth_requested auth_cp;
2666
2667                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2668
2669                 auth_cp.handle = __cpu_to_le16(conn->handle);
2670                 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2671                              sizeof(auth_cp), &auth_cp);
2672         }
2673
2674 unlock:
2675         hci_dev_unlock(hdev);
2676 }
2677
2678 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2679 {
2680         struct hci_cp_read_remote_features *cp;
2681         struct hci_conn *conn;
2682
2683         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2684
2685         if (!status)
2686                 return;
2687
2688         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2689         if (!cp)
2690                 return;
2691
2692         hci_dev_lock(hdev);
2693
2694         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2695         if (conn) {
2696                 if (conn->state == BT_CONFIG) {
2697                         hci_connect_cfm(conn, status);
2698                         hci_conn_drop(conn);
2699                 }
2700         }
2701
2702         hci_dev_unlock(hdev);
2703 }
2704
2705 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2706 {
2707         struct hci_cp_read_remote_ext_features *cp;
2708         struct hci_conn *conn;
2709
2710         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2711
2712         if (!status)
2713                 return;
2714
2715         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2716         if (!cp)
2717                 return;
2718
2719         hci_dev_lock(hdev);
2720
2721         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2722         if (conn) {
2723                 if (conn->state == BT_CONFIG) {
2724                         hci_connect_cfm(conn, status);
2725                         hci_conn_drop(conn);
2726                 }
2727         }
2728
2729         hci_dev_unlock(hdev);
2730 }
2731
2732 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2733 {
2734         struct hci_cp_setup_sync_conn *cp;
2735         struct hci_conn *acl, *sco;
2736         __u16 handle;
2737
2738         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2739
2740         if (!status)
2741                 return;
2742
2743         cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2744         if (!cp)
2745                 return;
2746
2747         handle = __le16_to_cpu(cp->handle);
2748
2749         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2750
2751         hci_dev_lock(hdev);
2752
2753         acl = hci_conn_hash_lookup_handle(hdev, handle);
2754         if (acl) {
2755                 sco = acl->link;
2756                 if (sco) {
2757                         sco->state = BT_CLOSED;
2758
2759                         hci_connect_cfm(sco, status);
2760                         hci_conn_del(sco);
2761                 }
2762         }
2763
2764         hci_dev_unlock(hdev);
2765 }
2766
2767 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2768 {
2769         struct hci_cp_enhanced_setup_sync_conn *cp;
2770         struct hci_conn *acl, *sco;
2771         __u16 handle;
2772
2773         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2774
2775         if (!status)
2776                 return;
2777
2778         cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2779         if (!cp)
2780                 return;
2781
2782         handle = __le16_to_cpu(cp->handle);
2783
2784         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2785
2786         hci_dev_lock(hdev);
2787
2788         acl = hci_conn_hash_lookup_handle(hdev, handle);
2789         if (acl) {
2790                 sco = acl->link;
2791                 if (sco) {
2792                         sco->state = BT_CLOSED;
2793
2794                         hci_connect_cfm(sco, status);
2795                         hci_conn_del(sco);
2796                 }
2797         }
2798
2799         hci_dev_unlock(hdev);
2800 }
2801
2802 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2803 {
2804         struct hci_cp_sniff_mode *cp;
2805         struct hci_conn *conn;
2806
2807         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2808
2809         if (!status)
2810                 return;
2811
2812         cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2813         if (!cp)
2814                 return;
2815
2816         hci_dev_lock(hdev);
2817
2818         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2819         if (conn) {
2820                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2821
2822                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2823                         hci_sco_setup(conn, status);
2824         }
2825
2826         hci_dev_unlock(hdev);
2827 }
2828
2829 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2830 {
2831         struct hci_cp_exit_sniff_mode *cp;
2832         struct hci_conn *conn;
2833
2834         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2835
2836         if (!status)
2837                 return;
2838
2839         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2840         if (!cp)
2841                 return;
2842
2843         hci_dev_lock(hdev);
2844
2845         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2846         if (conn) {
2847                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2848
2849                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2850                         hci_sco_setup(conn, status);
2851         }
2852
2853         hci_dev_unlock(hdev);
2854 }
2855
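/* Command status handler for HCI_Disconnect. On success the connection is
 * normally cleaned up when HCI_EV_DISCONN_COMPLETE arrives; on failure, or
 * while suspended, the cleanup is done here instead.
 */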
2856 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2857 {
2858         struct hci_cp_disconnect *cp;
2859         struct hci_conn_params *params;
2860         struct hci_conn *conn;
2861         bool mgmt_conn;
2862
2863         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2864
2865         /* Wait for HCI_EV_DISCONN_COMPLETE if the status is 0x00 and we are
2866          * not suspended; otherwise clean up the connection immediately.
2867          */
2868         if (!status && !hdev->suspended)
2869                 return;
2870
2871         cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2872         if (!cp)
2873                 return;
2874
2875         hci_dev_lock(hdev);
2876
2877         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2878         if (!conn)
2879                 goto unlock;
2880
2881         if (status) {
2882                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2883                                        conn->dst_type, status);
2884
2885                 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2886                         hdev->cur_adv_instance = conn->adv_instance;
2887                         hci_enable_advertising(hdev);
2888                 }
2889
2890                 goto done;
2891         }
2892
2893         mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2894
2895         if (conn->type == ACL_LINK) {
2896                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2897                         hci_remove_link_key(hdev, &conn->dst);
2898         }
2899
2900         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2901         if (params) {
2902                 switch (params->auto_connect) {
2903                 case HCI_AUTO_CONN_LINK_LOSS:
2904                         if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2905                                 break;
2906                         fallthrough;
2907
2908                 case HCI_AUTO_CONN_DIRECT:
2909                 case HCI_AUTO_CONN_ALWAYS:
2910                         list_del_init(&params->action);
2911                         list_add(&params->action, &hdev->pend_le_conns);
2912                         break;
2913
2914                 default:
2915                         break;
2916                 }
2917         }
2918
2919         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2920                                  cp->reason, mgmt_conn);
2921
2922         hci_disconn_cfm(conn, cp->reason);
2923
2924 done:
2925         /* If the disconnection failed for any reason, the upper layer
2926          * does not retry the disconnect in the current implementation.
2927          * Hence, we need to do some basic cleanup here and re-enable
2928          * advertising if necessary.
2929          */
2930         hci_conn_del(conn);
2931 unlock:
2932         hci_dev_unlock(hdev);
2933 }
2934
2935 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2936 {
2937         /* When using controller-based address resolution, the new address
2938          * types 0x02 and 0x03 are used. These types need to be converted
2939          * back into either the public or the random address type.
2940          */
2941         switch (type) {
2942         case ADDR_LE_DEV_PUBLIC_RESOLVED:
2943                 if (resolved)
2944                         *resolved = true;
2945                 return ADDR_LE_DEV_PUBLIC;
2946         case ADDR_LE_DEV_RANDOM_RESOLVED:
2947                 if (resolved)
2948                         *resolved = true;
2949                 return ADDR_LE_DEV_RANDOM;
2950         }
2951
2952         if (resolved)
2953                 *resolved = false;
2954         return type;
2955 }
2956
2957 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2958                               u8 peer_addr_type, u8 own_address_type,
2959                               u8 filter_policy)
2960 {
2961         struct hci_conn *conn;
2962
2963         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2964                                        peer_addr_type);
2965         if (!conn)
2966                 return;
2967
2968         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2969
2970         /* Store the initiator and responder address information which
2971          * is needed for SMP. These values will not change during the
2972          * lifetime of the connection.
2973          */
2974         conn->init_addr_type = own_address_type;
2975         if (own_address_type == ADDR_LE_DEV_RANDOM)
2976                 bacpy(&conn->init_addr, &hdev->random_addr);
2977         else
2978                 bacpy(&conn->init_addr, &hdev->bdaddr);
2979
2980         conn->resp_addr_type = peer_addr_type;
2981         bacpy(&conn->resp_addr, peer_addr);
2982 }
2983
2984 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2985 {
2986         struct hci_cp_le_create_conn *cp;
2987
2988         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2989
2990         /* All connection failure handling is taken care of by the
2991          * hci_conn_failed function which is triggered by the HCI
2992          * request completion callbacks used for connecting.
2993          */
2994         if (status)
2995                 return;
2996
2997         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2998         if (!cp)
2999                 return;
3000
3001         hci_dev_lock(hdev);
3002
3003         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3004                           cp->own_address_type, cp->filter_policy);
3005
3006         hci_dev_unlock(hdev);
3007 }
3008
3009 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
3010 {
3011         struct hci_cp_le_ext_create_conn *cp;
3012
3013         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3014
3015         /* All connection failure handling is taken care of by the
3016          * hci_conn_failed function which is triggered by the HCI
3017          * request completion callbacks used for connecting.
3018          */
3019         if (status)
3020                 return;
3021
3022         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
3023         if (!cp)
3024                 return;
3025
3026         hci_dev_lock(hdev);
3027
3028         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3029                           cp->own_addr_type, cp->filter_policy);
3030
3031         hci_dev_unlock(hdev);
3032 }
3033
3034 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
3035 {
3036         struct hci_cp_le_read_remote_features *cp;
3037         struct hci_conn *conn;
3038
3039         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3040
3041         if (!status)
3042                 return;
3043
3044         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
3045         if (!cp)
3046                 return;
3047
3048         hci_dev_lock(hdev);
3049
3050         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3051         if (conn) {
3052                 if (conn->state == BT_CONFIG) {
3053                         hci_connect_cfm(conn, status);
3054                         hci_conn_drop(conn);
3055                 }
3056         }
3057
3058         hci_dev_unlock(hdev);
3059 }
3060
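/* Command status handler for HCI_LE_Start_Encryption. A failure at this
 * stage cannot be recovered, so if the link is still connected it is
 * disconnected with an authentication failure.
 */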
3061 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3062 {
3063         struct hci_cp_le_start_enc *cp;
3064         struct hci_conn *conn;
3065
3066         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3067
3068         if (!status)
3069                 return;
3070
3071         hci_dev_lock(hdev);
3072
3073         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3074         if (!cp)
3075                 goto unlock;
3076
3077         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3078         if (!conn)
3079                 goto unlock;
3080
3081         if (conn->state != BT_CONNECTED)
3082                 goto unlock;
3083
3084         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3085         hci_conn_drop(conn);
3086
3087 unlock:
3088         hci_dev_unlock(hdev);
3089 }
3090
3091 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3092 {
3093         struct hci_cp_switch_role *cp;
3094         struct hci_conn *conn;
3095
3096         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3097
3098         if (!status)
3099                 return;
3100
3101         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3102         if (!cp)
3103                 return;
3104
3105         hci_dev_lock(hdev);
3106
3107         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3108         if (conn)
3109                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3110
3111         hci_dev_unlock(hdev);
3112 }
3113
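/* Inquiry Complete event: clear the HCI_INQUIRY flag and, when mgmt-driven
 * discovery is active, either start resolving pending names or mark the
 * discovery as stopped.
 */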
3114 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3115                                      struct sk_buff *skb)
3116 {
3117         struct hci_ev_status *ev = data;
3118         struct discovery_state *discov = &hdev->discovery;
3119         struct inquiry_entry *e;
3120
3121         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3122
3123         hci_conn_check_pending(hdev);
3124
3125         if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3126                 return;
3127
3128         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3129         wake_up_bit(&hdev->flags, HCI_INQUIRY);
3130
3131         if (!hci_dev_test_flag(hdev, HCI_MGMT))
3132                 return;
3133
3134         hci_dev_lock(hdev);
3135
3136         if (discov->state != DISCOVERY_FINDING)
3137                 goto unlock;
3138
3139         if (list_empty(&discov->resolve)) {
3140                 /* When BR/EDR inquiry is active and no LE scanning is in
3141                  * progress, then change discovery state to indicate completion.
3142                  *
3143                  * When running LE scanning and BR/EDR inquiry simultaneously
3144                  * and the LE scan already finished, then change the discovery
3145                  * state to indicate completion.
3146                  */
3147                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3148                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3149                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3150                 goto unlock;
3151         }
3152
3153         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3154         if (e && hci_resolve_name(hdev, e) == 0) {
3155                 e->name_state = NAME_PENDING;
3156                 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3157                 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3158         } else {
3159                 /* When BR/EDR inquiry is active and no LE scanning is in
3160                  * progress, then change discovery state to indicate completion.
3161                  *
3162                  * When running LE scanning and BR/EDR inquiry simultaneously
3163                  * and the LE scan already finished, then change the discovery
3164                  * state to indicate completion.
3165                  */
3166                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3167                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3168                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3169         }
3170
3171 unlock:
3172         hci_dev_unlock(hdev);
3173 }
3174
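/* Standard Inquiry Result event: update the inquiry cache and report each
 * found device to the management interface (no RSSI or extended inquiry
 * data is available in this event format).
 */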
3175 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3176                                    struct sk_buff *skb)
3177 {
3178         struct hci_ev_inquiry_result *ev = edata;
3179         struct inquiry_data data;
3180         int i;
3181
3182         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3183                              flex_array_size(ev, info, ev->num)))
3184                 return;
3185
3186         bt_dev_dbg(hdev, "num %d", ev->num);
3187
3188         if (!ev->num)
3189                 return;
3190
3191         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3192                 return;
3193
3194         hci_dev_lock(hdev);
3195
3196         for (i = 0; i < ev->num; i++) {
3197                 struct inquiry_info *info = &ev->info[i];
3198                 u32 flags;
3199
3200                 bacpy(&data.bdaddr, &info->bdaddr);
3201                 data.pscan_rep_mode     = info->pscan_rep_mode;
3202                 data.pscan_period_mode  = info->pscan_period_mode;
3203                 data.pscan_mode         = info->pscan_mode;
3204                 memcpy(data.dev_class, info->dev_class, 3);
3205                 data.clock_offset       = info->clock_offset;
3206                 data.rssi               = HCI_RSSI_INVALID;
3207                 data.ssp_mode           = 0x00;
3208
3209                 flags = hci_inquiry_cache_update(hdev, &data, false);
3210
3211                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3212                                   info->dev_class, HCI_RSSI_INVALID,
3213                                   flags, NULL, 0, NULL, 0, 0);
3214         }
3215
3216         hci_dev_unlock(hdev);
3217 }
3218
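/* Connection Complete event: finish setting up the ACL/SCO connection on
 * success (connection handle, state, remote features, packet type) or tear
 * it down on failure.
 */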
3219 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3220                                   struct sk_buff *skb)
3221 {
3222         struct hci_ev_conn_complete *ev = data;
3223         struct hci_conn *conn;
3224         u8 status = ev->status;
3225
3226         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3227
3228         hci_dev_lock(hdev);
3229
3230         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3231         if (!conn) {
3232                 /* In case of error status and there is no connection pending
3233                  * just unlock as there is nothing to cleanup.
3234                  */
3235                 if (ev->status)
3236                         goto unlock;
3237
3238                 /* Connection may not exist if auto-connected. Check the BR/EDR
3239                  * allowlist to see if this device is allowed to auto connect.
3240                  * If the link is an ACL type, create the connection object
3241                  * automatically.
3242                  *
3243                  * Auto-connect will only occur if the event filter is
3244                  * programmed with a given address. Right now, the event filter
3245                  * is only used during suspend.
3246                  */
3247                 if (ev->link_type == ACL_LINK &&
3248                     hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3249                                                       &ev->bdaddr,
3250                                                       BDADDR_BREDR)) {
3251                         conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3252                                             HCI_ROLE_SLAVE);
3253                         if (!conn) {
3254                                 bt_dev_err(hdev, "no memory for new conn");
3255                                 goto unlock;
3256                         }
3257                 } else {
3258                         if (ev->link_type != SCO_LINK)
3259                                 goto unlock;
3260
3261                         conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3262                                                        &ev->bdaddr);
3263                         if (!conn)
3264                                 goto unlock;
3265
3266                         conn->type = SCO_LINK;
3267                 }
3268         }
3269
3270         /* The HCI_Connection_Complete event is only sent once per connection.
3271          * Processing it more than once per connection can corrupt kernel memory.
3272          *
3273          * Since the connection handle is only set here, a handle that is no
3274          * longer HCI_CONN_HANDLE_UNSET indicates the connection was already
3275          * set up.
3276          */
3276         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
3277                 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3278                 goto unlock;
3279         }
3280
3281         if (!status) {
3282                 conn->handle = __le16_to_cpu(ev->handle);
3283                 if (conn->handle > HCI_CONN_HANDLE_MAX) {
3284                         bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
3285                                    conn->handle, HCI_CONN_HANDLE_MAX);
3286                         status = HCI_ERROR_INVALID_PARAMETERS;
3287                         goto done;
3288                 }
3289
3290                 if (conn->type == ACL_LINK) {
3291                         conn->state = BT_CONFIG;
3292                         hci_conn_hold(conn);
3293
3294                         if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3295                             !hci_find_link_key(hdev, &ev->bdaddr))
3296                                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3297                         else
3298                                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3299                 } else
3300                         conn->state = BT_CONNECTED;
3301
3302                 hci_debugfs_create_conn(conn);
3303                 hci_conn_add_sysfs(conn);
3304
3305                 if (test_bit(HCI_AUTH, &hdev->flags))
3306                         set_bit(HCI_CONN_AUTH, &conn->flags);
3307
3308                 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3309                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3310
3311                 /* Get remote features */
3312                 if (conn->type == ACL_LINK) {
3313                         struct hci_cp_read_remote_features cp;
3314                         cp.handle = ev->handle;
3315                         hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3316                                      sizeof(cp), &cp);
3317
3318                         hci_update_scan(hdev);
3319                 }
3320
3321                 /* Set packet type for incoming connection */
3322                 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3323                         struct hci_cp_change_conn_ptype cp;
3324                         cp.handle = ev->handle;
3325                         cp.pkt_type = cpu_to_le16(conn->pkt_type);
3326                         hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3327                                      &cp);
3328                 }
3329         }
3330
3331         if (conn->type == ACL_LINK)
3332                 hci_sco_setup(conn, ev->status);
3333
3334 done:
3335         if (status) {
3336                 hci_conn_failed(conn, status);
3337         } else if (ev->link_type == SCO_LINK) {
3338                 switch (conn->setting & SCO_AIRMODE_MASK) {
3339                 case SCO_AIRMODE_CVSD:
3340                         if (hdev->notify)
3341                                 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3342                         break;
3343                 }
3344
3345                 hci_connect_cfm(conn, status);
3346         }
3347
3348 unlock:
3349         hci_dev_unlock(hdev);
3350
3351         hci_conn_check_pending(hdev);
3352 }
3353
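/* Reject an incoming connection request. HCI_ERROR_REJ_BAD_ADDR maps to
 * the "Connection Rejected due to Unacceptable BD_ADDR" reason that the
 * controller reports back to the remote device.
 */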
3354 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3355 {
3356         struct hci_cp_reject_conn_req cp;
3357
3358         bacpy(&cp.bdaddr, bdaddr);
3359         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3360         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3361 }
3362
3363 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3364                                  struct sk_buff *skb)
3365 {
3366         struct hci_ev_conn_request *ev = data;
3367         int mask = hdev->link_mode;
3368         struct inquiry_entry *ie;
3369         struct hci_conn *conn;
3370         __u8 flags = 0;
3371
3372         bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3373
3374         mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3375                                       &flags);
3376
3377         if (!(mask & HCI_LM_ACCEPT)) {
3378                 hci_reject_conn(hdev, &ev->bdaddr);
3379                 return;
3380         }
3381
3382         hci_dev_lock(hdev);
3383
3384         if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3385                                    BDADDR_BREDR)) {
3386                 hci_reject_conn(hdev, &ev->bdaddr);
3387                 goto unlock;
3388         }
3389
3390         /* Require HCI_CONNECTABLE or an accept list entry to accept the
3391          * connection. These features are only touched through mgmt so
3392          * only do the checks if HCI_MGMT is set.
3393          */
3394         if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3395             !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3396             !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3397                                                BDADDR_BREDR)) {
3398                 hci_reject_conn(hdev, &ev->bdaddr);
3399                 goto unlock;
3400         }
3401
3402         /* Connection accepted */
3403
3404         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3405         if (ie)
3406                 memcpy(ie->data.dev_class, ev->dev_class, 3);
3407
3408         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3409                         &ev->bdaddr);
3410         if (!conn) {
3411                 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3412                                     HCI_ROLE_SLAVE);
3413                 if (!conn) {
3414                         bt_dev_err(hdev, "no memory for new connection");
3415                         goto unlock;
3416                 }
3417         }
3418
3419         memcpy(conn->dev_class, ev->dev_class, 3);
3420
3421         hci_dev_unlock(hdev);
3422
3423         if (ev->link_type == ACL_LINK ||
3424             (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3425                 struct hci_cp_accept_conn_req cp;
3426                 conn->state = BT_CONNECT;
3427
3428                 bacpy(&cp.bdaddr, &ev->bdaddr);
3429
3430                 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3431                         cp.role = 0x00; /* Become central */
3432                 else
3433                         cp.role = 0x01; /* Remain peripheral */
3434
3435                 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3436         } else if (!(flags & HCI_PROTO_DEFER)) {
3437                 struct hci_cp_accept_sync_conn_req cp;
3438                 conn->state = BT_CONNECT;
3439
3440                 bacpy(&cp.bdaddr, &ev->bdaddr);
3441                 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3442
3443                 cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3444                 cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3445                 cp.max_latency    = cpu_to_le16(0xffff);
3446                 cp.content_format = cpu_to_le16(hdev->voice_setting);
3447                 cp.retrans_effort = 0xff;
3448
3449                 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3450                              &cp);
3451         } else {
3452                 conn->state = BT_CONNECT2;
3453                 hci_connect_cfm(conn, 0);
3454         }
3455
3456         return;
3457 unlock:
3458         hci_dev_unlock(hdev);
3459 }
3460
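/* Translate an HCI disconnect reason code into the MGMT disconnect reason
 * reported to userspace; codes without an explicit mapping fall back to
 * MGMT_DEV_DISCONN_UNKNOWN.
 */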
3461 static u8 hci_to_mgmt_reason(u8 err)
3462 {
3463         switch (err) {
3464         case HCI_ERROR_CONNECTION_TIMEOUT:
3465                 return MGMT_DEV_DISCONN_TIMEOUT;
3466         case HCI_ERROR_REMOTE_USER_TERM:
3467         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3468         case HCI_ERROR_REMOTE_POWER_OFF:
3469                 return MGMT_DEV_DISCONN_REMOTE;
3470         case HCI_ERROR_LOCAL_HOST_TERM:
3471                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3472         default:
3473                 return MGMT_DEV_DISCONN_UNKNOWN;
3474         }
3475 }
3476
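/* Handle HCI_Disconnection_Complete. A failed disconnect is only reported
 * to MGMT; otherwise the disconnect is reported, a flushable link key is
 * removed for ACL links, auto-connect parameters are re-queued onto the
 * pending LE connection list, advertising is re-enabled for peripheral LE
 * links if needed and the connection is finally deleted.
 */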
3477 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3478                                      struct sk_buff *skb)
3479 {
3480         struct hci_ev_disconn_complete *ev = data;
3481         u8 reason;
3482         struct hci_conn_params *params;
3483         struct hci_conn *conn;
3484         bool mgmt_connected;
3485
3486         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3487
3488         hci_dev_lock(hdev);
3489
3490         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3491         if (!conn)
3492                 goto unlock;
3493
3494         if (ev->status) {
3495                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3496                                        conn->dst_type, ev->status);
3497                 goto unlock;
3498         }
3499
3500         conn->state = BT_CLOSED;
3501
3502         mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3503
3504         if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3505                 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3506         else
3507                 reason = hci_to_mgmt_reason(ev->reason);
3508
3509         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3510                                 reason, mgmt_connected);
3511
3512         if (conn->type == ACL_LINK) {
3513                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3514                         hci_remove_link_key(hdev, &conn->dst);
3515
3516                 hci_update_scan(hdev);
3517         }
3518
3519         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3520         if (params) {
3521                 switch (params->auto_connect) {
3522                 case HCI_AUTO_CONN_LINK_LOSS:
3523                         if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3524                                 break;
3525                         fallthrough;
3526
3527                 case HCI_AUTO_CONN_DIRECT:
3528                 case HCI_AUTO_CONN_ALWAYS:
3529                         list_del_init(&params->action);
3530                         list_add(&params->action, &hdev->pend_le_conns);
3531                         hci_update_passive_scan(hdev);
3532                         break;
3533
3534                 default:
3535                         break;
3536                 }
3537         }
3538
3539         hci_disconn_cfm(conn, ev->reason);
3540
3541         /* Re-enable advertising if necessary, since it might
3542          * have been disabled by the connection. From the
3543          * HCI_LE_Set_Advertise_Enable command description in
3544          * the core specification (v4.0):
3545          * "The Controller shall continue advertising until the Host
3546          * issues an LE_Set_Advertise_Enable command with
3547          * Advertising_Enable set to 0x00 (Advertising is disabled)
3548          * or until a connection is created or until the Advertising
3549          * is timed out due to Directed Advertising."
3550          */
3551         if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3552                 hdev->cur_adv_instance = conn->adv_instance;
3553                 hci_enable_advertising(hdev);
3554         }
3555
3556         hci_conn_del(conn);
3557
3558 unlock:
3559         hci_dev_unlock(hdev);
3560 }
3561
3562 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3563                                   struct sk_buff *skb)
3564 {
3565         struct hci_ev_auth_complete *ev = data;
3566         struct hci_conn *conn;
3567
3568         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3569
3570         hci_dev_lock(hdev);
3571
3572         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3573         if (!conn)
3574                 goto unlock;
3575
3576         if (!ev->status) {
3577                 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3578
3579                 if (!hci_conn_ssp_enabled(conn) &&
3580                     test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3581                         bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3582                 } else {
3583                         set_bit(HCI_CONN_AUTH, &conn->flags);
3584                         conn->sec_level = conn->pending_sec_level;
3585                 }
3586         } else {
3587                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3588                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3589
3590                 mgmt_auth_failed(conn, ev->status);
3591         }
3592
3593         clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3594         clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3595
3596         if (conn->state == BT_CONFIG) {
3597                 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3598                         struct hci_cp_set_conn_encrypt cp;
3599                         cp.handle  = ev->handle;
3600                         cp.encrypt = 0x01;
3601                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3602                                      &cp);
3603                 } else {
3604                         conn->state = BT_CONNECTED;
3605                         hci_connect_cfm(conn, ev->status);
3606                         hci_conn_drop(conn);
3607                 }
3608         } else {
3609                 hci_auth_cfm(conn, ev->status);
3610
3611                 hci_conn_hold(conn);
3612                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3613                 hci_conn_drop(conn);
3614         }
3615
3616         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3617                 if (!ev->status) {
3618                         struct hci_cp_set_conn_encrypt cp;
3619                         cp.handle  = ev->handle;
3620                         cp.encrypt = 0x01;
3621                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3622                                      &cp);
3623                 } else {
3624                         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3625                         hci_encrypt_cfm(conn, ev->status);
3626                 }
3627         }
3628
3629 unlock:
3630         hci_dev_unlock(hdev);
3631 }
3632
3633 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3634                                 struct sk_buff *skb)
3635 {
3636         struct hci_ev_remote_name *ev = data;
3637         struct hci_conn *conn;
3638
3639         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3640
3641         hci_conn_check_pending(hdev);
3642
3643         hci_dev_lock(hdev);
3644
3645         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3646
3647         if (!hci_dev_test_flag(hdev, HCI_MGMT))
3648                 goto check_auth;
3649
3650         if (ev->status == 0)
3651                 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3652                                        strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3653         else
3654                 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3655
3656 check_auth:
3657         if (!conn)
3658                 goto unlock;
3659
3660         if (!hci_outgoing_auth_needed(hdev, conn))
3661                 goto unlock;
3662
3663         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3664                 struct hci_cp_auth_requested cp;
3665
3666                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3667
3668                 cp.handle = __cpu_to_le16(conn->handle);
3669                 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3670         }
3671
3672 unlock:
3673         hci_dev_unlock(hdev);
3674 }
3675
3676 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3677                                    struct sk_buff *skb)
3678 {
3679         struct hci_ev_encrypt_change *ev = data;
3680         struct hci_conn *conn;
3681
3682         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3683
3684         hci_dev_lock(hdev);
3685
3686         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3687         if (!conn)
3688                 goto unlock;
3689
3690         if (!ev->status) {
3691                 if (ev->encrypt) {
3692                         /* Encryption implies authentication */
3693                         set_bit(HCI_CONN_AUTH, &conn->flags);
3694                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3695                         conn->sec_level = conn->pending_sec_level;
3696
3697                         /* P-256 authentication key implies FIPS */
3698                         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3699                                 set_bit(HCI_CONN_FIPS, &conn->flags);
3700
3701                         if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3702                             conn->type == LE_LINK)
3703                                 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3704                 } else {
3705                         clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3706                         clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3707                 }
3708         }
3709
3710         /* We should disregard the current RPA and generate a new one
3711          * whenever the encryption procedure fails.
3712          */
3713         if (ev->status && conn->type == LE_LINK) {
3714                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3715                 hci_adv_instances_set_rpa_expired(hdev, true);
3716         }
3717
3718         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3719
3720         /* Check link security requirements are met */
3721         if (!hci_conn_check_link_mode(conn))
3722                 ev->status = HCI_ERROR_AUTH_FAILURE;
3723
3724         if (ev->status && conn->state == BT_CONNECTED) {
3725                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3726                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3727
3728                 /* Notify upper layers so they can clean up before
3729                  * disconnecting.
3730                  */
3731                 hci_encrypt_cfm(conn, ev->status);
3732                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3733                 hci_conn_drop(conn);
3734                 goto unlock;
3735         }
3736
3737         /* Try reading the encryption key size for encrypted ACL links */
3738         if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3739                 struct hci_cp_read_enc_key_size cp;
3740
3741                 /* Only send HCI_Read_Encryption_Key_Size if the
3742                  * controller really supports it. If it doesn't, assume
3743                  * the default size (16).
3744                  */
3745                 if (!(hdev->commands[20] & 0x10)) {
3746                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3747                         goto notify;
3748                 }
3749
3750                 cp.handle = cpu_to_le16(conn->handle);
3751                 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3752                                  sizeof(cp), &cp)) {
3753                         bt_dev_err(hdev, "sending read key size failed");
3754                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3755                         goto notify;
3756                 }
3757
3758                 goto unlock;
3759         }
3760
3761         /* Set the default Authenticated Payload Timeout after
3762          * an LE link is established. As per Core Spec v5.0, Vol 2, Part B
3763          * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3764          * sent when the link is active and encryption is enabled. The conn
3765          * type can be either LE or ACL, and the controller must support
3766          * LMP Ping. AES-CCM encryption is also required.
3767          */
3768         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3769             test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3770             ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3771              (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3772                 struct hci_cp_write_auth_payload_to cp;
3773
3774                 cp.handle = cpu_to_le16(conn->handle);
3775                 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3776                 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3777                              sizeof(cp), &cp);
3778         }
3779
3780 notify:
3781         hci_encrypt_cfm(conn, ev->status);
3782
3783 unlock:
3784         hci_dev_unlock(hdev);
3785 }
3786
3787 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3788                                              struct sk_buff *skb)
3789 {
3790         struct hci_ev_change_link_key_complete *ev = data;
3791         struct hci_conn *conn;
3792
3793         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3794
3795         hci_dev_lock(hdev);
3796
3797         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3798         if (conn) {
3799                 if (!ev->status)
3800                         set_bit(HCI_CONN_SECURE, &conn->flags);
3801
3802                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3803
3804                 hci_key_change_cfm(conn, ev->status);
3805         }
3806
3807         hci_dev_unlock(hdev);
3808 }
3809
3810 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3811                                     struct sk_buff *skb)
3812 {
3813         struct hci_ev_remote_features *ev = data;
3814         struct hci_conn *conn;
3815
3816         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3817
3818         hci_dev_lock(hdev);
3819
3820         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3821         if (!conn)
3822                 goto unlock;
3823
3824         if (!ev->status)
3825                 memcpy(conn->features[0], ev->features, 8);
3826
3827         if (conn->state != BT_CONFIG)
3828                 goto unlock;
3829
3830         if (!ev->status && lmp_ext_feat_capable(hdev) &&
3831             lmp_ext_feat_capable(conn)) {
3832                 struct hci_cp_read_remote_ext_features cp;
3833                 cp.handle = ev->handle;
3834                 cp.page = 0x01;
3835                 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3836                              sizeof(cp), &cp);
3837                 goto unlock;
3838         }
3839
3840         if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3841                 struct hci_cp_remote_name_req cp;
3842                 memset(&cp, 0, sizeof(cp));
3843                 bacpy(&cp.bdaddr, &conn->dst);
3844                 cp.pscan_rep_mode = 0x02;
3845                 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3846         } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3847                 mgmt_device_connected(hdev, conn, NULL, 0);
3848
3849         if (!hci_outgoing_auth_needed(hdev, conn)) {
3850                 conn->state = BT_CONNECTED;
3851                 hci_connect_cfm(conn, ev->status);
3852                 hci_conn_drop(conn);
3853         }
3854
3855 unlock:
3856         hci_dev_unlock(hdev);
3857 }
3858
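/* Process the Num_HCI_Command_Packets field of Command Complete/Status
 * events: a non-zero value re-opens the command queue by setting cmd_cnt
 * back to 1, while a zero value (re-)arms the ncmd timer, presumably as a
 * safeguard against a controller that never frees a command slot.
 */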
3859 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3860 {
3861         cancel_delayed_work(&hdev->cmd_timer);
3862
3863         rcu_read_lock();
3864         if (!test_bit(HCI_RESET, &hdev->flags)) {
3865                 if (ncmd) {
3866                         cancel_delayed_work(&hdev->ncmd_timer);
3867                         atomic_set(&hdev->cmd_cnt, 1);
3868                 } else {
3869                         if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3870                                 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3871                                                    HCI_NCMD_TIMEOUT);
3872                 }
3873         }
3874         rcu_read_unlock();
3875 }
3876
3877 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3878                                         struct sk_buff *skb)
3879 {
3880         struct hci_rp_le_read_buffer_size_v2 *rp = data;
3881
3882         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3883
3884         if (rp->status)
3885                 return rp->status;
3886
3887         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3888         hdev->le_pkts  = rp->acl_max_pkt;
3889         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3890         hdev->iso_pkts = rp->iso_max_pkt;
3891
3892         hdev->le_cnt  = hdev->le_pkts;
3893         hdev->iso_cnt = hdev->iso_pkts;
3894
3895         BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3896                hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3897
3898         return rp->status;
3899 }
3900
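/* On LE Set CIG Parameters failure every connection in the CIG is torn
 * down. On success the returned CIS handles are assigned, in order, to
 * the not-yet-connected ISO links of that CIG, and a CIS is created
 * immediately whenever the underlying LE link is already connected.
 */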
3901 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3902                                    struct sk_buff *skb)
3903 {
3904         struct hci_rp_le_set_cig_params *rp = data;
3905         struct hci_conn *conn;
3906         int i = 0;
3907
3908         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3909
3910         hci_dev_lock(hdev);
3911
3912         if (rp->status) {
3913                 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
3914                         conn->state = BT_CLOSED;
3915                         hci_connect_cfm(conn, rp->status);
3916                         hci_conn_del(conn);
3917                 }
3918                 goto unlock;
3919         }
3920
3921         rcu_read_lock();
3922
3923         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
3924                 if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
3925                     conn->state == BT_CONNECTED)
3926                         continue;
3927
3928                 conn->handle = __le16_to_cpu(rp->handle[i++]);
3929
3930                 bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
3931                            conn->handle, conn->link);
3932
3933                 /* Create CIS if LE is already connected */
3934                 if (conn->link && conn->link->state == BT_CONNECTED) {
3935                         rcu_read_unlock();
3936                         hci_le_create_cis(conn->link);
3937                         rcu_read_lock();
3938                 }
3939
3940                 if (i == rp->num_handles)
3941                         break;
3942         }
3943
3944         rcu_read_unlock();
3945
3946 unlock:
3947         hci_dev_unlock(hdev);
3948
3949         return rp->status;
3950 }
3951
3952 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3953                                    struct sk_buff *skb)
3954 {
3955         struct hci_rp_le_setup_iso_path *rp = data;
3956         struct hci_cp_le_setup_iso_path *cp;
3957         struct hci_conn *conn;
3958
3959         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3960
3961         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3962         if (!cp)
3963                 return rp->status;
3964
3965         hci_dev_lock(hdev);
3966
3967         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3968         if (!conn)
3969                 goto unlock;
3970
3971         if (rp->status) {
3972                 hci_connect_cfm(conn, rp->status);
3973                 hci_conn_del(conn);
3974                 goto unlock;
3975         }
3976
3977         switch (cp->direction) {
3978         /* Input (Host to Controller) */
3979         case 0x00:
3980                 /* Only confirm connection if output only */
3981                 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
3982                         hci_connect_cfm(conn, rp->status);
3983                 break;
3984         /* Output (Controller to Host) */
3985         case 0x01:
3986                 /* Confirm connection since conn->iso_qos is always configured
3987                  * last.
3988                  */
3989                 hci_connect_cfm(conn, rp->status);
3990                 break;
3991         }
3992
3993 unlock:
3994         hci_dev_unlock(hdev);
3995         return rp->status;
3996 }
3997
3998 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3999 {
4000         bt_dev_dbg(hdev, "status 0x%2.2x", status);
4001 }
4002
4003 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
4004                                    struct sk_buff *skb)
4005 {
4006         struct hci_ev_status *rp = data;
4007         struct hci_cp_le_set_per_adv_params *cp;
4008
4009         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4010
4011         if (rp->status)
4012                 return rp->status;
4013
4014         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
4015         if (!cp)
4016                 return rp->status;
4017
4018         /* TODO: set the conn state */
4019         return rp->status;
4020 }
4021
4022 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
4023                                        struct sk_buff *skb)
4024 {
4025         struct hci_ev_status *rp = data;
4026         __u8 *sent;
4027
4028         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4029
4030         if (rp->status)
4031                 return rp->status;
4032
4033         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
4034         if (!sent)
4035                 return rp->status;
4036
4037         hci_dev_lock(hdev);
4038
4039         if (*sent)
4040                 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
4041         else
4042                 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4043
4044         hci_dev_unlock(hdev);
4045
4046         return rp->status;
4047 }
4048
4049 #define HCI_CC_VL(_op, _func, _min, _max) \
4050 { \
4051         .op = _op, \
4052         .func = _func, \
4053         .min_len = _min, \
4054         .max_len = _max, \
4055 }
4056
4057 #define HCI_CC(_op, _func, _len) \
4058         HCI_CC_VL(_op, _func, _len, _len)
4059
4060 #define HCI_CC_STATUS(_op, _func) \
4061         HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4062
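/* Command Complete dispatch table: each entry ties an opcode to its
 * handler along with the minimum and maximum expected parameter length.
 * For example, HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset) expands to an
 * entry whose min_len and max_len are both sizeof(struct hci_ev_status).
 */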
4063 static const struct hci_cc {
4064         u16  op;
4065         u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4066         u16  min_len;
4067         u16  max_len;
4068 } hci_cc_table[] = {
4069         HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4070         HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4071         HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4072         HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4073                       hci_cc_remote_name_req_cancel),
4074         HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4075                sizeof(struct hci_rp_role_discovery)),
4076         HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4077                sizeof(struct hci_rp_read_link_policy)),
4078         HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4079                sizeof(struct hci_rp_write_link_policy)),
4080         HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4081                sizeof(struct hci_rp_read_def_link_policy)),
4082         HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4083                       hci_cc_write_def_link_policy),
4084         HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4085         HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4086                sizeof(struct hci_rp_read_stored_link_key)),
4087         HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4088                sizeof(struct hci_rp_delete_stored_link_key)),
4089         HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4090         HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4091                sizeof(struct hci_rp_read_local_name)),
4092         HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4093         HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4094         HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4095         HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4096         HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4097                sizeof(struct hci_rp_read_class_of_dev)),
4098         HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4099         HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4100                sizeof(struct hci_rp_read_voice_setting)),
4101         HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4102         HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4103                sizeof(struct hci_rp_read_num_supported_iac)),
4104         HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4105         HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4106         HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4107                sizeof(struct hci_rp_read_auth_payload_to)),
4108         HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4109                sizeof(struct hci_rp_write_auth_payload_to)),
4110         HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4111                sizeof(struct hci_rp_read_local_version)),
4112         HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4113                sizeof(struct hci_rp_read_local_commands)),
4114         HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4115                sizeof(struct hci_rp_read_local_features)),
4116         HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4117                sizeof(struct hci_rp_read_local_ext_features)),
4118         HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4119                sizeof(struct hci_rp_read_buffer_size)),
4120         HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4121                sizeof(struct hci_rp_read_bd_addr)),
4122         HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4123                sizeof(struct hci_rp_read_local_pairing_opts)),
4124         HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4125                sizeof(struct hci_rp_read_page_scan_activity)),
4126         HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4127                       hci_cc_write_page_scan_activity),
4128         HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4129                sizeof(struct hci_rp_read_page_scan_type)),
4130         HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4131         HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4132                sizeof(struct hci_rp_read_data_block_size)),
4133         HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4134                sizeof(struct hci_rp_read_flow_control_mode)),
4135         HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4136                sizeof(struct hci_rp_read_local_amp_info)),
4137         HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4138                sizeof(struct hci_rp_read_clock)),
4139         HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4140                sizeof(struct hci_rp_read_enc_key_size)),
4141         HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4142                sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4143         HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4144                hci_cc_read_def_err_data_reporting,
4145                sizeof(struct hci_rp_read_def_err_data_reporting)),
4146         HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4147                       hci_cc_write_def_err_data_reporting),
4148         HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4149                sizeof(struct hci_rp_pin_code_reply)),
4150         HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4151                sizeof(struct hci_rp_pin_code_neg_reply)),
4152         HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4153                sizeof(struct hci_rp_read_local_oob_data)),
4154         HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4155                sizeof(struct hci_rp_read_local_oob_ext_data)),
4156         HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4157                sizeof(struct hci_rp_le_read_buffer_size)),
4158         HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4159                sizeof(struct hci_rp_le_read_local_features)),
4160         HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4161                sizeof(struct hci_rp_le_read_adv_tx_power)),
4162         HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4163                sizeof(struct hci_rp_user_confirm_reply)),
4164         HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4165                sizeof(struct hci_rp_user_confirm_reply)),
4166         HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4167                sizeof(struct hci_rp_user_confirm_reply)),
4168         HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4169                sizeof(struct hci_rp_user_confirm_reply)),
4170         HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4171         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4172         HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4173         HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4174         HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4175                hci_cc_le_read_accept_list_size,
4176                sizeof(struct hci_rp_le_read_accept_list_size)),
4177         HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4178         HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4179                       hci_cc_le_add_to_accept_list),
4180         HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4181                       hci_cc_le_del_from_accept_list),
4182         HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4183                sizeof(struct hci_rp_le_read_supported_states)),
4184         HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4185                sizeof(struct hci_rp_le_read_def_data_len)),
4186         HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4187                       hci_cc_le_write_def_data_len),
4188         HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4189                       hci_cc_le_add_to_resolv_list),
4190         HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4191                       hci_cc_le_del_from_resolv_list),
4192         HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4193                       hci_cc_le_clear_resolv_list),
4194         HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4195                sizeof(struct hci_rp_le_read_resolv_list_size)),
4196         HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4197                       hci_cc_le_set_addr_resolution_enable),
4198         HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4199                sizeof(struct hci_rp_le_read_max_data_len)),
4200         HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4201                       hci_cc_write_le_host_supported),
4202         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4203         HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4204                sizeof(struct hci_rp_read_rssi)),
4205         HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4206                sizeof(struct hci_rp_read_tx_power)),
4207         HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4208         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4209                       hci_cc_le_set_ext_scan_param),
4210         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4211                       hci_cc_le_set_ext_scan_enable),
4212         HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4213         HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4214                hci_cc_le_read_num_adv_sets,
4215                sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4216         HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4217                sizeof(struct hci_rp_le_set_ext_adv_params)),
4218         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4219                       hci_cc_le_set_ext_adv_enable),
4220         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4221                       hci_cc_le_set_adv_set_random_addr),
4222         HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4223         HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4224         HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4225         HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4226                       hci_cc_le_set_per_adv_enable),
4227         HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4228                sizeof(struct hci_rp_le_read_transmit_power)),
4229 #ifdef TIZEN_BT
4230         HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
4231                sizeof(struct hci_cc_rsp_enable_rssi)),
4232         HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
4233                sizeof(struct hci_cc_rp_get_raw_rssi)),
4234 #endif
4235         HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4236         HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4237                sizeof(struct hci_rp_le_read_buffer_size_v2)),
4238         HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4239                   sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4240         HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4241                sizeof(struct hci_rp_le_setup_iso_path)),
4242 };
4243
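/* Validate the length of a Command Complete against its table entry and
 * dispatch to the per-opcode handler. Parameters shorter than min_len are
 * rejected, while anything longer than max_len only triggers a warning
 * and is still handed to the handler.
 */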
4244 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4245                       struct sk_buff *skb)
4246 {
4247         void *data;
4248
4249         if (skb->len < cc->min_len) {
4250                 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4251                            cc->op, skb->len, cc->min_len);
4252                 return HCI_ERROR_UNSPECIFIED;
4253         }
4254
4255         /* Just warn if the length is over max_len, since it may still be
4256          * possible to partially parse the cc, so leave it to the callback to
4257          * decide whether that is acceptable.
4258          */
4259         if (skb->len > cc->max_len)
4260                 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4261                             cc->op, skb->len, cc->max_len);
4262
4263         data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4264         if (!data)
4265                 return HCI_ERROR_UNSPECIFIED;
4266
4267         return cc->func(hdev, data, skb);
4268 }
4269
4270 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4271                                  struct sk_buff *skb, u16 *opcode, u8 *status,
4272                                  hci_req_complete_t *req_complete,
4273                                  hci_req_complete_skb_t *req_complete_skb)
4274 {
4275         struct hci_ev_cmd_complete *ev = data;
4276         int i;
4277
4278         *opcode = __le16_to_cpu(ev->opcode);
4279
4280         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4281
4282         for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4283                 if (hci_cc_table[i].op == *opcode) {
4284                         *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4285                         break;
4286                 }
4287         }
4288
4289         if (i == ARRAY_SIZE(hci_cc_table)) {
4290                 /* Unknown opcode, assume byte 0 contains the status, so
4291                  * that e.g. __hci_cmd_sync() properly returns errors
4292                  * for vendor specific commands sent by HCI drivers.
4293                  * If a vendor doesn't actually follow this convention we may
4294                  * need to introduce a vendor CC table in order to properly set
4295                  * the status.
4296                  */
4297                 *status = skb->data[0];
4298         }
4299
4300         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4301
4302         hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4303                              req_complete_skb);
4304
4305         if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4306                 bt_dev_err(hdev,
4307                            "unexpected event for opcode 0x%4.4x", *opcode);
4308                 return;
4309         }
4310
4311         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4312                 queue_work(hdev->workqueue, &hdev->cmd_work);
4313 }
4314
4315 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4316 {
4317         struct hci_cp_le_create_cis *cp;
4318         int i;
4319
4320         bt_dev_dbg(hdev, "status 0x%2.2x", status);
4321
4322         if (!status)
4323                 return;
4324
4325         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4326         if (!cp)
4327                 return;
4328
4329         hci_dev_lock(hdev);
4330
4331         /* Remove connection if command failed */
4332         for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4333                 struct hci_conn *conn;
4334                 u16 handle;
4335
4336                 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4337
4338                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4339                 if (conn) {
4340                         conn->state = BT_CLOSED;
4341                         hci_connect_cfm(conn, status);
4342                         hci_conn_del(conn);
4343                 }
4344         }
4345
4346         hci_dev_unlock(hdev);
4347 }
4348
4349 #define HCI_CS(_op, _func) \
4350 { \
4351         .op = _op, \
4352         .func = _func, \
4353 }
4354
4355 static const struct hci_cs {
4356         u16  op;
4357         void (*func)(struct hci_dev *hdev, __u8 status);
4358 } hci_cs_table[] = {
4359         HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4360         HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4361         HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4362         HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4363         HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4364         HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4365         HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4366         HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4367         HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4368                hci_cs_read_remote_ext_features),
4369         HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4370         HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4371                hci_cs_enhanced_setup_sync_conn),
4372         HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4373         HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4374         HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4375         HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4376         HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4377         HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4378         HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4379         HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4380         HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4381 };
4382
4383 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4384                                struct sk_buff *skb, u16 *opcode, u8 *status,
4385                                hci_req_complete_t *req_complete,
4386                                hci_req_complete_skb_t *req_complete_skb)
4387 {
4388         struct hci_ev_cmd_status *ev = data;
4389         int i;
4390
4391         *opcode = __le16_to_cpu(ev->opcode);
4392         *status = ev->status;
4393
4394         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4395
4396         for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4397                 if (hci_cs_table[i].op == *opcode) {
4398                         hci_cs_table[i].func(hdev, ev->status);
4399                         break;
4400                 }
4401         }
4402
4403         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4404
4405         /* Indicate request completion if the command failed. Also, if
4406          * we're not waiting for a special event and we get a success
4407          * command status we should try to flag the request as completed
4408          * (since for this kind of command there will not be a command
4409          * complete event).
4410          */
4411         if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
4412                 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4413                                      req_complete_skb);
4414                 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4415                         bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4416                                    *opcode);
4417                         return;
4418                 }
4419         }
4420
4421         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4422                 queue_work(hdev->workqueue, &hdev->cmd_work);
4423 }
4424
4425 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4426                                    struct sk_buff *skb)
4427 {
4428         struct hci_ev_hardware_error *ev = data;
4429
4430         bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4431
4432 #ifdef TIZEN_BT
4433         hci_dev_lock(hdev);
4434         mgmt_hardware_error(hdev, ev->code);
4435         hci_dev_unlock(hdev);
4436 #endif
4437         hdev->hw_error_code = ev->code;
4438
4439         queue_work(hdev->req_workqueue, &hdev->error_reset);
4440 }
4441
4442 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4443                                 struct sk_buff *skb)
4444 {
4445         struct hci_ev_role_change *ev = data;
4446         struct hci_conn *conn;
4447
4448         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4449
4450         hci_dev_lock(hdev);
4451
4452         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4453         if (conn) {
4454                 if (!ev->status)
4455                         conn->role = ev->role;
4456
4457                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4458
4459                 hci_role_switch_cfm(conn, ev->status, ev->role);
4460         }
4461
4462         hci_dev_unlock(hdev);
4463 }
4464
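/* Handle Number of Completed Packets: for every reported handle the
 * completed count is returned to the matching per-type flow control
 * counter (ACL, LE, SCO or ISO), clamped to the controller's advertised
 * maximum, and the TX work is queued so pending packets can be sent.
 */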
4465 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4466                                   struct sk_buff *skb)
4467 {
4468         struct hci_ev_num_comp_pkts *ev = data;
4469         int i;
4470
4471         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4472                              flex_array_size(ev, handles, ev->num)))
4473                 return;
4474
4475         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4476                 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4477                 return;
4478         }
4479
4480         bt_dev_dbg(hdev, "num %d", ev->num);
4481
4482         for (i = 0; i < ev->num; i++) {
4483                 struct hci_comp_pkts_info *info = &ev->handles[i];
4484                 struct hci_conn *conn;
4485                 __u16  handle, count;
4486
4487                 handle = __le16_to_cpu(info->handle);
4488                 count  = __le16_to_cpu(info->count);
4489
4490                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4491                 if (!conn)
4492                         continue;
4493
4494                 conn->sent -= count;
4495
4496                 switch (conn->type) {
4497                 case ACL_LINK:
4498                         hdev->acl_cnt += count;
4499                         if (hdev->acl_cnt > hdev->acl_pkts)
4500                                 hdev->acl_cnt = hdev->acl_pkts;
4501                         break;
4502
4503                 case LE_LINK:
4504                         if (hdev->le_pkts) {
4505                                 hdev->le_cnt += count;
4506                                 if (hdev->le_cnt > hdev->le_pkts)
4507                                         hdev->le_cnt = hdev->le_pkts;
4508                         } else {
4509                                 hdev->acl_cnt += count;
4510                                 if (hdev->acl_cnt > hdev->acl_pkts)
4511                                         hdev->acl_cnt = hdev->acl_pkts;
4512                         }
4513                         break;
4514
4515                 case SCO_LINK:
4516                         hdev->sco_cnt += count;
4517                         if (hdev->sco_cnt > hdev->sco_pkts)
4518                                 hdev->sco_cnt = hdev->sco_pkts;
4519                         break;
4520
4521                 case ISO_LINK:
4522                         if (hdev->iso_pkts) {
4523                                 hdev->iso_cnt += count;
4524                                 if (hdev->iso_cnt > hdev->iso_pkts)
4525                                         hdev->iso_cnt = hdev->iso_pkts;
4526                         } else if (hdev->le_pkts) {
4527                                 hdev->le_cnt += count;
4528                                 if (hdev->le_cnt > hdev->le_pkts)
4529                                         hdev->le_cnt = hdev->le_pkts;
4530                         } else {
4531                                 hdev->acl_cnt += count;
4532                                 if (hdev->acl_cnt > hdev->acl_pkts)
4533                                         hdev->acl_cnt = hdev->acl_pkts;
4534                         }
4535                         break;
4536
4537                 default:
4538                         bt_dev_err(hdev, "unknown type %d conn %p",
4539                                    conn->type, conn);
4540                         break;
4541                 }
4542         }
4543
4544         queue_work(hdev->workqueue, &hdev->tx_work);
4545 }
4546
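/* Resolve a connection handle according to the controller type: primary
 * controllers index the connection hash directly, while on AMP
 * controllers the handle identifies an hci_chan whose parent connection
 * is returned instead.
 */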
4547 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4548                                                  __u16 handle)
4549 {
4550         struct hci_chan *chan;
4551
4552         switch (hdev->dev_type) {
4553         case HCI_PRIMARY:
4554                 return hci_conn_hash_lookup_handle(hdev, handle);
4555         case HCI_AMP:
4556                 chan = hci_chan_lookup_handle(hdev, handle);
4557                 if (chan)
4558                         return chan->conn;
4559                 break;
4560         default:
4561                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4562                 break;
4563         }
4564
4565         return NULL;
4566 }
4567
4568 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4569                                     struct sk_buff *skb)
4570 {
4571         struct hci_ev_num_comp_blocks *ev = data;
4572         int i;
4573
4574         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4575                              flex_array_size(ev, handles, ev->num_hndl)))
4576                 return;
4577
4578         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4579                 bt_dev_err(hdev, "wrong event for mode %d",
4580                            hdev->flow_ctl_mode);
4581                 return;
4582         }
4583
4584         bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4585                    ev->num_hndl);
4586
4587         for (i = 0; i < ev->num_hndl; i++) {
4588                 struct hci_comp_blocks_info *info = &ev->handles[i];
4589                 struct hci_conn *conn = NULL;
4590                 __u16  handle, block_count;
4591
4592                 handle = __le16_to_cpu(info->handle);
4593                 block_count = __le16_to_cpu(info->blocks);
4594
4595                 conn = __hci_conn_lookup_handle(hdev, handle);
4596                 if (!conn)
4597                         continue;
4598
4599                 conn->sent -= block_count;
4600
4601                 switch (conn->type) {
4602                 case ACL_LINK:
4603                 case AMP_LINK:
4604                         hdev->block_cnt += block_count;
4605                         if (hdev->block_cnt > hdev->num_blocks)
4606                                 hdev->block_cnt = hdev->num_blocks;
4607                         break;
4608
4609                 default:
4610                         bt_dev_err(hdev, "unknown type %d conn %p",
4611                                    conn->type, conn);
4612                         break;
4613                 }
4614         }
4615
4616         queue_work(hdev->workqueue, &hdev->tx_work);
4617 }
4618
4619 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4620                                 struct sk_buff *skb)
4621 {
4622         struct hci_ev_mode_change *ev = data;
4623         struct hci_conn *conn;
4624
4625         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4626
4627         hci_dev_lock(hdev);
4628
4629         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4630         if (conn) {
4631                 conn->mode = ev->mode;
4632
4633                 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4634                                         &conn->flags)) {
4635                         if (conn->mode == HCI_CM_ACTIVE)
4636                                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4637                         else
4638                                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4639                 }
4640
4641                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4642                         hci_sco_setup(conn, ev->status);
4643         }
4644
4645         hci_dev_unlock(hdev);
4646 }
4647
4648 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4649                                      struct sk_buff *skb)
4650 {
4651         struct hci_ev_pin_code_req *ev = data;
4652         struct hci_conn *conn;
4653
4654         bt_dev_dbg(hdev, "");
4655
4656         hci_dev_lock(hdev);
4657
4658         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4659         if (!conn)
4660                 goto unlock;
4661
4662         if (conn->state == BT_CONNECTED) {
4663                 hci_conn_hold(conn);
4664                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4665                 hci_conn_drop(conn);
4666         }
4667
4668         if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4669             !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4670                 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4671                              sizeof(ev->bdaddr), &ev->bdaddr);
4672         } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4673                 u8 secure;
4674
4675                 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4676                         secure = 1;
4677                 else
4678                         secure = 0;
4679
4680                 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4681         }
4682
4683 unlock:
4684         hci_dev_unlock(hdev);
4685 }
4686
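/* Record the link key type and PIN length on the connection and derive
 * the pending security level from the key type: an authenticated P-256
 * combination key maps to BT_SECURITY_FIPS, authenticated P-192 to
 * BT_SECURITY_HIGH and unauthenticated keys to BT_SECURITY_MEDIUM.
 */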
4687 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4688 {
4689         if (key_type == HCI_LK_CHANGED_COMBINATION)
4690                 return;
4691
4692         conn->pin_length = pin_len;
4693         conn->key_type = key_type;
4694
4695         switch (key_type) {
4696         case HCI_LK_LOCAL_UNIT:
4697         case HCI_LK_REMOTE_UNIT:
4698         case HCI_LK_DEBUG_COMBINATION:
4699                 return;
4700         case HCI_LK_COMBINATION:
4701                 if (pin_len == 16)
4702                         conn->pending_sec_level = BT_SECURITY_HIGH;
4703                 else
4704                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4705                 break;
4706         case HCI_LK_UNAUTH_COMBINATION_P192:
4707         case HCI_LK_UNAUTH_COMBINATION_P256:
4708                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4709                 break;
4710         case HCI_LK_AUTH_COMBINATION_P192:
4711                 conn->pending_sec_level = BT_SECURITY_HIGH;
4712                 break;
4713         case HCI_LK_AUTH_COMBINATION_P256:
4714                 conn->pending_sec_level = BT_SECURITY_FIPS;
4715                 break;
4716         }
4717 }
4718
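     /* Link Key Request event: look up a stored key for the peer, reject
      * keys that are too weak for the pending security level, and answer
      * with either a Link Key Reply or a Link Key Negative Reply.
      */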
4719 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4720                                      struct sk_buff *skb)
4721 {
4722         struct hci_ev_link_key_req *ev = data;
4723         struct hci_cp_link_key_reply cp;
4724         struct hci_conn *conn;
4725         struct link_key *key;
4726
4727         bt_dev_dbg(hdev, "");
4728
4729         if (!hci_dev_test_flag(hdev, HCI_MGMT))
4730                 return;
4731
4732         hci_dev_lock(hdev);
4733
4734         key = hci_find_link_key(hdev, &ev->bdaddr);
4735         if (!key) {
4736                 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4737                 goto not_found;
4738         }
4739
4740         bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4741
4742         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4743         if (conn) {
4744                 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4745
4746                 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4747                      key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4748                     conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4749                         bt_dev_dbg(hdev, "ignoring unauthenticated key");
4750                         goto not_found;
4751                 }
4752
4753                 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4754                     (conn->pending_sec_level == BT_SECURITY_HIGH ||
4755                      conn->pending_sec_level == BT_SECURITY_FIPS)) {
4756                         bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4757                         goto not_found;
4758                 }
4759
4760                 conn_set_key(conn, key->type, key->pin_len);
4761         }
4762
4763         bacpy(&cp.bdaddr, &ev->bdaddr);
4764         memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4765
4766         hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4767
4768         hci_dev_unlock(hdev);
4769
4770         return;
4771
4772 not_found:
4773         hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4774         hci_dev_unlock(hdev);
4775 }
4776
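     /* Link Key Notification event: store the new key and notify user
      * space, drop debug keys unless HCI_KEEP_DEBUG_KEYS is set, and mark
      * non-persistent keys for flushing on disconnect.
      */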
4777 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4778                                     struct sk_buff *skb)
4779 {
4780         struct hci_ev_link_key_notify *ev = data;
4781         struct hci_conn *conn;
4782         struct link_key *key;
4783         bool persistent;
4784         u8 pin_len = 0;
4785
4786         bt_dev_dbg(hdev, "");
4787
4788         hci_dev_lock(hdev);
4789
4790         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4791         if (!conn)
4792                 goto unlock;
4793
4794         hci_conn_hold(conn);
4795         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4796         hci_conn_drop(conn);
4797
4798         set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4799         conn_set_key(conn, ev->key_type, conn->pin_length);
4800
4801         if (!hci_dev_test_flag(hdev, HCI_MGMT))
4802                 goto unlock;
4803
4804         key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4805                                 ev->key_type, pin_len, &persistent);
4806         if (!key)
4807                 goto unlock;
4808
4809         /* Update connection information since adding the key will have
4810          * fixed up the type in the case of changed combination keys.
4811          */
4812         if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4813                 conn_set_key(conn, key->type, key->pin_len);
4814
4815         mgmt_new_link_key(hdev, key, persistent);
4816
4817         /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4818          * is set. If it's not set, simply remove the key from the kernel
4819          * list (we've still notified user space about it but with
4820          * store_hint being 0).
4821          */
4822         if (key->type == HCI_LK_DEBUG_COMBINATION &&
4823             !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4824                 list_del_rcu(&key->list);
4825                 kfree_rcu(key, rcu);
4826                 goto unlock;
4827         }
4828
4829         if (persistent)
4830                 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4831         else
4832                 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4833
4834 unlock:
4835         hci_dev_unlock(hdev);
4836 }
4837
4838 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4839                                  struct sk_buff *skb)
4840 {
4841         struct hci_ev_clock_offset *ev = data;
4842         struct hci_conn *conn;
4843
4844         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4845
4846         hci_dev_lock(hdev);
4847
4848         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4849         if (conn && !ev->status) {
4850                 struct inquiry_entry *ie;
4851
4852                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4853                 if (ie) {
4854                         ie->data.clock_offset = ev->clock_offset;
4855                         ie->timestamp = jiffies;
4856                 }
4857         }
4858
4859         hci_dev_unlock(hdev);
4860 }
4861
4862 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4863                                     struct sk_buff *skb)
4864 {
4865         struct hci_ev_pkt_type_change *ev = data;
4866         struct hci_conn *conn;
4867
4868         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4869
4870         hci_dev_lock(hdev);
4871
4872         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4873         if (conn && !ev->status)
4874                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4875
4876         hci_dev_unlock(hdev);
4877 }
4878
4879 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4880                                    struct sk_buff *skb)
4881 {
4882         struct hci_ev_pscan_rep_mode *ev = data;
4883         struct inquiry_entry *ie;
4884
4885         bt_dev_dbg(hdev, "");
4886
4887         hci_dev_lock(hdev);
4888
4889         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4890         if (ie) {
4891                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4892                 ie->timestamp = jiffies;
4893         }
4894
4895         hci_dev_unlock(hdev);
4896 }
4897
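     /* Inquiry Result with RSSI event: the event exists in two formats,
      * with and without the page scan mode field, so the skb length is
      * used to pick the right structure before updating the inquiry cache
      * and reporting each device to user space.
      */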
4898 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4899                                              struct sk_buff *skb)
4900 {
4901         struct hci_ev_inquiry_result_rssi *ev = edata;
4902         struct inquiry_data data;
4903         int i;
4904
4905         bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4906
4907         if (!ev->num)
4908                 return;
4909
4910         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4911                 return;
4912
4913         hci_dev_lock(hdev);
4914
4915         if (skb->len == array_size(ev->num,
4916                                    sizeof(struct inquiry_info_rssi_pscan))) {
4917                 struct inquiry_info_rssi_pscan *info;
4918
4919                 for (i = 0; i < ev->num; i++) {
4920                         u32 flags;
4921
4922                         info = hci_ev_skb_pull(hdev, skb,
4923                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4924                                                sizeof(*info));
4925                         if (!info) {
4926                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4927                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4928                                 goto unlock;
4929                         }
4930
4931                         bacpy(&data.bdaddr, &info->bdaddr);
4932                         data.pscan_rep_mode     = info->pscan_rep_mode;
4933                         data.pscan_period_mode  = info->pscan_period_mode;
4934                         data.pscan_mode         = info->pscan_mode;
4935                         memcpy(data.dev_class, info->dev_class, 3);
4936                         data.clock_offset       = info->clock_offset;
4937                         data.rssi               = info->rssi;
4938                         data.ssp_mode           = 0x00;
4939
4940                         flags = hci_inquiry_cache_update(hdev, &data, false);
4941
4942                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4943                                           info->dev_class, info->rssi,
4944                                           flags, NULL, 0, NULL, 0, 0);
4945                 }
4946         } else if (skb->len == array_size(ev->num,
4947                                           sizeof(struct inquiry_info_rssi))) {
4948                 struct inquiry_info_rssi *info;
4949
4950                 for (i = 0; i < ev->num; i++) {
4951                         u32 flags;
4952
4953                         info = hci_ev_skb_pull(hdev, skb,
4954                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4955                                                sizeof(*info));
4956                         if (!info) {
4957                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4958                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4959                                 goto unlock;
4960                         }
4961
4962                         bacpy(&data.bdaddr, &info->bdaddr);
4963                         data.pscan_rep_mode     = info->pscan_rep_mode;
4964                         data.pscan_period_mode  = info->pscan_period_mode;
4965                         data.pscan_mode         = 0x00;
4966                         memcpy(data.dev_class, info->dev_class, 3);
4967                         data.clock_offset       = info->clock_offset;
4968                         data.rssi               = info->rssi;
4969                         data.ssp_mode           = 0x00;
4970
4971                         flags = hci_inquiry_cache_update(hdev, &data, false);
4972
4973                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4974                                           info->dev_class, info->rssi,
4975                                           flags, NULL, 0, NULL, 0, 0);
4976                 }
4977         } else {
4978                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4979                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4980         }
4981 unlock:
4982         hci_dev_unlock(hdev);
4983 }
4984
4985 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4986                                         struct sk_buff *skb)
4987 {
4988         struct hci_ev_remote_ext_features *ev = data;
4989         struct hci_conn *conn;
4990
4991         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4992
4993         hci_dev_lock(hdev);
4994
4995         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4996         if (!conn)
4997                 goto unlock;
4998
4999         if (ev->page < HCI_MAX_PAGES)
5000                 memcpy(conn->features[ev->page], ev->features, 8);
5001
5002         if (!ev->status && ev->page == 0x01) {
5003                 struct inquiry_entry *ie;
5004
5005                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
5006                 if (ie)
5007                         ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5008
5009                 if (ev->features[0] & LMP_HOST_SSP) {
5010                         set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5011                 } else {
5012                         /* The Bluetooth specification mandates that Extended
5013                          * Inquiry Results are only used when Secure Simple
5014                          * Pairing is enabled, but some devices violate this.
5015                          *
5016                          * To make these devices work, the internal SSP enabled
5017                          * flag needs to be cleared if the remote host features
5018                          * do not indicate SSP support. */
5020                         clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5021                 }
5022
5023                 if (ev->features[0] & LMP_HOST_SC)
5024                         set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
5025         }
5026
5027         if (conn->state != BT_CONFIG)
5028                 goto unlock;
5029
5030         if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
5031                 struct hci_cp_remote_name_req cp;
5032                 memset(&cp, 0, sizeof(cp));
5033                 bacpy(&cp.bdaddr, &conn->dst);
5034                 cp.pscan_rep_mode = 0x02;
5035                 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
5036         } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5037                 mgmt_device_connected(hdev, conn, NULL, 0);
5038
5039         if (!hci_outgoing_auth_needed(hdev, conn)) {
5040                 conn->state = BT_CONNECTED;
5041                 hci_connect_cfm(conn, ev->status);
5042                 hci_conn_drop(conn);
5043         }
5044
5045 unlock:
5046         hci_dev_unlock(hdev);
5047 }
5048
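     /* Synchronous Connection Complete event: fall back to the eSCO
      * connection object if the SCO lookup fails, retry the setup with an
      * adjusted packet type on selected failure codes, and notify the
      * driver about the negotiated air mode for SCO over the HCI
      * transport.
      */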
5049 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5050                                        struct sk_buff *skb)
5051 {
5052         struct hci_ev_sync_conn_complete *ev = data;
5053         struct hci_conn *conn;
5054         u8 status = ev->status;
5055
5056         switch (ev->link_type) {
5057         case SCO_LINK:
5058         case ESCO_LINK:
5059                 break;
5060         default:
5061                 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5062                  * for HCI_Synchronous_Connection_Complete is limited to
5063                  * either SCO or eSCO
5064                  */
5065                 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5066                 return;
5067         }
5068
5069         bt_dev_dbg(hdev, "status 0x%2.2x", status);
5070
5071         hci_dev_lock(hdev);
5072
5073         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5074         if (!conn) {
5075                 if (ev->link_type == ESCO_LINK)
5076                         goto unlock;
5077
5078                 /* When the link type in the event indicates SCO connection
5079                  * and lookup of the connection object fails, then check
5080                  * if an eSCO connection object exists.
5081                  *
5082                  * The core limits the synchronous connections to either
5083                  * SCO or eSCO. The eSCO connection is preferred and
5084                  * attempted first; until it is successfully established,
5085                  * the link type will be hinted as eSCO.
5086                  */
5087                 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5088                 if (!conn)
5089                         goto unlock;
5090         }
5091
5092         /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5093          * Processing it more than once per connection can corrupt kernel memory.
5094          *
5095          * As the connection handle is set here for the first time, it indicates
5096          * whether the connection is already set up.
5097          */
5098         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5099                 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5100                 goto unlock;
5101         }
5102
5103         switch (status) {
5104         case 0x00:
5105                 conn->handle = __le16_to_cpu(ev->handle);
5106                 if (conn->handle > HCI_CONN_HANDLE_MAX) {
5107                         bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
5108                                    conn->handle, HCI_CONN_HANDLE_MAX);
5109                         status = HCI_ERROR_INVALID_PARAMETERS;
5110                         conn->state = BT_CLOSED;
5111                         break;
5112                 }
5113
5114                 conn->state  = BT_CONNECTED;
5115                 conn->type   = ev->link_type;
5116
5117                 hci_debugfs_create_conn(conn);
5118                 hci_conn_add_sysfs(conn);
5119                 break;
5120
5121         case 0x10:      /* Connection Accept Timeout */
5122         case 0x0d:      /* Connection Rejected due to Limited Resources */
5123         case 0x11:      /* Unsupported Feature or Parameter Value */
5124         case 0x1c:      /* SCO interval rejected */
5125         case 0x1a:      /* Unsupported Remote Feature */
5126         case 0x1e:      /* Invalid LMP Parameters */
5127         case 0x1f:      /* Unspecified error */
5128         case 0x20:      /* Unsupported LMP Parameter value */
5129                 if (conn->out) {
5130                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5131                                         (hdev->esco_type & EDR_ESCO_MASK);
5132                         if (hci_setup_sync(conn, conn->link->handle))
5133                                 goto unlock;
5134                 }
5135                 fallthrough;
5136
5137         default:
5138                 conn->state = BT_CLOSED;
5139                 break;
5140         }
5141
5142         bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5143         /* Notify only for SCO over the HCI transport data path (value
5144          * zero); a non-zero value indicates a non-HCI transport data path.
5145          */
5146         if (conn->codec.data_path == 0 && hdev->notify) {
5147                 switch (ev->air_mode) {
5148                 case 0x02:
5149                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5150                         break;
5151                 case 0x03:
5152                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5153                         break;
5154                 }
5155         }
5156
5157         hci_connect_cfm(conn, status);
5158         if (status)
5159                 hci_conn_del(conn);
5160
5161 unlock:
5162         hci_dev_unlock(hdev);
5163 }
5164
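     /* Return the length of the significant part of the EIR data, i.e. up
      * to the first zero-length field, or the full length if no
      * terminating field is found.
      */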
5165 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5166 {
5167         size_t parsed = 0;
5168
5169         while (parsed < eir_len) {
5170                 u8 field_len = eir[0];
5171
5172                 if (field_len == 0)
5173                         return parsed;
5174
5175                 parsed += field_len + 1;
5176                 eir += field_len + 1;
5177         }
5178
5179         return eir_len;
5180 }
5181
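     /* Extended Inquiry Result event: update the inquiry cache and report
      * each device together with its EIR data, treating the name as known
      * when the EIR contains a complete name.
      */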
5182 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5183                                             struct sk_buff *skb)
5184 {
5185         struct hci_ev_ext_inquiry_result *ev = edata;
5186         struct inquiry_data data;
5187         size_t eir_len;
5188         int i;
5189
5190         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5191                              flex_array_size(ev, info, ev->num)))
5192                 return;
5193
5194         bt_dev_dbg(hdev, "num %d", ev->num);
5195
5196         if (!ev->num)
5197                 return;
5198
5199         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5200                 return;
5201
5202         hci_dev_lock(hdev);
5203
5204         for (i = 0; i < ev->num; i++) {
5205                 struct extended_inquiry_info *info = &ev->info[i];
5206                 u32 flags;
5207                 bool name_known;
5208
5209                 bacpy(&data.bdaddr, &info->bdaddr);
5210                 data.pscan_rep_mode     = info->pscan_rep_mode;
5211                 data.pscan_period_mode  = info->pscan_period_mode;
5212                 data.pscan_mode         = 0x00;
5213                 memcpy(data.dev_class, info->dev_class, 3);
5214                 data.clock_offset       = info->clock_offset;
5215                 data.rssi               = info->rssi;
5216                 data.ssp_mode           = 0x01;
5217
5218                 if (hci_dev_test_flag(hdev, HCI_MGMT))
5219                         name_known = eir_get_data(info->data,
5220                                                   sizeof(info->data),
5221                                                   EIR_NAME_COMPLETE, NULL);
5222                 else
5223                         name_known = true;
5224
5225                 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5226
5227                 eir_len = eir_get_length(info->data, sizeof(info->data));
5228
5229                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5230                                   info->dev_class, info->rssi,
5231                                   flags, info->data, eir_len, NULL, 0, 0);
5232         }
5233
5234         hci_dev_unlock(hdev);
5235 }
5236
5237 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5238                                          struct sk_buff *skb)
5239 {
5240         struct hci_ev_key_refresh_complete *ev = data;
5241         struct hci_conn *conn;
5242
5243         bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5244                    __le16_to_cpu(ev->handle));
5245
5246         hci_dev_lock(hdev);
5247
5248         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5249         if (!conn)
5250                 goto unlock;
5251
5252         /* For BR/EDR the necessary steps are taken through the
5253          * auth_complete event.
5254          */
5255         if (conn->type != LE_LINK)
5256                 goto unlock;
5257
5258         if (!ev->status)
5259                 conn->sec_level = conn->pending_sec_level;
5260
5261         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5262
5263         if (ev->status && conn->state == BT_CONNECTED) {
5264                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5265                 hci_conn_drop(conn);
5266                 goto unlock;
5267         }
5268
5269         if (conn->state == BT_CONFIG) {
5270                 if (!ev->status)
5271                         conn->state = BT_CONNECTED;
5272
5273                 hci_connect_cfm(conn, ev->status);
5274                 hci_conn_drop(conn);
5275         } else {
5276                 hci_auth_cfm(conn, ev->status);
5277
5278                 hci_conn_hold(conn);
5279                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5280                 hci_conn_drop(conn);
5281         }
5282
5283 unlock:
5284         hci_dev_unlock(hdev);
5285 }
5286
5287 static u8 hci_get_auth_req(struct hci_conn *conn)
5288 {
5289         /* If remote requests no-bonding follow that lead */
5290         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5291             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5292                 return conn->remote_auth | (conn->auth_type & 0x01);
5293
5294         /* If both remote and local have enough IO capabilities, require
5295          * MITM protection
5296          */
5297         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5298             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5299                 return conn->remote_auth | 0x01;
5300
5301         /* No MITM protection possible so ignore remote requirement */
5302         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5303 }
5304
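     /* Determine the OOB Data Present value for the IO Capability Reply
      * based on which stored remote OOB values (P-192 and/or P-256) can be
      * trusted in the current Secure Connections mode.
      */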
5305 static u8 bredr_oob_data_present(struct hci_conn *conn)
5306 {
5307         struct hci_dev *hdev = conn->hdev;
5308         struct oob_data *data;
5309
5310         data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5311         if (!data)
5312                 return 0x00;
5313
5314         if (bredr_sc_enabled(hdev)) {
5315                 /* When Secure Connections is enabled, then just
5316                  * return the present value stored with the OOB
5317                  * data. The stored value contains the right present
5318                  * information. However, it can only be trusted when
5319                  * not in Secure Connections Only mode.
5320                  */
5321                 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5322                         return data->present;
5323
5324                 /* When Secure Connections Only mode is enabled, then
5325                  * the P-256 values are required. If they are not
5326                  * available, then do not declare that OOB data is
5327                  * present.
5328                  */
5329                 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5330                     !memcmp(data->hash256, ZERO_KEY, 16))
5331                         return 0x00;
5332
5333                 return 0x02;
5334         }
5335
5336         /* When Secure Connections is not enabled or actually
5337          * not supported by the hardware, then check if the
5338          * P-192 data values are present.
5339          */
5340         if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5341             !memcmp(data->hash192, ZERO_KEY, 16))
5342                 return 0x00;
5343
5344         return 0x01;
5345 }
5346
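     /* IO Capability Request event: reply with our IO capability,
      * authentication requirements and OOB data when pairing is allowed,
      * otherwise send a negative reply with Pairing Not Allowed.
      */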
5347 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5348                                     struct sk_buff *skb)
5349 {
5350         struct hci_ev_io_capa_request *ev = data;
5351         struct hci_conn *conn;
5352
5353         bt_dev_dbg(hdev, "");
5354
5355         hci_dev_lock(hdev);
5356
5357         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5358         if (!conn)
5359                 goto unlock;
5360
5361         hci_conn_hold(conn);
5362
5363         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5364                 goto unlock;
5365
5366         /* Allow pairing if we're bondable, if we are the initiators of
5367          * the pairing, or if the remote is not requesting bonding.
5368          */
5369         if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5370             test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5371             (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5372                 struct hci_cp_io_capability_reply cp;
5373
5374                 bacpy(&cp.bdaddr, &ev->bdaddr);
5375                 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
5376                  * as KeyboardDisplay is not supported by the BT spec. */
5377                 cp.capability = (conn->io_capability == 0x04) ?
5378                                 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5379
5380                 /* If we are the initiators, there is no remote information yet */
5381                 if (conn->remote_auth == 0xff) {
5382                         /* Request MITM protection if our IO caps allow it
5383                          * except for the no-bonding case.
5384                          */
5385                         if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5386                             conn->auth_type != HCI_AT_NO_BONDING)
5387                                 conn->auth_type |= 0x01;
5388                 } else {
5389                         conn->auth_type = hci_get_auth_req(conn);
5390                 }
5391
5392                 /* If we're not bondable, force one of the non-bondable
5393                  * authentication requirement values.
5394                  */
5395                 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5396                         conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5397
5398                 cp.authentication = conn->auth_type;
5399                 cp.oob_data = bredr_oob_data_present(conn);
5400
5401                 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5402                              sizeof(cp), &cp);
5403         } else {
5404                 struct hci_cp_io_capability_neg_reply cp;
5405
5406                 bacpy(&cp.bdaddr, &ev->bdaddr);
5407                 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5408
5409                 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5410                              sizeof(cp), &cp);
5411         }
5412
5413 unlock:
5414         hci_dev_unlock(hdev);
5415 }
5416
5417 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5418                                   struct sk_buff *skb)
5419 {
5420         struct hci_ev_io_capa_reply *ev = data;
5421         struct hci_conn *conn;
5422
5423         bt_dev_dbg(hdev, "");
5424
5425         hci_dev_lock(hdev);
5426
5427         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5428         if (!conn)
5429                 goto unlock;
5430
5431         conn->remote_cap = ev->capability;
5432         conn->remote_auth = ev->authentication;
5433
5434 unlock:
5435         hci_dev_unlock(hdev);
5436 }
5437
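     /* User Confirmation Request event (SSP numeric comparison): reject
      * when MITM protection is required but the remote cannot provide it,
      * auto-accept (possibly after a configured delay) when neither side
      * needs MITM protection and it is safe to do so, and otherwise hand
      * the decision to user space.
      */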
5438 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5439                                          struct sk_buff *skb)
5440 {
5441         struct hci_ev_user_confirm_req *ev = data;
5442         int loc_mitm, rem_mitm, confirm_hint = 0;
5443         struct hci_conn *conn;
5444
5445         bt_dev_dbg(hdev, "");
5446
5447         hci_dev_lock(hdev);
5448
5449         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5450                 goto unlock;
5451
5452         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5453         if (!conn)
5454                 goto unlock;
5455
5456         loc_mitm = (conn->auth_type & 0x01);
5457         rem_mitm = (conn->remote_auth & 0x01);
5458
5459         /* If we require MITM but the remote device can't provide that
5460          * (it has NoInputNoOutput) then reject the confirmation
5461          * request. We check the security level here since it doesn't
5462          * necessarily match conn->auth_type.
5463          */
5464         if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5465             conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5466                 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5467                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5468                              sizeof(ev->bdaddr), &ev->bdaddr);
5469                 goto unlock;
5470         }
5471
5472         /* If no side requires MITM protection; auto-accept */
5473         if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5474             (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5475
5476                 /* If we're not the initiators, request authorization to
5477                  * proceed from user space (mgmt_user_confirm with
5478                  * confirm_hint set to 1). The exception is if neither
5479                  * side requires MITM or if the local IO capability is
5480                  * NoInputNoOutput, in which case we auto-accept.
5481                  */
5482                 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5483                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5484                     (loc_mitm || rem_mitm)) {
5485                         bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5486                         confirm_hint = 1;
5487                         goto confirm;
5488                 }
5489
5490                 /* If a link key already exists on the local host, leave the
5491                  * decision to user space since the remote device could be
5492                  * legitimate or malicious.
5493                  */
5494                 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5495                         bt_dev_dbg(hdev, "Local host already has link key");
5496                         confirm_hint = 1;
5497                         goto confirm;
5498                 }
5499
5500                 BT_DBG("Auto-accept of user confirmation with %ums delay",
5501                        hdev->auto_accept_delay);
5502
5503                 if (hdev->auto_accept_delay > 0) {
5504                         int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5505                         queue_delayed_work(conn->hdev->workqueue,
5506                                            &conn->auto_accept_work, delay);
5507                         goto unlock;
5508                 }
5509
5510                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5511                              sizeof(ev->bdaddr), &ev->bdaddr);
5512                 goto unlock;
5513         }
5514
5515 confirm:
5516         mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5517                                   le32_to_cpu(ev->passkey), confirm_hint);
5518
5519 unlock:
5520         hci_dev_unlock(hdev);
5521 }
5522
5523 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5524                                          struct sk_buff *skb)
5525 {
5526         struct hci_ev_user_passkey_req *ev = data;
5527
5528         bt_dev_dbg(hdev, "");
5529
5530         if (hci_dev_test_flag(hdev, HCI_MGMT))
5531                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5532 }
5533
5534 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5535                                         struct sk_buff *skb)
5536 {
5537         struct hci_ev_user_passkey_notify *ev = data;
5538         struct hci_conn *conn;
5539
5540         bt_dev_dbg(hdev, "");
5541
5542         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5543         if (!conn)
5544                 return;
5545
5546         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5547         conn->passkey_entered = 0;
5548
5549         if (hci_dev_test_flag(hdev, HCI_MGMT))
5550                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5551                                          conn->dst_type, conn->passkey_notify,
5552                                          conn->passkey_entered);
5553 }
5554
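     /* Keypress Notification event: track how many passkey digits the
      * remote user has entered and forward the updated count to user
      * space.
      */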
5555 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5556                                     struct sk_buff *skb)
5557 {
5558         struct hci_ev_keypress_notify *ev = data;
5559         struct hci_conn *conn;
5560
5561         bt_dev_dbg(hdev, "");
5562
5563         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5564         if (!conn)
5565                 return;
5566
5567         switch (ev->type) {
5568         case HCI_KEYPRESS_STARTED:
5569                 conn->passkey_entered = 0;
5570                 return;
5571
5572         case HCI_KEYPRESS_ENTERED:
5573                 conn->passkey_entered++;
5574                 break;
5575
5576         case HCI_KEYPRESS_ERASED:
5577                 conn->passkey_entered--;
5578                 break;
5579
5580         case HCI_KEYPRESS_CLEARED:
5581                 conn->passkey_entered = 0;
5582                 break;
5583
5584         case HCI_KEYPRESS_COMPLETED:
5585                 return;
5586         }
5587
5588         if (hci_dev_test_flag(hdev, HCI_MGMT))
5589                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5590                                          conn->dst_type, conn->passkey_notify,
5591                                          conn->passkey_entered);
5592 }
5593
5594 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5595                                          struct sk_buff *skb)
5596 {
5597         struct hci_ev_simple_pair_complete *ev = data;
5598         struct hci_conn *conn;
5599
5600         bt_dev_dbg(hdev, "");
5601
5602         hci_dev_lock(hdev);
5603
5604         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5605         if (!conn)
5606                 goto unlock;
5607
5608         /* Reset the authentication requirement to unknown */
5609         conn->remote_auth = 0xff;
5610
5611         /* To avoid duplicate auth_failed events to user space we check
5612          * the HCI_CONN_AUTH_PEND flag, which will be set if we
5613          * initiated the authentication. A traditional auth_complete
5614          * event is always produced when acting as the initiator and
5615          * is also mapped to the mgmt_auth_failed event. */
5616         if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5617                 mgmt_auth_failed(conn, ev->status);
5618
5619         hci_conn_drop(conn);
5620
5621 unlock:
5622         hci_dev_unlock(hdev);
5623 }
5624
5625 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5626                                          struct sk_buff *skb)
5627 {
5628         struct hci_ev_remote_host_features *ev = data;
5629         struct inquiry_entry *ie;
5630         struct hci_conn *conn;
5631
5632         bt_dev_dbg(hdev, "");
5633
5634         hci_dev_lock(hdev);
5635
5636         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5637         if (conn)
5638                 memcpy(conn->features[1], ev->features, 8);
5639
5640         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5641         if (ie)
5642                 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5643
5644         hci_dev_unlock(hdev);
5645 }
5646
5647 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5648                                             struct sk_buff *skb)
5649 {
5650         struct hci_ev_remote_oob_data_request *ev = edata;
5651         struct oob_data *data;
5652
5653         bt_dev_dbg(hdev, "");
5654
5655         hci_dev_lock(hdev);
5656
5657         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5658                 goto unlock;
5659
5660         data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5661         if (!data) {
5662                 struct hci_cp_remote_oob_data_neg_reply cp;
5663
5664                 bacpy(&cp.bdaddr, &ev->bdaddr);
5665                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5666                              sizeof(cp), &cp);
5667                 goto unlock;
5668         }
5669
5670         if (bredr_sc_enabled(hdev)) {
5671                 struct hci_cp_remote_oob_ext_data_reply cp;
5672
5673                 bacpy(&cp.bdaddr, &ev->bdaddr);
5674                 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5675                         memset(cp.hash192, 0, sizeof(cp.hash192));
5676                         memset(cp.rand192, 0, sizeof(cp.rand192));
5677                 } else {
5678                         memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5679                         memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5680                 }
5681                 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5682                 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5683
5684                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5685                              sizeof(cp), &cp);
5686         } else {
5687                 struct hci_cp_remote_oob_data_reply cp;
5688
5689                 bacpy(&cp.bdaddr, &ev->bdaddr);
5690                 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5691                 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5692
5693                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5694                              sizeof(cp), &cp);
5695         }
5696
5697 unlock:
5698         hci_dev_unlock(hdev);
5699 }
5700
5701 #if IS_ENABLED(CONFIG_BT_HS)
5702 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5703                                   struct sk_buff *skb)
5704 {
5705         struct hci_ev_channel_selected *ev = data;
5706         struct hci_conn *hcon;
5707
5708         bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5709
5710         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5711         if (!hcon)
5712                 return;
5713
5714         amp_read_loc_assoc_final_data(hdev, hcon);
5715 }
5716
5717 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5718                                       struct sk_buff *skb)
5719 {
5720         struct hci_ev_phy_link_complete *ev = data;
5721         struct hci_conn *hcon, *bredr_hcon;
5722
5723         bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5724                    ev->status);
5725
5726         hci_dev_lock(hdev);
5727
5728         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5729         if (!hcon)
5730                 goto unlock;
5731
5732         if (!hcon->amp_mgr)
5733                 goto unlock;
5734
5735         if (ev->status) {
5736                 hci_conn_del(hcon);
5737                 goto unlock;
5738         }
5739
5740         bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5741
5742         hcon->state = BT_CONNECTED;
5743         bacpy(&hcon->dst, &bredr_hcon->dst);
5744
5745         hci_conn_hold(hcon);
5746         hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5747         hci_conn_drop(hcon);
5748
5749         hci_debugfs_create_conn(hcon);
5750         hci_conn_add_sysfs(hcon);
5751
5752         amp_physical_cfm(bredr_hcon, hcon);
5753
5754 unlock:
5755         hci_dev_unlock(hdev);
5756 }
5757
5758 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5759                                      struct sk_buff *skb)
5760 {
5761         struct hci_ev_logical_link_complete *ev = data;
5762         struct hci_conn *hcon;
5763         struct hci_chan *hchan;
5764         struct amp_mgr *mgr;
5765
5766         bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5767                    le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5768
5769         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5770         if (!hcon)
5771                 return;
5772
5773         /* Create AMP hchan */
5774         hchan = hci_chan_create(hcon);
5775         if (!hchan)
5776                 return;
5777
5778         hchan->handle = le16_to_cpu(ev->handle);
5779         hchan->amp = true;
5780
5781         BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5782
5783         mgr = hcon->amp_mgr;
5784         if (mgr && mgr->bredr_chan) {
5785                 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5786
5787                 l2cap_chan_lock(bredr_chan);
5788
5789                 bredr_chan->conn->mtu = hdev->block_mtu;
5790                 l2cap_logical_cfm(bredr_chan, hchan, 0);
5791                 hci_conn_hold(hcon);
5792
5793                 l2cap_chan_unlock(bredr_chan);
5794         }
5795 }
5796
5797 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5798                                              struct sk_buff *skb)
5799 {
5800         struct hci_ev_disconn_logical_link_complete *ev = data;
5801         struct hci_chan *hchan;
5802
5803         bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5804                    le16_to_cpu(ev->handle), ev->status);
5805
5806         if (ev->status)
5807                 return;
5808
5809         hci_dev_lock(hdev);
5810
5811         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5812         if (!hchan || !hchan->amp)
5813                 goto unlock;
5814
5815         amp_destroy_logical_link(hchan, ev->reason);
5816
5817 unlock:
5818         hci_dev_unlock(hdev);
5819 }
5820
5821 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5822                                              struct sk_buff *skb)
5823 {
5824         struct hci_ev_disconn_phy_link_complete *ev = data;
5825         struct hci_conn *hcon;
5826
5827         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5828
5829         if (ev->status)
5830                 return;
5831
5832         hci_dev_lock(hdev);
5833
5834         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5835         if (hcon && hcon->type == AMP_LINK) {
5836                 hcon->state = BT_CLOSED;
5837                 hci_disconn_cfm(hcon, ev->reason);
5838                 hci_conn_del(hcon);
5839         }
5840
5841         hci_dev_unlock(hdev);
5842 }
5843 #endif
5844
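     /* Fill in the initiator and responder address information for a new
      * LE connection, preferring a controller-provided Local RPA over
      * hdev->rpa or the identity/advertising address.
      */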
5845 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5846                                 u8 bdaddr_type, bdaddr_t *local_rpa)
5847 {
5848         if (conn->out) {
5849                 conn->dst_type = bdaddr_type;
5850                 conn->resp_addr_type = bdaddr_type;
5851                 bacpy(&conn->resp_addr, bdaddr);
5852
5853                 /* If the controller has set a Local RPA then it must be
5854                  * used instead of hdev->rpa.
5855                  */
5856                 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5857                         conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5858                         bacpy(&conn->init_addr, local_rpa);
5859                 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5860                         conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5861                         bacpy(&conn->init_addr, &conn->hdev->rpa);
5862                 } else {
5863                         hci_copy_identity_address(conn->hdev, &conn->init_addr,
5864                                                   &conn->init_addr_type);
5865                 }
5866         } else {
5867                 conn->resp_addr_type = conn->hdev->adv_addr_type;
5868                 /* If the controller has set a Local RPA then it must be
5869                  * used instead of hdev->rpa.
5870                  */
5871                 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5872                         conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5873                         bacpy(&conn->resp_addr, local_rpa);
5874                 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5875                         /* In case of ext adv, resp_addr will be updated in
5876                          * Adv Terminated event.
5877                          */
5878                         if (!ext_adv_capable(conn->hdev))
5879                                 bacpy(&conn->resp_addr,
5880                                       &conn->hdev->random_addr);
5881                 } else {
5882                         bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5883                 }
5884
5885                 conn->init_addr_type = bdaddr_type;
5886                 bacpy(&conn->init_addr, bdaddr);
5887
5888                 /* For incoming connections, set the default minimum
5889                  * and maximum connection interval. They will be used
5890                  * to check if the parameters are in range and, if not,
5891                  * to trigger the connection update procedure.
5892                  */
5893                 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5894                 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5895         }
5896 }
5897
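     /* Common handler for the legacy and enhanced LE Connection Complete
      * events: create or complete the hci_conn, resolve the peer identity
      * address via a stored IRK, validate the connection handle and either
      * start the remote features procedure or go straight to connected.
      */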
5898 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5899                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5900                                  bdaddr_t *local_rpa, u8 role, u16 handle,
5901                                  u16 interval, u16 latency,
5902                                  u16 supervision_timeout)
5903 {
5904         struct hci_conn_params *params;
5905         struct hci_conn *conn;
5906         struct smp_irk *irk;
5907         u8 addr_type;
5908
5909         hci_dev_lock(hdev);
5910
5911         /* All controllers implicitly stop advertising in the event of a
5912          * connection, so ensure that the state bit is cleared.
5913          */
5914         hci_dev_clear_flag(hdev, HCI_LE_ADV);
5915
5916         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5917         if (!conn) {
5918                 /* In case of error status and there is no connection pending
5919                  * just unlock as there is nothing to cleanup.
5920                  */
5921                 if (status)
5922                         goto unlock;
5923
5924                 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5925                 if (!conn) {
5926                         bt_dev_err(hdev, "no memory for new connection");
5927                         goto unlock;
5928                 }
5929
5930                 conn->dst_type = bdaddr_type;
5931
5932                 /* If we didn't have a hci_conn object previously
5933                  * but we're in the central role, this must be something
5934                  * initiated using an accept list. Since accept list based
5935                  * connections are not "first class citizens" we don't
5936                  * have full tracking of them. Therefore, we go ahead
5937                  * with a "best effort" approach of determining the
5938                  * initiator address based on the HCI_PRIVACY flag.
5939                  */
5940                 if (conn->out) {
5941                         conn->resp_addr_type = bdaddr_type;
5942                         bacpy(&conn->resp_addr, bdaddr);
5943                         if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5944                                 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5945                                 bacpy(&conn->init_addr, &hdev->rpa);
5946                         } else {
5947                                 hci_copy_identity_address(hdev,
5948                                                           &conn->init_addr,
5949                                                           &conn->init_addr_type);
5950                         }
5951                 }
5952         } else {
5953                 cancel_delayed_work(&conn->le_conn_timeout);
5954         }
5955
5956         /* The HCI_LE_Connection_Complete event is only sent once per connection.
5957          * Processing it more than once per connection can corrupt kernel memory.
5958          *
5959          * As the connection handle is set here for the first time, it indicates
5960          * whether the connection is already set up.
5961          */
5962         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5963                 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5964                 goto unlock;
5965         }
5966
5967         le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5968
5969         /* Lookup the identity address from the stored connection
5970          * address and address type.
5971          *
5972          * When establishing connections to an identity address, the
5973          * connection procedure will store the resolvable random
5974          * address first. Now if it can be converted back into the
5975          * identity address, start using the identity address from
5976          * now on.
5977          */
5978         irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5979         if (irk) {
5980                 bacpy(&conn->dst, &irk->bdaddr);
5981                 conn->dst_type = irk->addr_type;
5982         }
5983
5984         conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5985
5986         if (handle > HCI_CONN_HANDLE_MAX) {
5987                 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
5988                            HCI_CONN_HANDLE_MAX);
5989                 status = HCI_ERROR_INVALID_PARAMETERS;
5990         }
5991
5992         /* All connection failure handling is taken care of by the
5993          * hci_conn_failed function which is triggered by the HCI
5994          * request completion callbacks used for connecting.
5995          */
5996         if (status)
5997                 goto unlock;
5998
5999         /* Drop the connection if it has been aborted */
6000         if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
6001                 hci_conn_drop(conn);
6002                 goto unlock;
6003         }
6004
6005         if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
6006                 addr_type = BDADDR_LE_PUBLIC;
6007         else
6008                 addr_type = BDADDR_LE_RANDOM;
6009
6010         /* Drop the connection if the device is blocked */
6011         if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
6012                 hci_conn_drop(conn);
6013                 goto unlock;
6014         }
6015
6016         if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
6017                 mgmt_device_connected(hdev, conn, NULL, 0);
6018
6019         conn->sec_level = BT_SECURITY_LOW;
6020         conn->handle = handle;
6021         conn->state = BT_CONFIG;
6022
6023         /* Store current advertising instance as connection advertising instance
6024          * when software rotation is in use so it can be re-enabled when
6025          * disconnected.
6026          */
6027         if (!ext_adv_capable(hdev))
6028                 conn->adv_instance = hdev->cur_adv_instance;
6029
6030         conn->le_conn_interval = interval;
6031         conn->le_conn_latency = latency;
6032         conn->le_supv_timeout = supervision_timeout;
6033
6034         hci_debugfs_create_conn(conn);
6035         hci_conn_add_sysfs(conn);
6036
6037         /* The remote features procedure is defined for central
6038          * role only. So only in case of an initiated connection
6039          * request the remote features.
6040          *
6041          * If the local controller supports peripheral-initiated features
6042          * exchange, then requesting the remote features in peripheral
6043          * role is possible. Otherwise just transition into the
6044          * connected state without requesting the remote features.
6045          */
6046         if (conn->out ||
6047             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
6048                 struct hci_cp_le_read_remote_features cp;
6049
6050                 cp.handle = __cpu_to_le16(conn->handle);
6051
6052                 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
6053                              sizeof(cp), &cp);
6054
6055                 hci_conn_hold(conn);
6056         } else {
6057                 conn->state = BT_CONNECTED;
6058                 hci_connect_cfm(conn, status);
6059         }
6060
6061         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6062                                            conn->dst_type);
6063         if (params) {
6064                 list_del_init(&params->action);
6065                 if (params->conn) {
6066                         hci_conn_drop(params->conn);
6067                         hci_conn_put(params->conn);
6068                         params->conn = NULL;
6069                 }
6070         }
6071
6072 unlock:
6073         hci_update_passive_scan(hdev);
6074         hci_dev_unlock(hdev);
6075 }
6076
6077 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6078                                      struct sk_buff *skb)
6079 {
6080         struct hci_ev_le_conn_complete *ev = data;
6081
6082         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6083
6084         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6085                              NULL, ev->role, le16_to_cpu(ev->handle),
6086                              le16_to_cpu(ev->interval),
6087                              le16_to_cpu(ev->latency),
6088                              le16_to_cpu(ev->supervision_timeout));
6089 }
6090
6091 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6092                                          struct sk_buff *skb)
6093 {
6094         struct hci_ev_le_enh_conn_complete *ev = data;
6095
6096         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6097
6098         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6099                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6100                              le16_to_cpu(ev->interval),
6101                              le16_to_cpu(ev->latency),
6102                              le16_to_cpu(ev->supervision_timeout));
6103 }
6104
6105 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6106                                     struct sk_buff *skb)
6107 {
6108         struct hci_evt_le_ext_adv_set_term *ev = data;
6109         struct hci_conn *conn;
6110         struct adv_info *adv, *n;
6111
6112         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6113
6114         /* The Bluetooth Core 5.3 specification clearly states that this event
6115          * shall not be sent when the Host disables the advertising set. So in
6116          * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6117          *
6118          * When the Host disables an advertising set, all cleanup is done via
6119          * its command callback and not needed to be duplicated here.
6120          * its command callback and does not need to be duplicated here.
6121         if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6122                 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6123                 return;
6124         }
6125
6126         hci_dev_lock(hdev);
6127
6128         adv = hci_find_adv_instance(hdev, ev->handle);
6129
6130         if (ev->status) {
6131                 if (!adv)
6132                         goto unlock;
6133
6134                 /* Remove advertising as it has been terminated */
6135                 hci_remove_adv_instance(hdev, ev->handle);
6136                 mgmt_advertising_removed(NULL, hdev, ev->handle);
6137
6138                 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6139                         if (adv->enabled)
6140                                 goto unlock;
6141                 }
6142
6143                 /* We are no longer advertising, clear HCI_LE_ADV */
6144                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
6145                 goto unlock;
6146         }
6147
6148         if (adv)
6149                 adv->enabled = false;
6150
6151         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6152         if (conn) {
6153                 /* Store handle in the connection so the correct advertising
6154                  * instance can be re-enabled when disconnected.
6155                  */
6156                 conn->adv_instance = ev->handle;
6157
6158                 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6159                     bacmp(&conn->resp_addr, BDADDR_ANY))
6160                         goto unlock;
6161
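                     /* Advertising set handle 0x00 is the default instance and
                      * uses the controller wide random address; other sets use
                      * the per-instance random address tracked in their
                      * adv_info (looked up above).
                      */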
6162                 if (!ev->handle) {
6163                         bacpy(&conn->resp_addr, &hdev->random_addr);
6164                         goto unlock;
6165                 }
6166
6167                 if (adv)
6168                         bacpy(&conn->resp_addr, &adv->random_addr);
6169         }
6170
6171 unlock:
6172         hci_dev_unlock(hdev);
6173 }
6174
6175 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6176                                             struct sk_buff *skb)
6177 {
6178         struct hci_ev_le_conn_update_complete *ev = data;
6179         struct hci_conn *conn;
6180
6181         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6182
6183         if (ev->status)
6184                 return;
6185
6186         hci_dev_lock(hdev);
6187
6188         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6189         if (conn) {
6190 #ifdef TIZEN_BT
6191                 if (ev->status) {
6192                         hci_dev_unlock(hdev);
6193                         mgmt_le_conn_update_failed(hdev, &conn->dst,
6194                                 conn->type, conn->dst_type, ev->status);
6195                         return;
6196                 }
6197 #endif
6198                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6199                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6200                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6201         }
6202
6203         hci_dev_unlock(hdev);
6204
6205 #ifdef TIZEN_BT
6206         mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
6207                                 conn->dst_type, conn->le_conn_interval,
6208                                 conn->le_conn_latency, conn->le_supv_timeout);
6209 #endif
6210 }
6211
6212 /* This function requires the caller holds hdev->lock */
6213 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6214                                               bdaddr_t *addr,
6215                                               u8 addr_type, bool addr_resolved,
6216                                               u8 adv_type)
6217 {
6218         struct hci_conn *conn;
6219         struct hci_conn_params *params;
6220
6221         /* If the event is not connectable don't proceed further */
6222         if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6223                 return NULL;
6224
6225         /* Ignore if the device is blocked or hdev is suspended */
6226         if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6227             hdev->suspended)
6228                 return NULL;
6229
6230         /* Most controllers will fail if we try to create new connections
6231          * while we have an existing one in peripheral role.
6232          */
6233         if (hdev->conn_hash.le_num_peripheral > 0 &&
6234             (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6235              !(hdev->le_states[3] & 0x10)))
6236                 return NULL;
6237
6238         /* If we're not connectable, only connect devices that we have in
6239          * our pend_le_conns list.
6240          */
6241         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6242                                            addr_type);
6243         if (!params)
6244                 return NULL;
6245
6246         if (!params->explicit_connect) {
6247                 switch (params->auto_connect) {
6248                 case HCI_AUTO_CONN_DIRECT:
6249                 /* Only devices advertising with ADV_DIRECT_IND trigger
6250                  * a connection attempt. This allows incoming connections
6251                  * from peripheral devices.
6252                          */
6253                         if (adv_type != LE_ADV_DIRECT_IND)
6254                                 return NULL;
6255                         break;
6256                 case HCI_AUTO_CONN_ALWAYS:
6257                 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
6258                  * trigger a connection attempt. This means that incoming
6259                  * connections from peripheral devices are accepted and
6260                  * also outgoing connections to peripheral devices are
6261                  * established when found.
6262                          */
6263                         break;
6264                 default:
6265                         return NULL;
6266                 }
6267         }
6268
6269         conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6270                               BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6271                               HCI_ROLE_MASTER);
6272         if (!IS_ERR(conn)) {
6273                 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6274                  * by the higher layer that tried to connect; if not, then
6275                  * store the pointer since we don't really have any
6276                  * other owner of the object besides the params that
6277                  * triggered it. This way we can abort the connection if
6278                  * the parameters get removed and keep the reference
6279                  * count consistent once the connection is established.
6280                  */
6281
6282                 if (!params->explicit_connect)
6283                         params->conn = hci_conn_get(conn);
6284
6285                 return conn;
6286         }
6287
6288         switch (PTR_ERR(conn)) {
6289         case -EBUSY:
6290                 /* If hci_connect() returns -EBUSY it means there is already
6291                  * an LE connection attempt going on. Since controllers don't
6292                  * support more than one connection attempt at a time, we
6293                  * don't consider this an error case.
6294                  */
6295                 break;
6296         default:
6297                 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6298                 return NULL;
6299         }
6300
6301         return NULL;
6302 }
6303
6304 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6305                                u8 bdaddr_type, bdaddr_t *direct_addr,
6306                                u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6307                                bool ext_adv, bool ctl_time, u64 instant)
6308 {
6309         struct discovery_state *d = &hdev->discovery;
6310         struct smp_irk *irk;
6311         struct hci_conn *conn;
6312         bool match, bdaddr_resolved;
6313         u32 flags;
6314         u8 *ptr;
6315
6316         switch (type) {
6317         case LE_ADV_IND:
6318         case LE_ADV_DIRECT_IND:
6319         case LE_ADV_SCAN_IND:
6320         case LE_ADV_NONCONN_IND:
6321         case LE_ADV_SCAN_RSP:
6322                 break;
6323         default:
6324                 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6325                                        "type: 0x%02x", type);
6326                 return;
6327         }
6328
6329         if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
6330                 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
6331                 return;
6332         }
6333
6334         /* Find the end of the data in case the report contains padded zero
6335          * bytes at the end causing an invalid length value.
6336          *
6337          * When data is NULL, len is 0 so there is no need for extra ptr
6338          * check as 'ptr < data + 0' is already false in such case.
6339          */
6340         for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6341                 if (ptr + 1 + *ptr > data + len)
6342                         break;
6343         }
6344
6345         /* Adjust for actual length. This handles the case when remote
6346          * device is advertising with incorrect data length.
6347          */
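             /* Illustrative example: a report with len 9 containing
              * 02 01 06 03 03 aa fe 00 00 walks two AD structures (lengths
              * 0x02 and 0x03), stops at the 0x00 length byte and so trims
              * len from 9 down to 7.
              */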
6348         len = ptr - data;
6349
6350         /* If the direct address is present, then this report is from
6351          * a LE Direct Advertising Report event. In that case it is
6352          * important to see if the address is matching the local
6353          * controller address.
6354          */
6355         if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6356                 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6357                                                   &bdaddr_resolved);
6358
6359                 /* Only resolvable random addresses are valid for these
6360                  * kind of reports and others can be ignored.
6361                  */
6362                 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6363                         return;
6364
6365                 /* If the controller is not using resolvable random
6366                  * addresses, then this report can be ignored.
6367                  */
6368                 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6369                         return;
6370
6371                 /* If the local IRK of the controller does not match
6372                  * with the resolvable random address provided, then
6373                  * this report can be ignored.
6374                  */
6375                 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6376                         return;
6377         }
6378
6379         /* Check if we need to convert to identity address */
6380         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6381         if (irk) {
6382                 bdaddr = &irk->bdaddr;
6383                 bdaddr_type = irk->addr_type;
6384         }
6385
6386         bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6387
6388         /* Check if we have been requested to connect to this device.
6389          *
6390          * direct_addr is set only for directed advertising reports (it is NULL
6391          * for advertising reports) and is already verified to be RPA above.
6392          */
6393         conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6394                                      type);
6395         if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
6396                 /* Store report for later inclusion by
6397                  * mgmt_device_connected
6398                  */
6399                 memcpy(conn->le_adv_data, data, len);
6400                 conn->le_adv_data_len = len;
6401         }
6402
6403         if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6404                 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6405         else
6406                 flags = 0;
6407
6408         /* All scan results should be sent up for Mesh systems */
6409         if (hci_dev_test_flag(hdev, HCI_MESH)) {
6410                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6411                                   rssi, flags, data, len, NULL, 0, instant);
6412                 return;
6413         }
6414
6415         /* Passive scanning shouldn't trigger any device found events,
6416          * except for devices marked as CONN_REPORT for which we do send
6417          * device found events, or when advertisement monitoring was requested.
6418          */
6419         if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6420                 if (type == LE_ADV_DIRECT_IND)
6421                         return;
6422
6423 #ifndef TIZEN_BT
6424                 /* Handle all adv packets in the platform */
6425                 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6426                                                bdaddr, bdaddr_type) &&
6427                     idr_is_empty(&hdev->adv_monitors_idr))
6428                         return;
6429 #endif
6430
6431 #ifdef TIZEN_BT
6432                 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6433                                   rssi, flags, data, len, NULL, 0, type);
6434 #else
6435                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6436                                   rssi, flags, data, len, NULL, 0, 0);
6437 #endif
6438                 return;
6439         }
6440
6441         /* When receiving a scan response, there is no way to
6442          * know if the remote device is connectable or not. However
6443          * since scan responses are merged with a previously seen
6444          * advertising report, the flags field from that report
6445          * will be used.
6446          *
6447          * In the unlikely case that a controller just sends a scan
6448          * response event that doesn't match the pending report, then
6449          * it is marked as a standalone SCAN_RSP.
6450          */
6451         if (type == LE_ADV_SCAN_RSP)
6452                 flags = MGMT_DEV_FOUND_SCAN_RSP;
6453
6454 #ifdef TIZEN_BT
6455         /* Disable adv ind and scan rsp merging */
6456         mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6457                                   rssi, flags, data, len, NULL, 0, type);
6458 #else
6459         /* If there's nothing pending either store the data from this
6460          * event or send an immediate device found event if the data
6461          * should not be stored for later.
6462          */
6463         if (!ext_adv && !has_pending_adv_report(hdev)) {
6464                 /* If the report will trigger a SCAN_REQ store it for
6465                  * later merging.
6466                  */
6467                 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6468                         store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6469                                                  rssi, flags, data, len);
6470                         return;
6471                 }
6472
6473                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6474                                   rssi, flags, data, len, NULL, 0, 0);
6475                 return;
6476         }
6477
6478         /* Check if the pending report is for the same device as the new one */
6479         match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6480                  bdaddr_type == d->last_adv_addr_type);
6481
6482         /* If the pending data doesn't match this report or this isn't a
6483          * scan response (e.g. we got a duplicate ADV_IND) then force
6484          * sending of the pending data.
6485          */
6486         if (type != LE_ADV_SCAN_RSP || !match) {
6487                 /* Send out whatever is in the cache, but skip duplicates */
6488                 if (!match)
6489                         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6490                                           d->last_adv_addr_type, NULL,
6491                                           d->last_adv_rssi, d->last_adv_flags,
6492                                           d->last_adv_data,
6493                                           d->last_adv_data_len, NULL, 0, 0);
6494
6495                 /* If the new report will trigger a SCAN_REQ store it for
6496                  * later merging.
6497                  */
6498                 if (!ext_adv && (type == LE_ADV_IND ||
6499                                  type == LE_ADV_SCAN_IND)) {
6500                         store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6501                                                  rssi, flags, data, len);
6502                         return;
6503                 }
6504
6505                 /* The advertising reports cannot be merged, so clear
6506                  * the pending report and send out a device found event.
6507                  */
6508                 clear_pending_adv_report(hdev);
6509                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6510                                   rssi, flags, data, len, NULL, 0, 0);
6511                 return;
6512         }
6513
6514         /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6515          * the new event is a SCAN_RSP. We can therefore proceed with
6516          * sending a merged device found event.
6517          */
6518         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6519                           d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6520                           d->last_adv_data, d->last_adv_data_len, data, len, 0);
6521         clear_pending_adv_report(hdev);
6522 #endif
6523 }
6524
6525 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6526                                   struct sk_buff *skb)
6527 {
6528         struct hci_ev_le_advertising_report *ev = data;
6529         u64 instant = jiffies;
6530
6531         if (!ev->num)
6532                 return;
6533
6534         hci_dev_lock(hdev);
6535
6536         while (ev->num--) {
6537                 struct hci_ev_le_advertising_info *info;
6538                 s8 rssi;
6539
6540                 info = hci_le_ev_skb_pull(hdev, skb,
6541                                           HCI_EV_LE_ADVERTISING_REPORT,
6542                                           sizeof(*info));
6543                 if (!info)
6544                         break;
6545
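                     /* Pull the advertising data plus the single RSSI byte
                      * that trails each report, hence info->length + 1.
                      */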
6546                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6547                                         info->length + 1))
6548                         break;
6549
6550                 if (info->length <= HCI_MAX_AD_LENGTH) {
6551                         rssi = info->data[info->length];
6552                         process_adv_report(hdev, info->type, &info->bdaddr,
6553                                            info->bdaddr_type, NULL, 0, rssi,
6554                                            info->data, info->length, false,
6555                                            false, instant);
6556                 } else {
6557                         bt_dev_err(hdev, "Dropping invalid advertising data");
6558                 }
6559         }
6560
6561         hci_dev_unlock(hdev);
6562 }
6563
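     /* Map an extended advertising report event type to the closest legacy PDU
      * type understood by process_adv_report(). For example, a connectable
      * evt_type without the legacy or directed bits set maps to LE_ADV_IND.
      */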
6564 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6565 {
6566         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6567                 switch (evt_type) {
6568                 case LE_LEGACY_ADV_IND:
6569                         return LE_ADV_IND;
6570                 case LE_LEGACY_ADV_DIRECT_IND:
6571                         return LE_ADV_DIRECT_IND;
6572                 case LE_LEGACY_ADV_SCAN_IND:
6573                         return LE_ADV_SCAN_IND;
6574                 case LE_LEGACY_NONCONN_IND:
6575                         return LE_ADV_NONCONN_IND;
6576                 case LE_LEGACY_SCAN_RSP_ADV:
6577                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6578                         return LE_ADV_SCAN_RSP;
6579                 }
6580
6581                 goto invalid;
6582         }
6583
6584         if (evt_type & LE_EXT_ADV_CONN_IND) {
6585                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6586                         return LE_ADV_DIRECT_IND;
6587
6588                 return LE_ADV_IND;
6589         }
6590
6591         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6592                 return LE_ADV_SCAN_RSP;
6593
6594         if (evt_type & LE_EXT_ADV_SCAN_IND)
6595                 return LE_ADV_SCAN_IND;
6596
6597         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6598             evt_type & LE_EXT_ADV_DIRECT_IND)
6599                 return LE_ADV_NONCONN_IND;
6600
6601 invalid:
6602         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6603                                evt_type);
6604
6605         return LE_ADV_INVALID;
6606 }
6607
6608 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6609                                       struct sk_buff *skb)
6610 {
6611         struct hci_ev_le_ext_adv_report *ev = data;
6612         u64 instant = jiffies;
6613
6614         if (!ev->num)
6615                 return;
6616
6617         hci_dev_lock(hdev);
6618
6619         while (ev->num--) {
6620                 struct hci_ev_le_ext_adv_info *info;
6621                 u8 legacy_evt_type;
6622                 u16 evt_type;
6623
6624                 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6625                                           sizeof(*info));
6626                 if (!info)
6627                         break;
6628
6629                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6630                                         info->length))
6631                         break;
6632
6633                 evt_type = __le16_to_cpu(info->type);
6634                 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6635                 if (legacy_evt_type != LE_ADV_INVALID) {
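                             /* ext_adv is only true for non-legacy PDUs, so
                              * reports carried in legacy PDUs keep the 31 byte
                              * limit enforced in process_adv_report().
                              */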
6636                         process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6637                                            info->bdaddr_type, NULL, 0,
6638                                            info->rssi, info->data, info->length,
6639                                            !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6640                                            false, instant);
6641                 }
6642         }
6643
6644         hci_dev_unlock(hdev);
6645 }
6646
6647 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6648 {
6649         struct hci_cp_le_pa_term_sync cp;
6650
6651         memset(&cp, 0, sizeof(cp));
6652         cp.handle = handle;
6653
6654         return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6655 }
6656
6657 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6658                                             struct sk_buff *skb)
6659 {
6660         struct hci_ev_le_pa_sync_established *ev = data;
6661         int mask = hdev->link_mode;
6662         __u8 flags = 0;
6663
6664         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6665
6666         if (ev->status)
6667                 return;
6668
6669         hci_dev_lock(hdev);
6670
6671         hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6672
6673         mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6674         if (!(mask & HCI_LM_ACCEPT))
6675                 hci_le_pa_term_sync(hdev, ev->handle);
6676
6677         hci_dev_unlock(hdev);
6678 }
6679
6680 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6681                                             struct sk_buff *skb)
6682 {
6683         struct hci_ev_le_remote_feat_complete *ev = data;
6684         struct hci_conn *conn;
6685
6686         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6687
6688         hci_dev_lock(hdev);
6689
6690         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6691         if (conn) {
6692                 if (!ev->status)
6693                         memcpy(conn->features[0], ev->features, 8);
6694
6695                 if (conn->state == BT_CONFIG) {
6696                         __u8 status;
6697
6698                         /* If the local controller supports peripheral-initiated
6699                          * features exchange, but the remote controller does
6700                          * not, then it is possible that the error code 0x1a
6701                          * for unsupported remote feature gets returned.
6702                          *
6703                          * In this specific case, allow the connection to
6704                          * transition into connected state and mark it as
6705                          * successful.
6706                          */
6707                         if (!conn->out && ev->status == 0x1a &&
6708                             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6709                                 status = 0x00;
6710                         else
6711                                 status = ev->status;
6712
6713                         conn->state = BT_CONNECTED;
6714                         hci_connect_cfm(conn, status);
6715                         hci_conn_drop(conn);
6716                 }
6717         }
6718
6719         hci_dev_unlock(hdev);
6720 }
6721
6722 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6723                                    struct sk_buff *skb)
6724 {
6725         struct hci_ev_le_ltk_req *ev = data;
6726         struct hci_cp_le_ltk_reply cp;
6727         struct hci_cp_le_ltk_neg_reply neg;
6728         struct hci_conn *conn;
6729         struct smp_ltk *ltk;
6730
6731         bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6732
6733         hci_dev_lock(hdev);
6734
6735         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6736         if (conn == NULL)
6737                 goto not_found;
6738
6739         ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6740         if (!ltk)
6741                 goto not_found;
6742
6743         if (smp_ltk_is_sc(ltk)) {
6744                 /* With SC both EDiv and Rand are set to zero */
6745                 if (ev->ediv || ev->rand)
6746                         goto not_found;
6747         } else {
6748                 /* For non-SC keys check that EDiv and Rand match */
6749                 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6750                         goto not_found;
6751         }
6752
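             /* The LTK field in the reply is always 16 octets, so keys
              * negotiated with a reduced encryption key size are zero padded.
              */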
6753         memcpy(cp.ltk, ltk->val, ltk->enc_size);
6754         memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6755         cp.handle = cpu_to_le16(conn->handle);
6756
6757         conn->pending_sec_level = smp_ltk_sec_level(ltk);
6758
6759         conn->enc_key_size = ltk->enc_size;
6760
6761         hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6762
6763         /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6764          * temporary key used to encrypt a connection following
6765          * pairing. It is used during the Encrypted Session Setup to
6766          * distribute the keys. Later, security can be re-established
6767          * using a distributed LTK.
6768          */
6769         if (ltk->type == SMP_STK) {
6770                 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6771                 list_del_rcu(&ltk->list);
6772                 kfree_rcu(ltk, rcu);
6773         } else {
6774                 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6775         }
6776
6777         hci_dev_unlock(hdev);
6778
6779         return;
6780
6781 not_found:
6782         neg.handle = ev->handle;
6783         hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6784         hci_dev_unlock(hdev);
6785 }
6786
6787 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6788                                       u8 reason)
6789 {
6790         struct hci_cp_le_conn_param_req_neg_reply cp;
6791
6792         cp.handle = cpu_to_le16(handle);
6793         cp.reason = reason;
6794
6795         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6796                      &cp);
6797 }
6798
6799 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6800                                              struct sk_buff *skb)
6801 {
6802         struct hci_ev_le_remote_conn_param_req *ev = data;
6803         struct hci_cp_le_conn_param_req_reply cp;
6804         struct hci_conn *hcon;
6805         u16 handle, min, max, latency, timeout;
6806
6807         bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6808
6809         handle = le16_to_cpu(ev->handle);
6810         min = le16_to_cpu(ev->interval_min);
6811         max = le16_to_cpu(ev->interval_max);
6812         latency = le16_to_cpu(ev->latency);
6813         timeout = le16_to_cpu(ev->timeout);
6814
6815         hcon = hci_conn_hash_lookup_handle(hdev, handle);
6816         if (!hcon || hcon->state != BT_CONNECTED)
6817                 return send_conn_param_neg_reply(hdev, handle,
6818                                                  HCI_ERROR_UNKNOWN_CONN_ID);
6819
6820         if (hci_check_conn_params(min, max, latency, timeout))
6821                 return send_conn_param_neg_reply(hdev, handle,
6822                                                  HCI_ERROR_INVALID_LL_PARAMS);
6823
6824         if (hcon->role == HCI_ROLE_MASTER) {
6825                 struct hci_conn_params *params;
6826                 u8 store_hint;
6827
6828                 hci_dev_lock(hdev);
6829
6830                 params = hci_conn_params_lookup(hdev, &hcon->dst,
6831                                                 hcon->dst_type);
6832                 if (params) {
6833                         params->conn_min_interval = min;
6834                         params->conn_max_interval = max;
6835                         params->conn_latency = latency;
6836                         params->supervision_timeout = timeout;
6837                         store_hint = 0x01;
6838                 } else {
6839                         store_hint = 0x00;
6840                 }
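                     /* store_hint is passed to mgmt_new_conn_param() as a hint
                      * to userspace on whether the updated parameters are
                      * worth storing persistently.
                      */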
6841
6842                 hci_dev_unlock(hdev);
6843
6844                 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6845                                     store_hint, min, max, latency, timeout);
6846         }
6847
6848         cp.handle = ev->handle;
6849         cp.interval_min = ev->interval_min;
6850         cp.interval_max = ev->interval_max;
6851         cp.latency = ev->latency;
6852         cp.timeout = ev->timeout;
6853         cp.min_ce_len = 0;
6854         cp.max_ce_len = 0;
6855
6856         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6857 }
6858
6859 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6860                                          struct sk_buff *skb)
6861 {
6862         struct hci_ev_le_direct_adv_report *ev = data;
6863         u64 instant = jiffies;
6864         int i;
6865
6866         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6867                                 flex_array_size(ev, info, ev->num)))
6868                 return;
6869
6870         if (!ev->num)
6871                 return;
6872
6873         hci_dev_lock(hdev);
6874
6875         for (i = 0; i < ev->num; i++) {
6876                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6877
6878                 process_adv_report(hdev, info->type, &info->bdaddr,
6879                                    info->bdaddr_type, &info->direct_addr,
6880                                    info->direct_addr_type, info->rssi, NULL, 0,
6881                                    false, false, instant);
6882         }
6883
6884         hci_dev_unlock(hdev);
6885 }
6886
6887 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6888                                   struct sk_buff *skb)
6889 {
6890         struct hci_ev_le_phy_update_complete *ev = data;
6891         struct hci_conn *conn;
6892
6893         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6894
6895         if (ev->status)
6896                 return;
6897
6898         hci_dev_lock(hdev);
6899
6900         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6901         if (!conn)
6902                 goto unlock;
6903
6904         conn->le_tx_phy = ev->tx_phy;
6905         conn->le_rx_phy = ev->rx_phy;
6906
6907 unlock:
6908         hci_dev_unlock(hdev);
6909 }
6910
6911 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6912                                         struct sk_buff *skb)
6913 {
6914         struct hci_evt_le_cis_established *ev = data;
6915         struct hci_conn *conn;
6916         u16 handle = __le16_to_cpu(ev->handle);
6917
6918         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6919
6920         hci_dev_lock(hdev);
6921
6922         conn = hci_conn_hash_lookup_handle(hdev, handle);
6923         if (!conn) {
6924                 bt_dev_err(hdev,
6925                            "Unable to find connection with handle 0x%4.4x",
6926                            handle);
6927                 goto unlock;
6928         }
6929
6930         if (conn->type != ISO_LINK) {
6931                 bt_dev_err(hdev,
6932                            "Invalid connection link type handle 0x%4.4x",
6933                            handle);
6934                 goto unlock;
6935         }
6936
6937         if (conn->role == HCI_ROLE_SLAVE) {
6938                 __le32 interval;
6939
6940                 memset(&interval, 0, sizeof(interval));
6941
6942                 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
6943                 conn->iso_qos.in.interval = le32_to_cpu(interval);
6944                 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
6945                 conn->iso_qos.out.interval = le32_to_cpu(interval);
6946                 conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
6947                 conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
6948                 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
6949                 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
6950                 conn->iso_qos.in.phy = ev->c_phy;
6951                 conn->iso_qos.out.phy = ev->p_phy;
6952         }
6953
6954         if (!ev->status) {
6955                 conn->state = BT_CONNECTED;
6956                 hci_debugfs_create_conn(conn);
6957                 hci_conn_add_sysfs(conn);
6958                 hci_iso_setup_path(conn);
6959                 goto unlock;
6960         }
6961
6962         hci_connect_cfm(conn, ev->status);
6963         hci_conn_del(conn);
6964
6965 unlock:
6966         hci_dev_unlock(hdev);
6967 }
6968
6969 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6970 {
6971         struct hci_cp_le_reject_cis cp;
6972
6973         memset(&cp, 0, sizeof(cp));
6974         cp.handle = handle;
6975         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6976         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6977 }
6978
6979 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6980 {
6981         struct hci_cp_le_accept_cis cp;
6982
6983         memset(&cp, 0, sizeof(cp));
6984         cp.handle = handle;
6985         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6986 }
6987
6988 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6989                                struct sk_buff *skb)
6990 {
6991         struct hci_evt_le_cis_req *ev = data;
6992         u16 acl_handle, cis_handle;
6993         struct hci_conn *acl, *cis;
6994         int mask;
6995         __u8 flags = 0;
6996
6997         acl_handle = __le16_to_cpu(ev->acl_handle);
6998         cis_handle = __le16_to_cpu(ev->cis_handle);
6999
7000         bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
7001                    acl_handle, cis_handle, ev->cig_id, ev->cis_id);
7002
7003         hci_dev_lock(hdev);
7004
7005         acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
7006         if (!acl)
7007                 goto unlock;
7008
7009         mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
7010         if (!(mask & HCI_LM_ACCEPT)) {
7011                 hci_le_reject_cis(hdev, ev->cis_handle);
7012                 goto unlock;
7013         }
7014
7015         cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
7016         if (!cis) {
7017                 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
7018                 if (!cis) {
7019                         hci_le_reject_cis(hdev, ev->cis_handle);
7020                         goto unlock;
7021                 }
7022                 cis->handle = cis_handle;
7023         }
7024
7025         cis->iso_qos.cig = ev->cig_id;
7026         cis->iso_qos.cis = ev->cis_id;
7027
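             /* If the listening ISO socket asked to defer setup
              * (HCI_PROTO_DEFER), park the CIS in BT_CONNECT2 and let it
              * accept or reject the request later; otherwise accept it right
              * away.
              */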
7028         if (!(flags & HCI_PROTO_DEFER)) {
7029                 hci_le_accept_cis(hdev, ev->cis_handle);
7030         } else {
7031                 cis->state = BT_CONNECT2;
7032                 hci_connect_cfm(cis, 0);
7033         }
7034
7035 unlock:
7036         hci_dev_unlock(hdev);
7037 }
7038
7039 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
7040                                            struct sk_buff *skb)
7041 {
7042         struct hci_evt_le_create_big_complete *ev = data;
7043         struct hci_conn *conn;
7044
7045         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7046
7047         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
7048                                 flex_array_size(ev, bis_handle, ev->num_bis)))
7049                 return;
7050
7051         hci_dev_lock(hdev);
7052
7053         conn = hci_conn_hash_lookup_big(hdev, ev->handle);
7054         if (!conn)
7055                 goto unlock;
7056
7057         if (conn->type != ISO_LINK) {
7058                 bt_dev_err(hdev,
7059                            "Invalid connection link type handle 0x%2.2x",
7060                            ev->handle);
7061                 goto unlock;
7062         }
7063
7064         if (ev->num_bis)
7065                 conn->handle = __le16_to_cpu(ev->bis_handle[0]);
7066
7067         if (!ev->status) {
7068                 conn->state = BT_CONNECTED;
7069                 hci_debugfs_create_conn(conn);
7070                 hci_conn_add_sysfs(conn);
7071                 hci_iso_setup_path(conn);
7072                 goto unlock;
7073         }
7074
7075         hci_connect_cfm(conn, ev->status);
7076         hci_conn_del(conn);
7077
7078 unlock:
7079         hci_dev_unlock(hdev);
7080 }
7081
7082 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7083                                             struct sk_buff *skb)
7084 {
7085         struct hci_evt_le_big_sync_estabilished *ev = data;
7086         struct hci_conn *bis;
7087         int i;
7088
7089         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7090
7091         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7092                                 flex_array_size(ev, bis, ev->num_bis)))
7093                 return;
7094
7095         if (ev->status)
7096                 return;
7097
7098         hci_dev_lock(hdev);
7099
7100         for (i = 0; i < ev->num_bis; i++) {
7101                 u16 handle = le16_to_cpu(ev->bis[i]);
7102                 __le32 interval;
7103
7104                 bis = hci_conn_hash_lookup_handle(hdev, handle);
7105                 if (!bis) {
7106                         bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7107                                            HCI_ROLE_SLAVE);
7108                         if (!bis)
7109                                 continue;
7110                         bis->handle = handle;
7111                 }
7112
7113                 bis->iso_qos.big = ev->handle;
7114                 memset(&interval, 0, sizeof(interval));
7115                 memcpy(&interval, ev->latency, sizeof(ev->latency));
7116                 bis->iso_qos.in.interval = le32_to_cpu(interval);
7117                 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
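                     /* e.g. an ISO_Interval of 16 units is 16 * 1.25 = 20 ms,
                      * which the integer math below computes as 16 * 125 / 100.
                      */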
7118                 bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7119                 bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
7120
7121                 hci_iso_setup_path(bis);
7122         }
7123
7124         hci_dev_unlock(hdev);
7125 }
7126
7127 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7128                                            struct sk_buff *skb)
7129 {
7130         struct hci_evt_le_big_info_adv_report *ev = data;
7131         int mask = hdev->link_mode;
7132         __u8 flags = 0;
7133
7134         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7135
7136         hci_dev_lock(hdev);
7137
7138         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7139         if (!(mask & HCI_LM_ACCEPT))
7140                 hci_le_pa_term_sync(hdev, ev->sync_handle);
7141
7142         hci_dev_unlock(hdev);
7143 }
7144
7145 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7146 [_op] = { \
7147         .func = _func, \
7148         .min_len = _min_len, \
7149         .max_len = _max_len, \
7150 }
7151
7152 #define HCI_LE_EV(_op, _func, _len) \
7153         HCI_LE_EV_VL(_op, _func, _len, _len)
7154
7155 #define HCI_LE_EV_STATUS(_op, _func) \
7156         HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
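
     /* For example, HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
      * sizeof(struct hci_ev_le_ltk_req)) places an entry at index
      * HCI_EV_LE_LTK_REQ (0x05) with min_len equal to max_len.
      */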
7157
7158 /* Entries in this table shall have their position according to the subevent
7159  * opcode they handle, so use of the macros above is recommended since they
7160  * initialize each entry at its proper index using designated initializers;
7161  * that way events without a callback function can be omitted.
7162  */
7163 static const struct hci_le_ev {
7164         void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7165         u16  min_len;
7166         u16  max_len;
7167 } hci_le_ev_table[U8_MAX + 1] = {
7168         /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7169         HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7170                   sizeof(struct hci_ev_le_conn_complete)),
7171         /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7172         HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7173                      sizeof(struct hci_ev_le_advertising_report),
7174                      HCI_MAX_EVENT_SIZE),
7175         /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7176         HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7177                   hci_le_conn_update_complete_evt,
7178                   sizeof(struct hci_ev_le_conn_update_complete)),
7179         /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7180         HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7181                   hci_le_remote_feat_complete_evt,
7182                   sizeof(struct hci_ev_le_remote_feat_complete)),
7183         /* [0x05 = HCI_EV_LE_LTK_REQ] */
7184         HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7185                   sizeof(struct hci_ev_le_ltk_req)),
7186         /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7187         HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7188                   hci_le_remote_conn_param_req_evt,
7189                   sizeof(struct hci_ev_le_remote_conn_param_req)),
7190         /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7191         HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7192                   hci_le_enh_conn_complete_evt,
7193                   sizeof(struct hci_ev_le_enh_conn_complete)),
7194         /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7195         HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7196                      sizeof(struct hci_ev_le_direct_adv_report),
7197                      HCI_MAX_EVENT_SIZE),
7198         /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7199         HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7200                   sizeof(struct hci_ev_le_phy_update_complete)),
7201         /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7202         HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7203                      sizeof(struct hci_ev_le_ext_adv_report),
7204                      HCI_MAX_EVENT_SIZE),
7205         /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7206         HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7207                   hci_le_pa_sync_estabilished_evt,
7208                   sizeof(struct hci_ev_le_pa_sync_established)),
7209         /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7210         HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7211                   sizeof(struct hci_evt_le_ext_adv_set_term)),
7212         /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7213         HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7214                   sizeof(struct hci_evt_le_cis_established)),
7215         /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7216         HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7217                   sizeof(struct hci_evt_le_cis_req)),
7218         /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7219         HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7220                      hci_le_create_big_complete_evt,
7221                      sizeof(struct hci_evt_le_create_big_complete),
7222                      HCI_MAX_EVENT_SIZE),
7223         /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
7224         HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7225                      hci_le_big_sync_established_evt,
7226                      sizeof(struct hci_evt_le_big_sync_estabilished),
7227                      HCI_MAX_EVENT_SIZE),
7228         /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7229         HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7230                      hci_le_big_info_adv_report_evt,
7231                      sizeof(struct hci_evt_le_big_info_adv_report),
7232                      HCI_MAX_EVENT_SIZE),
7233 };
7234
7235 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7236                             struct sk_buff *skb, u16 *opcode, u8 *status,
7237                             hci_req_complete_t *req_complete,
7238                             hci_req_complete_skb_t *req_complete_skb)
7239 {
7240         struct hci_ev_le_meta *ev = data;
7241         const struct hci_le_ev *subev;
7242
7243         bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7244
7245         /* Only match event if command OGF is for LE */
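             /* (OGF 0x08 is the opcode group of the LE Controller commands.) */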
7246         if (hdev->sent_cmd &&
7247             hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
7248             hci_skb_event(hdev->sent_cmd) == ev->subevent) {
7249                 *opcode = hci_skb_opcode(hdev->sent_cmd);
7250                 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7251                                      req_complete_skb);
7252         }
7253
7254         subev = &hci_le_ev_table[ev->subevent];
7255         if (!subev->func)
7256                 return;
7257
7258         if (skb->len < subev->min_len) {
7259                 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7260                            ev->subevent, skb->len, subev->min_len);
7261                 return;
7262         }
7263
7264         /* Just warn if the length is over max_len since it may still be
7265          * possible to partially parse the event, so leave it to the callback
7266          * to decide if that is acceptable.
7267          */
7268         if (skb->len > subev->max_len)
7269                 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7270                             ev->subevent, skb->len, subev->max_len);
7271         data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7272         if (!data)
7273                 return;
7274
7275         subev->func(hdev, data, skb);
7276 }
7277
7278 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7279                                  u8 event, struct sk_buff *skb)
7280 {
7281         struct hci_ev_cmd_complete *ev;
7282         struct hci_event_hdr *hdr;
7283
7284         if (!skb)
7285                 return false;
7286
7287         hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7288         if (!hdr)
7289                 return false;
7290
7291         if (event) {
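             /* A request waiting for a specific event is only completed by
              * that exact event.
              */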
7292                 if (hdr->evt != event)
7293                         return false;
7294                 return true;
7295         }
7296
7297         /* Check if request ended in Command Status - no way to retrieve
7298          * any extra parameters in this case.
7299          */
7300         if (hdr->evt == HCI_EV_CMD_STATUS)
7301                 return false;
7302
7303         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7304                 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7305                            hdr->evt);
7306                 return false;
7307         }
7308
7309         ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7310         if (!ev)
7311                 return false;
7312
7313         if (opcode != __le16_to_cpu(ev->opcode)) {
7314                 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7315                        __le16_to_cpu(ev->opcode));
7316                 return false;
7317         }
7318
7319         return true;
7320 }
7321
7322 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7323                                   struct sk_buff *skb)
7324 {
7325         struct hci_ev_le_advertising_info *adv;
7326         struct hci_ev_le_direct_adv_info *direct_adv;
7327         struct hci_ev_le_ext_adv_info *ext_adv;
7328         const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7329         const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7330
7331         hci_dev_lock(hdev);
7332
7333         /* If we are currently suspended and this is the first BT event seen,
7334          * save the wake reason associated with the event.
7335          */
7336         if (!hdev->suspended || hdev->wake_reason)
7337                 goto unlock;
7338
7339         /* Default to remote wake. Values for wake_reason are documented in the
7340          * Bluez mgmt api docs.
7341          * BlueZ mgmt API docs.
7342         hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7343
7344         /* Once configured for remote wakeup, we should only wake up for
7345          * reconnections. It's useful to see which device is waking us up so
7346          * keep track of the bdaddr of the connection event that woke us up.
7347          */
7348         if (event == HCI_EV_CONN_REQUEST) {
7349                 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7350                 hdev->wake_addr_type = BDADDR_BREDR;
7351         } else if (event == HCI_EV_CONN_COMPLETE) {
7352                 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7353                 hdev->wake_addr_type = BDADDR_BREDR;
7354         } else if (event == HCI_EV_LE_META) {
7355                 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7356                 u8 subevent = le_ev->subevent;
7357                 u8 *ptr = &skb->data[sizeof(*le_ev)];
7358                 u8 num_reports = *ptr;
7359
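                     /* For these subevents the byte after the subevent code is
                      * Num_Reports and the first report structure follows it
                      * immediately.
                      */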
7360                 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7361                      subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7362                      subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7363                     num_reports) {
7364                         adv = (void *)(ptr + 1);
7365                         direct_adv = (void *)(ptr + 1);
7366                         ext_adv = (void *)(ptr + 1);
7367
7368                         switch (subevent) {
7369                         case HCI_EV_LE_ADVERTISING_REPORT:
7370                                 bacpy(&hdev->wake_addr, &adv->bdaddr);
7371                                 hdev->wake_addr_type = adv->bdaddr_type;
7372                                 break;
7373                         case HCI_EV_LE_DIRECT_ADV_REPORT:
7374                                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7375                                 hdev->wake_addr_type = direct_adv->bdaddr_type;
7376                                 break;
7377                         case HCI_EV_LE_EXT_ADV_REPORT:
7378                                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7379                                 hdev->wake_addr_type = ext_adv->bdaddr_type;
7380                                 break;
7381                         }
7382                 }
7383         } else {
7384                 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7385         }
7386
7387 unlock:
7388         hci_dev_unlock(hdev);
7389 }
7390
7391 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7392 [_op] = { \
7393         .req = false, \
7394         .func = _func, \
7395         .min_len = _min_len, \
7396         .max_len = _max_len, \
7397 }
7398
7399 #define HCI_EV(_op, _func, _len) \
7400         HCI_EV_VL(_op, _func, _len, _len)
7401
7402 #define HCI_EV_STATUS(_op, _func) \
7403         HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7404
7405 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7406 [_op] = { \
7407         .req = true, \
7408         .func_req = _func, \
7409         .min_len = _min_len, \
7410         .max_len = _max_len, \
7411 }
7412
7413 #define HCI_EV_REQ(_op, _func, _len) \
7414         HCI_EV_REQ_VL(_op, _func, _len, _len)
7415
7416 /* Entries in this table shall have their position according to the event opcode
7417  * they handle, so use of the macros above is recommended since they initialize
7418  * each entry at its proper index using designated initializers; that way
7419  * events without a callback function don't need an entry.
7420  */
7421 static const struct hci_ev {
7422         bool req;
7423         union {
7424                 void (*func)(struct hci_dev *hdev, void *data,
7425                              struct sk_buff *skb);
7426                 void (*func_req)(struct hci_dev *hdev, void *data,
7427                                  struct sk_buff *skb, u16 *opcode, u8 *status,
7428                                  hci_req_complete_t *req_complete,
7429                                  hci_req_complete_skb_t *req_complete_skb);
7430         };
7431         u16  min_len;
7432         u16  max_len;
7433 } hci_ev_table[U8_MAX + 1] = {
7434         /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7435         HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7436         /* [0x02 = HCI_EV_INQUIRY_RESULT] */
7437         HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7438                   sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7439         /* [0x03 = HCI_EV_CONN_COMPLETE] */
7440         HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7441                sizeof(struct hci_ev_conn_complete)),
7442         /* [0x04 = HCI_EV_CONN_REQUEST] */
7443         HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7444                sizeof(struct hci_ev_conn_request)),
7445         /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7446         HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7447                sizeof(struct hci_ev_disconn_complete)),
7448         /* [0x06 = HCI_EV_AUTH_COMPLETE] */
7449         HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7450                sizeof(struct hci_ev_auth_complete)),
7451         /* [0x07 = HCI_EV_REMOTE_NAME] */
7452         HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7453                sizeof(struct hci_ev_remote_name)),
7454         /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7455         HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7456                sizeof(struct hci_ev_encrypt_change)),
7457         /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7458         HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7459                hci_change_link_key_complete_evt,
7460                sizeof(struct hci_ev_change_link_key_complete)),
7461         /* [0x0b = HCI_EV_REMOTE_FEATURES] */
7462         HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7463                sizeof(struct hci_ev_remote_features)),
7464         /* [0x0e = HCI_EV_CMD_COMPLETE] */
7465         HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7466                       sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7467         /* [0x0f = HCI_EV_CMD_STATUS] */
7468         HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7469                    sizeof(struct hci_ev_cmd_status)),
7470         /* [0x10 = HCI_EV_HARDWARE_ERROR] */
7471         HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7472                sizeof(struct hci_ev_hardware_error)),
7473         /* [0x12 = HCI_EV_ROLE_CHANGE] */
7474         HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7475                sizeof(struct hci_ev_role_change)),
7476         /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7477         HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7478                   sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7479         /* [0x14 = HCI_EV_MODE_CHANGE] */
7480         HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7481                sizeof(struct hci_ev_mode_change)),
7482         /* [0x16 = HCI_EV_PIN_CODE_REQ] */
7483         HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7484                sizeof(struct hci_ev_pin_code_req)),
7485         /* [0x17 = HCI_EV_LINK_KEY_REQ] */
7486         HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7487                sizeof(struct hci_ev_link_key_req)),
7488         /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7489         HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7490                sizeof(struct hci_ev_link_key_notify)),
7491         /* [0x1c = HCI_EV_CLOCK_OFFSET] */
7492         HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7493                sizeof(struct hci_ev_clock_offset)),
7494         /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7495         HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7496                sizeof(struct hci_ev_pkt_type_change)),
7497         /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7498         HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7499                sizeof(struct hci_ev_pscan_rep_mode)),
7500         /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7501         HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7502                   hci_inquiry_result_with_rssi_evt,
7503                   sizeof(struct hci_ev_inquiry_result_rssi),
7504                   HCI_MAX_EVENT_SIZE),
7505         /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7506         HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7507                sizeof(struct hci_ev_remote_ext_features)),
7508         /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7509         HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7510                sizeof(struct hci_ev_sync_conn_complete)),
7511         /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7512         HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7513                   hci_extended_inquiry_result_evt,
7514                   sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7515         /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7516         HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7517                sizeof(struct hci_ev_key_refresh_complete)),
7518         /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7519         HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7520                sizeof(struct hci_ev_io_capa_request)),
7521         /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7522         HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7523                sizeof(struct hci_ev_io_capa_reply)),
7524         /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7525         HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7526                sizeof(struct hci_ev_user_confirm_req)),
7527         /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7528         HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7529                sizeof(struct hci_ev_user_passkey_req)),
7530         /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7531         HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7532                sizeof(struct hci_ev_remote_oob_data_request)),
7533         /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7534         HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7535                sizeof(struct hci_ev_simple_pair_complete)),
7536         /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7537         HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7538                sizeof(struct hci_ev_user_passkey_notify)),
7539         /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7540         HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7541                sizeof(struct hci_ev_keypress_notify)),
7542         /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7543         HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7544                sizeof(struct hci_ev_remote_host_features)),
7545         /* [0x3e = HCI_EV_LE_META] */
7546         HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7547                       sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7548 #if IS_ENABLED(CONFIG_BT_HS)
7549         /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7550         HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7551                sizeof(struct hci_ev_phy_link_complete)),
7552         /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7553         HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7554                sizeof(struct hci_ev_channel_selected)),
7555         /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7556         HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7557                hci_disconn_phylink_complete_evt,
7558                sizeof(struct hci_ev_disconn_phy_link_complete)),
7559         /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7560         HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7561                sizeof(struct hci_ev_logical_link_complete)),
7562         /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7563         HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7564                hci_disconn_loglink_complete_evt,
7565                sizeof(struct hci_ev_disconn_logical_link_complete)),
7566 #endif
7567         /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7568         HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7569                sizeof(struct hci_ev_num_comp_blocks)),
7570 #ifdef TIZEN_BT
7571         /* [0xFF = HCI_EV_VENDOR_SPECIFIC] */
7572         HCI_EV(HCI_EV_VENDOR_SPECIFIC, hci_vendor_specific_evt,
7573                sizeof(struct hci_ev_vendor_specific)),
7574 #else
7575         /* [0xff = HCI_EV_VENDOR] */
7576         HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7577 #endif
7578 };
7579
7580 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7581                            u16 *opcode, u8 *status,
7582                            hci_req_complete_t *req_complete,
7583                            hci_req_complete_skb_t *req_complete_skb)
7584 {
7585         const struct hci_ev *ev = &hci_ev_table[event];
7586         void *data;
7587
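        /* Events with no handler registered in hci_ev_table are ignored */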
7588         if (!ev->func)
7589                 return;
7590
7591         if (skb->len < ev->min_len) {
7592                 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7593                            event, skb->len, ev->min_len);
7594                 return;
7595         }
7596
7597         /* Just warn if the length exceeds max_len, since it may still be
7598          * possible to partially parse the event, so leave it to the
7599          * callback to decide whether that is acceptable.
7600          */
7601         if (skb->len > ev->max_len)
7602                 bt_dev_warn_ratelimited(hdev,
7603                                         "unexpected event 0x%2.2x length: %u > %u",
7604                                         event, skb->len, ev->max_len);
7605
7606         data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7607         if (!data)
7608                 return;
7609
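        /* Command Complete, Command Status and LE Meta entries are marked as
         * requests and use the extended callback signature.
         */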
7610         if (ev->req)
7611                 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7612                              req_complete_skb);
7613         else
7614                 ev->func(hdev, data, skb);
7615 }
7616
7617 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7618 {
7619         struct hci_event_hdr *hdr = (void *) skb->data;
7620         hci_req_complete_t req_complete = NULL;
7621         hci_req_complete_skb_t req_complete_skb = NULL;
7622         struct sk_buff *orig_skb = NULL;
7623         u8 status = 0, event, req_evt = 0;
7624         u16 opcode = HCI_OP_NOP;
7625
7626         if (skb->len < sizeof(*hdr)) {
7627                 bt_dev_err(hdev, "Malformed HCI Event");
7628                 goto done;
7629         }
7630
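        /* Keep a copy of the most recently received event */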
7631         kfree_skb(hdev->recv_event);
7632         hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7633
7634         event = hdr->evt;
7635         if (!event) {
7636                 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7637                             event);
7638                 goto done;
7639         }
7640
7641         /* Only match event if command OGF is not 0x08 (LE Controller commands) */
7642         if (hdev->sent_cmd &&
7643             hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
7644             hci_skb_event(hdev->sent_cmd) == event) {
7645                 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
7646                                      status, &req_complete, &req_complete_skb);
7647                 req_evt = event;
7648         }
7649
7650         /* If it looks like we might end up having to call
7651          * req_complete_skb, store a pristine copy of the skb since the
7652          * various handlers may modify the original one through
7653          * skb_pull() calls, etc.
7654          */
7655         if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7656             event == HCI_EV_CMD_COMPLETE)
7657                 orig_skb = skb_clone(skb, GFP_KERNEL);
7658
7659         skb_pull(skb, HCI_EVENT_HDR_SIZE);
7660
7661         /* Store wake reason if we're suspended */
7662         hci_store_wake_reason(hdev, event, skb);
7663
7664         bt_dev_dbg(hdev, "event 0x%2.2x", event);
7665
7666         hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7667                        &req_complete_skb);
7668
7669         if (req_complete) {
7670                 req_complete(hdev, status, opcode);
7671         } else if (req_complete_skb) {
7672                 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7673                         kfree_skb(orig_skb);
7674                         orig_skb = NULL;
7675                 }
7676                 req_complete_skb(hdev, status, opcode, orig_skb);
7677         }
7678
7679 done:
7680         kfree_skb(orig_skb);
7681         kfree_skb(skb);
7682         hdev->stat.evt_rx++;
7683 }