6e3edb285b738d9bf503b54de945df3ba07993a0
[platform/kernel/linux-starfive.git] / net / bluetooth / hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 #include "eir.h"
40
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42                  "\x00\x00\x00\x00\x00\x00\x00\x00"
43
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45
46 /* Handle HCI Event packets */
47
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
49                              u8 ev, size_t len)
50 {
51         void *data;
52
53         data = skb_pull_data(skb, len);
54         if (!data)
55                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
56
57         return data;
58 }
59
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
61                              u16 op, size_t len)
62 {
63         void *data;
64
65         data = skb_pull_data(skb, len);
66         if (!data)
67                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
68
69         return data;
70 }
71
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
73                                 u8 ev, size_t len)
74 {
75         void *data;
76
77         data = skb_pull_data(skb, len);
78         if (!data)
79                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
80
81         return data;
82 }
83
84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
85                                 struct sk_buff *skb)
86 {
87         struct hci_ev_status *rp = data;
88
89         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
90
91         /* It is possible that we receive Inquiry Complete event right
92          * before we receive Inquiry Cancel Command Complete event, in
93          * which case the latter event should have status of Command
94          * Disallowed (0x0c). This should not be treated as error, since
95          * we actually achieve what Inquiry Cancel wants to achieve,
96          * which is to end the last Inquiry session.
97          */
98         if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
99                 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
100                 rp->status = 0x00;
101         }
102
103         if (rp->status)
104                 return rp->status;
105
106         clear_bit(HCI_INQUIRY, &hdev->flags);
107         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
108         wake_up_bit(&hdev->flags, HCI_INQUIRY);
109
110         hci_dev_lock(hdev);
111         /* Set discovery state to stopped if we're not doing LE active
112          * scanning.
113          */
114         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
115             hdev->le_scan_type != LE_SCAN_ACTIVE)
116                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
117         hci_dev_unlock(hdev);
118
119         hci_conn_check_pending(hdev);
120
121         return rp->status;
122 }
123
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
125                               struct sk_buff *skb)
126 {
127         struct hci_ev_status *rp = data;
128
129         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
130
131         if (rp->status)
132                 return rp->status;
133
134         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
135
136         return rp->status;
137 }
138
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
140                                    struct sk_buff *skb)
141 {
142         struct hci_ev_status *rp = data;
143
144         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
145
146         if (rp->status)
147                 return rp->status;
148
149         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
150
151         hci_conn_check_pending(hdev);
152
153         return rp->status;
154 }
155
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
157                                         struct sk_buff *skb)
158 {
159         struct hci_ev_status *rp = data;
160
161         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
162
163         return rp->status;
164 }
165
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
167                                 struct sk_buff *skb)
168 {
169         struct hci_rp_role_discovery *rp = data;
170         struct hci_conn *conn;
171
172         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
173
174         if (rp->status)
175                 return rp->status;
176
177         hci_dev_lock(hdev);
178
179         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180         if (conn)
181                 conn->role = rp->role;
182
183         hci_dev_unlock(hdev);
184
185         return rp->status;
186 }
187
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
189                                   struct sk_buff *skb)
190 {
191         struct hci_rp_read_link_policy *rp = data;
192         struct hci_conn *conn;
193
194         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
195
196         if (rp->status)
197                 return rp->status;
198
199         hci_dev_lock(hdev);
200
201         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
202         if (conn)
203                 conn->link_policy = __le16_to_cpu(rp->policy);
204
205         hci_dev_unlock(hdev);
206
207         return rp->status;
208 }
209
210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
211                                    struct sk_buff *skb)
212 {
213         struct hci_rp_write_link_policy *rp = data;
214         struct hci_conn *conn;
215         void *sent;
216 #ifdef TIZEN_BT
217         struct hci_cp_write_link_policy cp;
218         struct hci_conn *sco_conn;
219 #endif
220
221         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
222
223         if (rp->status)
224                 return rp->status;
225
226         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
227         if (!sent)
228                 return rp->status;
229
230         hci_dev_lock(hdev);
231
232         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
233         if (conn)
234                 conn->link_policy = get_unaligned_le16(sent + 2);
235
236 #ifdef TIZEN_BT
237         sco_conn = hci_conn_hash_lookup_sco(hdev);
238         if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
239             conn->link_policy & HCI_LP_SNIFF) {
240                 BT_ERR("SNIFF is not allowed during sco connection");
241                 cp.handle = __cpu_to_le16(conn->handle);
242                 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
243                 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
244         }
245 #endif
246
247         hci_dev_unlock(hdev);
248
249         return rp->status;
250 }
251
252 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
253                                       struct sk_buff *skb)
254 {
255         struct hci_rp_read_def_link_policy *rp = data;
256
257         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
258
259         if (rp->status)
260                 return rp->status;
261
262         hdev->link_policy = __le16_to_cpu(rp->policy);
263
264         return rp->status;
265 }
266
267 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
268                                        struct sk_buff *skb)
269 {
270         struct hci_ev_status *rp = data;
271         void *sent;
272
273         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
274
275         if (rp->status)
276                 return rp->status;
277
278         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
279         if (!sent)
280                 return rp->status;
281
282         hdev->link_policy = get_unaligned_le16(sent);
283
284         return rp->status;
285 }
286
287 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
288 {
289         struct hci_ev_status *rp = data;
290
291         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
292
293         clear_bit(HCI_RESET, &hdev->flags);
294
295         if (rp->status)
296                 return rp->status;
297
298         /* Reset all non-persistent flags */
299         hci_dev_clear_volatile_flags(hdev);
300
301         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
302
303         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
304         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
305
306         memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
307         hdev->adv_data_len = 0;
308
309         memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
310         hdev->scan_rsp_data_len = 0;
311
312         hdev->le_scan_type = LE_SCAN_PASSIVE;
313
314         hdev->ssp_debug_mode = 0;
315
316         hci_bdaddr_list_clear(&hdev->le_accept_list);
317         hci_bdaddr_list_clear(&hdev->le_resolv_list);
318
319         return rp->status;
320 }
321
322 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
323                                       struct sk_buff *skb)
324 {
325         struct hci_rp_read_stored_link_key *rp = data;
326         struct hci_cp_read_stored_link_key *sent;
327
328         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
329
330         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
331         if (!sent)
332                 return rp->status;
333
334         if (!rp->status && sent->read_all == 0x01) {
335                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
336                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
337         }
338
339         return rp->status;
340 }
341
342 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
343                                         struct sk_buff *skb)
344 {
345         struct hci_rp_delete_stored_link_key *rp = data;
346         u16 num_keys;
347
348         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
349
350         if (rp->status)
351                 return rp->status;
352
353         num_keys = le16_to_cpu(rp->num_keys);
354
355         if (num_keys <= hdev->stored_num_keys)
356                 hdev->stored_num_keys -= num_keys;
357         else
358                 hdev->stored_num_keys = 0;
359
360         return rp->status;
361 }
362
363 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
364                                   struct sk_buff *skb)
365 {
366         struct hci_ev_status *rp = data;
367         void *sent;
368
369         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
370
371         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
372         if (!sent)
373                 return rp->status;
374
375         hci_dev_lock(hdev);
376
377         if (hci_dev_test_flag(hdev, HCI_MGMT))
378                 mgmt_set_local_name_complete(hdev, sent, rp->status);
379         else if (!rp->status)
380                 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
381
382         hci_dev_unlock(hdev);
383
384         return rp->status;
385 }
386
387 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
388                                  struct sk_buff *skb)
389 {
390         struct hci_rp_read_local_name *rp = data;
391
392         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
393
394         if (rp->status)
395                 return rp->status;
396
397         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
398             hci_dev_test_flag(hdev, HCI_CONFIG))
399                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
400
401         return rp->status;
402 }
403
404 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
405                                    struct sk_buff *skb)
406 {
407         struct hci_ev_status *rp = data;
408         void *sent;
409
410         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
411
412         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
413         if (!sent)
414                 return rp->status;
415
416         hci_dev_lock(hdev);
417
418         if (!rp->status) {
419                 __u8 param = *((__u8 *) sent);
420
421                 if (param == AUTH_ENABLED)
422                         set_bit(HCI_AUTH, &hdev->flags);
423                 else
424                         clear_bit(HCI_AUTH, &hdev->flags);
425         }
426
427         if (hci_dev_test_flag(hdev, HCI_MGMT))
428                 mgmt_auth_enable_complete(hdev, rp->status);
429
430         hci_dev_unlock(hdev);
431
432         return rp->status;
433 }
434
435 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
436                                     struct sk_buff *skb)
437 {
438         struct hci_ev_status *rp = data;
439         __u8 param;
440         void *sent;
441
442         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
443
444         if (rp->status)
445                 return rp->status;
446
447         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
448         if (!sent)
449                 return rp->status;
450
451         param = *((__u8 *) sent);
452
453         if (param)
454                 set_bit(HCI_ENCRYPT, &hdev->flags);
455         else
456                 clear_bit(HCI_ENCRYPT, &hdev->flags);
457
458         return rp->status;
459 }
460
461 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
462                                    struct sk_buff *skb)
463 {
464         struct hci_ev_status *rp = data;
465         __u8 param;
466         void *sent;
467
468         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
469
470         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
471         if (!sent)
472                 return rp->status;
473
474         param = *((__u8 *) sent);
475
476         hci_dev_lock(hdev);
477
478         if (rp->status) {
479                 hdev->discov_timeout = 0;
480                 goto done;
481         }
482
483         if (param & SCAN_INQUIRY)
484                 set_bit(HCI_ISCAN, &hdev->flags);
485         else
486                 clear_bit(HCI_ISCAN, &hdev->flags);
487
488         if (param & SCAN_PAGE)
489                 set_bit(HCI_PSCAN, &hdev->flags);
490         else
491                 clear_bit(HCI_PSCAN, &hdev->flags);
492
493 done:
494         hci_dev_unlock(hdev);
495
496         return rp->status;
497 }
498
499 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
500                                   struct sk_buff *skb)
501 {
502         struct hci_ev_status *rp = data;
503         struct hci_cp_set_event_filter *cp;
504         void *sent;
505
506         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
507
508         if (rp->status)
509                 return rp->status;
510
511         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
512         if (!sent)
513                 return rp->status;
514
515         cp = (struct hci_cp_set_event_filter *)sent;
516
517         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
518                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
519         else
520                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
521
522         return rp->status;
523 }
524
525 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
526                                    struct sk_buff *skb)
527 {
528         struct hci_rp_read_class_of_dev *rp = data;
529
530         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
531
532         if (rp->status)
533                 return rp->status;
534
535         memcpy(hdev->dev_class, rp->dev_class, 3);
536
537         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
538                    hdev->dev_class[1], hdev->dev_class[0]);
539
540         return rp->status;
541 }
542
543 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
544                                     struct sk_buff *skb)
545 {
546         struct hci_ev_status *rp = data;
547         void *sent;
548
549         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
550
551         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
552         if (!sent)
553                 return rp->status;
554
555         hci_dev_lock(hdev);
556
557         if (!rp->status)
558                 memcpy(hdev->dev_class, sent, 3);
559
560         if (hci_dev_test_flag(hdev, HCI_MGMT))
561                 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
562
563         hci_dev_unlock(hdev);
564
565         return rp->status;
566 }
567
568 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
569                                     struct sk_buff *skb)
570 {
571         struct hci_rp_read_voice_setting *rp = data;
572         __u16 setting;
573
574         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
575
576         if (rp->status)
577                 return rp->status;
578
579         setting = __le16_to_cpu(rp->voice_setting);
580
581         if (hdev->voice_setting == setting)
582                 return rp->status;
583
584         hdev->voice_setting = setting;
585
586         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
587
588         if (hdev->notify)
589                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
590
591         return rp->status;
592 }
593
594 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
595                                      struct sk_buff *skb)
596 {
597         struct hci_ev_status *rp = data;
598         __u16 setting;
599         void *sent;
600
601         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
602
603         if (rp->status)
604                 return rp->status;
605
606         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
607         if (!sent)
608                 return rp->status;
609
610         setting = get_unaligned_le16(sent);
611
612         if (hdev->voice_setting == setting)
613                 return rp->status;
614
615         hdev->voice_setting = setting;
616
617         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
618
619         if (hdev->notify)
620                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
621
622         return rp->status;
623 }
624
625 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
626                                         struct sk_buff *skb)
627 {
628         struct hci_rp_read_num_supported_iac *rp = data;
629
630         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
631
632         if (rp->status)
633                 return rp->status;
634
635         hdev->num_iac = rp->num_iac;
636
637         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
638
639         return rp->status;
640 }
641
642 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
643                                 struct sk_buff *skb)
644 {
645         struct hci_ev_status *rp = data;
646         struct hci_cp_write_ssp_mode *sent;
647
648         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
649
650         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
651         if (!sent)
652                 return rp->status;
653
654         hci_dev_lock(hdev);
655
656         if (!rp->status) {
657                 if (sent->mode)
658                         hdev->features[1][0] |= LMP_HOST_SSP;
659                 else
660                         hdev->features[1][0] &= ~LMP_HOST_SSP;
661         }
662
663         if (!rp->status) {
664                 if (sent->mode)
665                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
666                 else
667                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
668         }
669
670         hci_dev_unlock(hdev);
671
672         return rp->status;
673 }
674
675 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
676                                   struct sk_buff *skb)
677 {
678         struct hci_ev_status *rp = data;
679         struct hci_cp_write_sc_support *sent;
680
681         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
682
683         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
684         if (!sent)
685                 return rp->status;
686
687         hci_dev_lock(hdev);
688
689         if (!rp->status) {
690                 if (sent->support)
691                         hdev->features[1][0] |= LMP_HOST_SC;
692                 else
693                         hdev->features[1][0] &= ~LMP_HOST_SC;
694         }
695
696         if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
697                 if (sent->support)
698                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
699                 else
700                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
701         }
702
703         hci_dev_unlock(hdev);
704
705         return rp->status;
706 }
707
708 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
709                                     struct sk_buff *skb)
710 {
711         struct hci_rp_read_local_version *rp = data;
712
713         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
714
715         if (rp->status)
716                 return rp->status;
717
718         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
719             hci_dev_test_flag(hdev, HCI_CONFIG)) {
720                 hdev->hci_ver = rp->hci_ver;
721                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
722                 hdev->lmp_ver = rp->lmp_ver;
723                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
724                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
725         }
726
727         return rp->status;
728 }
729
730 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
731                                    struct sk_buff *skb)
732 {
733         struct hci_rp_read_enc_key_size *rp = data;
734         struct hci_conn *conn;
735         u16 handle;
736         u8 status = rp->status;
737
738         bt_dev_dbg(hdev, "status 0x%2.2x", status);
739
740         handle = le16_to_cpu(rp->handle);
741
742         hci_dev_lock(hdev);
743
744         conn = hci_conn_hash_lookup_handle(hdev, handle);
745         if (!conn) {
746                 status = 0xFF;
747                 goto done;
748         }
749
750         /* While unexpected, the read_enc_key_size command may fail. The most
751          * secure approach is to then assume the key size is 0 to force a
752          * disconnection.
753          */
754         if (status) {
755                 bt_dev_err(hdev, "failed to read key size for handle %u",
756                            handle);
757                 conn->enc_key_size = 0;
758         } else {
759                 conn->enc_key_size = rp->key_size;
760                 status = 0;
761         }
762
763         hci_encrypt_cfm(conn, 0);
764
765 done:
766         hci_dev_unlock(hdev);
767
768         return status;
769 }
770
771 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
772                                      struct sk_buff *skb)
773 {
774         struct hci_rp_read_local_commands *rp = data;
775
776         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
777
778         if (rp->status)
779                 return rp->status;
780
781         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
782             hci_dev_test_flag(hdev, HCI_CONFIG))
783                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
784
785         return rp->status;
786 }
787
788 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
789                                            struct sk_buff *skb)
790 {
791         struct hci_rp_read_auth_payload_to *rp = data;
792         struct hci_conn *conn;
793
794         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
795
796         if (rp->status)
797                 return rp->status;
798
799         hci_dev_lock(hdev);
800
801         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
802         if (conn)
803                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
804
805         hci_dev_unlock(hdev);
806
807         return rp->status;
808 }
809
810 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
811                                             struct sk_buff *skb)
812 {
813         struct hci_rp_write_auth_payload_to *rp = data;
814         struct hci_conn *conn;
815         void *sent;
816
817         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
818
819         if (rp->status)
820                 return rp->status;
821
822         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
823         if (!sent)
824                 return rp->status;
825
826         hci_dev_lock(hdev);
827
828         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
829         if (conn)
830                 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
831
832         hci_dev_unlock(hdev);
833
834         return rp->status;
835 }
836
837 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
838                                      struct sk_buff *skb)
839 {
840         struct hci_rp_read_local_features *rp = data;
841
842         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
843
844         if (rp->status)
845                 return rp->status;
846
847         memcpy(hdev->features, rp->features, 8);
848
849         /* Adjust default settings according to features
850          * supported by device. */
851
852         if (hdev->features[0][0] & LMP_3SLOT)
853                 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
854
855         if (hdev->features[0][0] & LMP_5SLOT)
856                 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
857
858         if (hdev->features[0][1] & LMP_HV2) {
859                 hdev->pkt_type  |= (HCI_HV2);
860                 hdev->esco_type |= (ESCO_HV2);
861         }
862
863         if (hdev->features[0][1] & LMP_HV3) {
864                 hdev->pkt_type  |= (HCI_HV3);
865                 hdev->esco_type |= (ESCO_HV3);
866         }
867
868         if (lmp_esco_capable(hdev))
869                 hdev->esco_type |= (ESCO_EV3);
870
871         if (hdev->features[0][4] & LMP_EV4)
872                 hdev->esco_type |= (ESCO_EV4);
873
874         if (hdev->features[0][4] & LMP_EV5)
875                 hdev->esco_type |= (ESCO_EV5);
876
877         if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
878                 hdev->esco_type |= (ESCO_2EV3);
879
880         if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
881                 hdev->esco_type |= (ESCO_3EV3);
882
883         if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
884                 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
885
886         return rp->status;
887 }
888
889 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
890                                          struct sk_buff *skb)
891 {
892         struct hci_rp_read_local_ext_features *rp = data;
893
894         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
895
896         if (rp->status)
897                 return rp->status;
898
899         if (hdev->max_page < rp->max_page)
900                 hdev->max_page = rp->max_page;
901
902         if (rp->page < HCI_MAX_PAGES)
903                 memcpy(hdev->features[rp->page], rp->features, 8);
904
905         return rp->status;
906 }
907
908 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
909                                         struct sk_buff *skb)
910 {
911         struct hci_rp_read_flow_control_mode *rp = data;
912
913         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
914
915         if (rp->status)
916                 return rp->status;
917
918         hdev->flow_ctl_mode = rp->mode;
919
920         return rp->status;
921 }
922
923 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
924                                   struct sk_buff *skb)
925 {
926         struct hci_rp_read_buffer_size *rp = data;
927
928         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
929
930         if (rp->status)
931                 return rp->status;
932
933         hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
934         hdev->sco_mtu  = rp->sco_mtu;
935         hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
936         hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
937
938         if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
939                 hdev->sco_mtu  = 64;
940                 hdev->sco_pkts = 8;
941         }
942
943         hdev->acl_cnt = hdev->acl_pkts;
944         hdev->sco_cnt = hdev->sco_pkts;
945
946         BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
947                hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
948
949         return rp->status;
950 }
951
952 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
953                               struct sk_buff *skb)
954 {
955         struct hci_rp_read_bd_addr *rp = data;
956
957         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
958
959         if (rp->status)
960                 return rp->status;
961
962         if (test_bit(HCI_INIT, &hdev->flags))
963                 bacpy(&hdev->bdaddr, &rp->bdaddr);
964
965         if (hci_dev_test_flag(hdev, HCI_SETUP))
966                 bacpy(&hdev->setup_addr, &rp->bdaddr);
967
968         return rp->status;
969 }
970
971 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
972                                          struct sk_buff *skb)
973 {
974         struct hci_rp_read_local_pairing_opts *rp = data;
975
976         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
977
978         if (rp->status)
979                 return rp->status;
980
981         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
982             hci_dev_test_flag(hdev, HCI_CONFIG)) {
983                 hdev->pairing_opts = rp->pairing_opts;
984                 hdev->max_enc_key_size = rp->max_key_size;
985         }
986
987         return rp->status;
988 }
989
/* Command Complete handler for Read Page Scan Activity: caches the
 * page scan interval and window while the controller is initializing.
 */
static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
                                         struct sk_buff *skb)
{
        struct hci_rp_read_page_scan_activity *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        if (test_bit(HCI_INIT, &hdev->flags)) {
                hdev->page_scan_interval = __le16_to_cpu(rp->interval);
                hdev->page_scan_window = __le16_to_cpu(rp->window);
        }

        return rp->status;
}
1007
/* Command Complete handler for Write Page Scan Activity: the event only
 * carries a status, so on success mirror the parameters from the command
 * we sent into hdev.
 */
static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
                                          struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_write_page_scan_activity *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* Recover the parameters from the command that triggered this event */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
        if (!sent)
                return rp->status;

        hdev->page_scan_interval = __le16_to_cpu(sent->interval);
        hdev->page_scan_window = __le16_to_cpu(sent->window);

        return rp->status;
}
1028
1029 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1030                                      struct sk_buff *skb)
1031 {
1032         struct hci_rp_read_page_scan_type *rp = data;
1033
1034         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1035
1036         if (rp->status)
1037                 return rp->status;
1038
1039         if (test_bit(HCI_INIT, &hdev->flags))
1040                 hdev->page_scan_type = rp->type;
1041
1042         return rp->status;
1043 }
1044
1045 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1046                                       struct sk_buff *skb)
1047 {
1048         struct hci_ev_status *rp = data;
1049         u8 *type;
1050
1051         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1052
1053         if (rp->status)
1054                 return rp->status;
1055
1056         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1057         if (type)
1058                 hdev->page_scan_type = *type;
1059
1060         return rp->status;
1061 }
1062
/* Command Complete handler for Read Data Block Size: caches the
 * block-based flow control parameters of the controller.
 */
static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
                                      struct sk_buff *skb)
{
        struct hci_rp_read_data_block_size *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
        hdev->block_len = __le16_to_cpu(rp->block_len);
        hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

        /* All blocks start out available */
        hdev->block_cnt = hdev->num_blocks;

        BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
               hdev->block_cnt, hdev->block_len);

        return rp->status;
}
1084
/* Command Complete handler for Read Clock: stores either the local clock
 * (cp->which == 0x00) or the clock/accuracy of the connection the command
 * was issued for.
 */
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
                            struct sk_buff *skb)
{
        struct hci_rp_read_clock *rp = data;
        struct hci_cp_read_clock *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hci_dev_lock(hdev);

        /* Need the sent command to know which clock was requested */
        cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
        if (!cp)
                goto unlock;

        if (cp->which == 0x00) {
                hdev->clock = le32_to_cpu(rp->clock);
                goto unlock;
        }

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (conn) {
                conn->clock = le32_to_cpu(rp->clock);
                conn->clock_accuracy = le16_to_cpu(rp->accuracy);
        }

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
1118
/* Command Complete handler for Read Local AMP Info: caches the AMP
 * controller capabilities (bandwidth, latency, PDU/assoc sizes, flush
 * timeouts) in hdev.
 */
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_rp_read_local_amp_info *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hdev->amp_status = rp->amp_status;
        hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
        hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
        hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
        hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
        hdev->amp_type = rp->amp_type;
        hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
        hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
        hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
        hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

        return rp->status;
}
1142
1143 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1144                                        struct sk_buff *skb)
1145 {
1146         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1147
1148         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1149
1150         if (rp->status)
1151                 return rp->status;
1152
1153         hdev->inq_tx_power = rp->tx_power;
1154
1155         return rp->status;
1156 }
1157
1158 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1159                                              struct sk_buff *skb)
1160 {
1161         struct hci_rp_read_def_err_data_reporting *rp = data;
1162
1163         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1164
1165         if (rp->status)
1166                 return rp->status;
1167
1168         hdev->err_data_reporting = rp->err_data_reporting;
1169
1170         return rp->status;
1171 }
1172
/* Command Complete handler for Write Default Erroneous Data Reporting:
 * the event carries only a status, so on success mirror the value from
 * the command we sent into hdev.
 */
static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
                                              struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_write_def_err_data_reporting *cp;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
        if (!cp)
                return rp->status;

        hdev->err_data_reporting = cp->err_data_reporting;

        return rp->status;
}
1192
/* Command Complete handler for PIN Code Request Reply: notifies mgmt of
 * the outcome and, on success, caches the PIN length on the matching ACL
 * connection.
 */
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
                                struct sk_buff *skb)
{
        struct hci_rp_pin_code_reply *rp = data;
        struct hci_cp_pin_code_reply *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        /* mgmt is told about the result regardless of success or failure */
        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

        if (rp->status)
                goto unlock;

        cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
        if (!cp)
                goto unlock;

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
        if (conn)
                conn->pin_length = cp->pin_len;

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
1222
/* Command Complete handler for PIN Code Request Negative Reply: only
 * forwards the result to mgmt.
 */
static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_rp_pin_code_neg_reply *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
                                                 rp->status);

        hci_dev_unlock(hdev);

        return rp->status;
}
1240
1241 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1242                                      struct sk_buff *skb)
1243 {
1244         struct hci_rp_le_read_buffer_size *rp = data;
1245
1246         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1247
1248         if (rp->status)
1249                 return rp->status;
1250
1251         hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1252         hdev->le_pkts = rp->le_max_pkt;
1253
1254         hdev->le_cnt = hdev->le_pkts;
1255
1256         BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1257
1258         return rp->status;
1259 }
1260
1261 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1262                                         struct sk_buff *skb)
1263 {
1264         struct hci_rp_le_read_local_features *rp = data;
1265
1266         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1267
1268         if (rp->status)
1269                 return rp->status;
1270
1271         memcpy(hdev->le_features, rp->features, 8);
1272
1273         return rp->status;
1274 }
1275
1276 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1277                                       struct sk_buff *skb)
1278 {
1279         struct hci_rp_le_read_adv_tx_power *rp = data;
1280
1281         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1282
1283         if (rp->status)
1284                 return rp->status;
1285
1286         hdev->adv_tx_power = rp->tx_power;
1287
1288         return rp->status;
1289 }
1290
/* Command Complete handler for User Confirmation Request Reply: only
 * forwards the result to mgmt.
 */
static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_rp_user_confirm_reply *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
                                                 rp->status);

        hci_dev_unlock(hdev);

        return rp->status;
}
1308
/* Command Complete handler for User Confirmation Request Negative Reply:
 * only forwards the result to mgmt.
 */
static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
                                        struct sk_buff *skb)
{
        struct hci_rp_user_confirm_reply *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
                                                     ACL_LINK, 0, rp->status);

        hci_dev_unlock(hdev);

        return rp->status;
}
1326
/* Command Complete handler for User Passkey Request Reply: only forwards
 * the result to mgmt.
 */
static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_rp_user_confirm_reply *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
                                                 0, rp->status);

        hci_dev_unlock(hdev);

        return rp->status;
}
1344
/* Command Complete handler for User Passkey Request Negative Reply: only
 * forwards the result to mgmt.
 */
static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
                                        struct sk_buff *skb)
{
        struct hci_rp_user_confirm_reply *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
                                                     ACL_LINK, 0, rp->status);

        hci_dev_unlock(hdev);

        return rp->status;
}
1362
/* Command Complete handler for Read Local OOB Data: nothing is cached
 * here; only log and propagate the status.
 */
static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_rp_read_local_oob_data *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        return rp->status;
}
1372
/* Command Complete handler for Read Local OOB Extended Data: nothing is
 * cached here; only log and propagate the status.
 */
static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
                                         struct sk_buff *skb)
{
        struct hci_rp_read_local_oob_ext_data *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        return rp->status;
}
1382
/* Command Complete handler for LE Set Random Address: caches the address
 * we programmed and, if it is the current RPA, re-arms the RPA expiry
 * timer.
 */
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        bdaddr_t *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* The event has no payload; the address comes from the sent command */
        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        bacpy(&hdev->random_addr, sent);

        /* If the programmed address is the current RPA, restart its
         * expiration timer.
         */
        if (!bacmp(&hdev->rpa, sent)) {
                hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
                                   secs_to_jiffies(hdev->rpa_timeout));
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1412
/* Command Complete handler for LE Set Default PHY: on success mirror the
 * TX/RX PHY preferences from the sent command into hdev.
 */
static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_le_set_default_phy *cp;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
        if (!cp)
                return rp->status;

        hci_dev_lock(hdev);

        hdev->le_tx_def_phys = cp->tx_phys;
        hdev->le_rx_def_phys = cp->rx_phys;

        hci_dev_unlock(hdev);

        return rp->status;
}
1437
/* Command Complete handler for LE Set Advertising Set Random Address:
 * caches the address on the matching advertising instance and re-arms
 * its RPA expiry work when the address is the current RPA.
 */
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
                                            struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_le_set_adv_set_rand_addr *cp;
        struct adv_info *adv;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
        /* Update only in case the adv instance since handle 0x00 shall be using
         * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
         * non-extended adverting.
         */
        if (!cp || !cp->handle)
                return rp->status;

        hci_dev_lock(hdev);

        adv = hci_find_adv_instance(hdev, cp->handle);
        if (adv) {
                bacpy(&adv->random_addr, &cp->bdaddr);
                /* Restart the per-instance RPA expiry when this is the RPA */
                if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
                        adv->rpa_expired = false;
                        queue_delayed_work(hdev->workqueue,
                                           &adv->rpa_expired_cb,
                                           secs_to_jiffies(hdev->rpa_timeout));
                }
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1475
/* Command Complete handler for LE Remove Advertising Set: drops the
 * matching advertising instance and notifies mgmt on success.
 */
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        u8 *instance;
        int err;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* The instance handle comes from the command that was sent */
        instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
        if (!instance)
                return rp->status;

        hci_dev_lock(hdev);

        err = hci_remove_adv_instance(hdev, *instance);
        if (!err)
                /* Notify mgmt using the socket that issued the command */
                mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
                                         *instance);

        hci_dev_unlock(hdev);

        return rp->status;
}
1503
/* Command Complete handler for LE Clear Advertising Sets: removes every
 * advertising instance and notifies mgmt for each one removed.
 */
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct adv_info *adv, *n;
        int err;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
                return rp->status;

        hci_dev_lock(hdev);

        /* _safe variant since each instance is removed while iterating */
        list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
                u8 instance = adv->instance;

                err = hci_remove_adv_instance(hdev, instance);
                if (!err)
                        mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
                                                 hdev, instance);
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1534
1535 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1536                                         struct sk_buff *skb)
1537 {
1538         struct hci_rp_le_read_transmit_power *rp = data;
1539
1540         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1541
1542         if (rp->status)
1543                 return rp->status;
1544
1545         hdev->min_le_tx_power = rp->min_le_tx_power;
1546         hdev->max_le_tx_power = rp->max_le_tx_power;
1547
1548         return rp->status;
1549 }
1550
/* Command Complete handler for LE Set Privacy Mode: on success mirror
 * the mode from the sent command onto the matching connection params.
 */
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_le_set_privacy_mode *cp;
        struct hci_conn_params *params;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
        if (!cp)
                return rp->status;

        hci_dev_lock(hdev);

        params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
        if (params)
                params->privacy_mode = cp->mode;

        hci_dev_unlock(hdev);

        return rp->status;
}
1577
/* Command Complete handler for LE Set Advertising Enable: tracks the
 * HCI_LE_ADV flag and arms the LE connection timeout when advertising
 * was enabled for directed connection setup.
 */
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        __u8 *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* Enable/disable value comes from the command that was sent */
        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        /* If we're doing connection initiation as peripheral. Set a
         * timeout in case something goes wrong.
         */
        if (*sent) {
                struct hci_conn *conn;

                hci_dev_set_flag(hdev, HCI_LE_ADV);

                conn = hci_lookup_le_connect(hdev);
                if (conn)
                        queue_delayed_work(hdev->workqueue,
                                           &conn->le_conn_timeout,
                                           conn->conn_timeout);
        } else {
                hci_dev_clear_flag(hdev, HCI_LE_ADV);
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1616
/* Command Complete handler for LE Set Extended Advertising Enable:
 * updates per-instance enabled state and the global HCI_LE_ADV flag,
 * and arms the LE connection timeout when enabling for connection setup.
 */
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
                                       struct sk_buff *skb)
{
        struct hci_cp_le_set_ext_adv_enable *cp;
        struct hci_cp_ext_adv_set *set;
        struct adv_info *adv = NULL, *n;
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
        if (!cp)
                return rp->status;

        /* Per-set parameters follow the fixed command header */
        set = (void *)cp->data;

        hci_dev_lock(hdev);

        /* NOTE(review): only the first set in the command is looked up */
        if (cp->num_of_sets)
                adv = hci_find_adv_instance(hdev, set->handle);

        if (cp->enable) {
                struct hci_conn *conn;

                hci_dev_set_flag(hdev, HCI_LE_ADV);

                if (adv)
                        adv->enabled = true;

                /* Arm a timeout for peripheral-initiated connection setup */
                conn = hci_lookup_le_connect(hdev);
                if (conn)
                        queue_delayed_work(hdev->workqueue,
                                           &conn->le_conn_timeout,
                                           conn->conn_timeout);
        } else {
                if (cp->num_of_sets) {
                        if (adv)
                                adv->enabled = false;

                        /* If just one instance was disabled check if there are
                         * any other instance enabled before clearing HCI_LE_ADV
                         */
                        list_for_each_entry_safe(adv, n, &hdev->adv_instances,
                                                 list) {
                                if (adv->enabled)
                                        goto unlock;
                        }
                } else {
                        /* All instances shall be considered disabled */
                        list_for_each_entry_safe(adv, n, &hdev->adv_instances,
                                                 list)
                                adv->enabled = false;
                }

                hci_dev_clear_flag(hdev, HCI_LE_ADV);
        }

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
1681
/* Command Complete handler for LE Set Scan Parameters: on success mirror
 * the scan type from the sent command into hdev.
 */
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_cp_le_set_scan_param *cp;
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
        if (!cp)
                return rp->status;

        hci_dev_lock(hdev);

        hdev->le_scan_type = cp->type;

        hci_dev_unlock(hdev);

        return rp->status;
}
1705
/* Command Complete handler for LE Set Extended Scan Parameters: on
 * success mirror the scan type of the first PHY parameter block from the
 * sent command into hdev.
 */
static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
                                       struct sk_buff *skb)
{
        struct hci_cp_le_set_ext_scan_params *cp;
        struct hci_ev_status *rp = data;
        struct hci_cp_le_scan_phy_params *phy_param;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
        if (!cp)
                return rp->status;

        /* Per-PHY parameters follow the fixed command header */
        phy_param = (void *)cp->data;

        hci_dev_lock(hdev);

        hdev->le_scan_type = phy_param->type;

        hci_dev_unlock(hdev);

        return rp->status;
}
1732
/* A pending advertising report exists when last_adv_addr has been set
 * to something other than BDADDR_ANY.
 */
static bool has_pending_adv_report(struct hci_dev *hdev)
{
        struct discovery_state *d = &hdev->discovery;

        return bacmp(&d->last_adv_addr, BDADDR_ANY);
}
1739
/* Reset the cached advertising report so has_pending_adv_report()
 * returns false again.
 */
static void clear_pending_adv_report(struct hci_dev *hdev)
{
        struct discovery_state *d = &hdev->discovery;

        bacpy(&d->last_adv_addr, BDADDR_ANY);
        d->last_adv_data_len = 0;
}
1747
#ifndef TIZEN_BT
/* Cache the most recent advertising report in the discovery state so it
 * can be delivered later (e.g. when scanning is disabled). Oversized
 * data is silently dropped.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 bdaddr_type, s8 rssi, u32 flags,
                                     u8 *data, u8 len)
{
        struct discovery_state *d = &hdev->discovery;

        if (len > HCI_MAX_AD_LENGTH)
                return;

        bacpy(&d->last_adv_addr, bdaddr);
        d->last_adv_addr_type = bdaddr_type;
        d->last_adv_rssi = rssi;
        d->last_adv_flags = flags;
        memcpy(d->last_adv_data, data, len);
        d->last_adv_data_len = len;
}
#endif
1766
/* Common completion for (extended) LE scan enable/disable: keeps the
 * HCI_LE_SCAN flag and the discovery state machine in sync, and flushes
 * any advertising report that was held back until scanning stopped.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
        hci_dev_lock(hdev);

        switch (enable) {
        case LE_SCAN_ENABLE:
                hci_dev_set_flag(hdev, HCI_LE_SCAN);
                if (hdev->le_scan_type == LE_SCAN_ACTIVE)
                        clear_pending_adv_report(hdev);
                if (hci_dev_test_flag(hdev, HCI_MESH))
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;

        case LE_SCAN_DISABLE:
                /* We do this here instead of when setting DISCOVERY_STOPPED
                 * since the latter would potentially require waiting for
                 * inquiry to stop too.
                 */
                if (has_pending_adv_report(hdev)) {
                        struct discovery_state *d = &hdev->discovery;

                        mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
                                          d->last_adv_addr_type, NULL,
                                          d->last_adv_rssi, d->last_adv_flags,
                                          d->last_adv_data,
                                          d->last_adv_data_len, NULL, 0, 0);
                }

                /* Cancel this timer so that we don't try to disable scanning
                 * when it's already disabled.
                 */
                cancel_delayed_work(&hdev->le_scan_disable);

                hci_dev_clear_flag(hdev, HCI_LE_SCAN);

                /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
                 * interrupted scanning due to a connect request. Mark
                 * therefore discovery as stopped.
                 */
                if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
#ifndef TIZEN_BT /* The below line is kernel bug. */
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
#else
                        hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
#endif
                else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
                         hdev->discovery.state == DISCOVERY_FINDING)
                        queue_work(hdev->workqueue, &hdev->reenable_adv_work);

                break;

        default:
                bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
                           enable);
                break;
        }

        hci_dev_unlock(hdev);
}
1826
/* Command Complete handler for LE Set Scan Enable: delegates the state
 * update to le_set_scan_enable_complete() using the enable value from
 * the sent command.
 */
static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_cp_le_set_scan_enable *cp;
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
        if (!cp)
                return rp->status;

        le_set_scan_enable_complete(hdev, cp->enable);

        return rp->status;
}
1846
/* Command Complete handler for LE Set Extended Scan Enable: delegates
 * the state update to le_set_scan_enable_complete() using the enable
 * value from the sent command.
 */
static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
                                        struct sk_buff *skb)
{
        struct hci_cp_le_set_ext_scan_enable *cp;
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
        if (!cp)
                return rp->status;

        le_set_scan_enable_complete(hdev, cp->enable);

        return rp->status;
}
1866
1867 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1868                                       struct sk_buff *skb)
1869 {
1870         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1871
1872         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1873                    rp->num_of_sets);
1874
1875         if (rp->status)
1876                 return rp->status;
1877
1878         hdev->le_num_of_adv_sets = rp->num_of_sets;
1879
1880         return rp->status;
1881 }
1882
1883 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1884                                           struct sk_buff *skb)
1885 {
1886         struct hci_rp_le_read_accept_list_size *rp = data;
1887
1888         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1889
1890         if (rp->status)
1891                 return rp->status;
1892
1893         hdev->le_accept_list_size = rp->size;
1894
1895         return rp->status;
1896 }
1897
/* Command Complete handler for LE Clear Accept List: empties the host's
 * shadow copy of the controller accept list on success.
 */
static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
                                      struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hci_dev_lock(hdev);
        hci_bdaddr_list_clear(&hdev->le_accept_list);
        hci_dev_unlock(hdev);

        return rp->status;
}
1914
/* Command Complete handler for LE Add Device To Accept List: mirrors the
 * added entry (from the sent command) into the host's shadow list.
 */
static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
                                       struct sk_buff *skb)
{
        struct hci_cp_le_add_to_accept_list *sent;
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);
        hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
                            sent->bdaddr_type);
        hci_dev_unlock(hdev);

        return rp->status;
}
1937
1938 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1939                                          struct sk_buff *skb)
1940 {
1941         struct hci_cp_le_del_from_accept_list *sent;
1942         struct hci_ev_status *rp = data;
1943
1944         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1945
1946         if (rp->status)
1947                 return rp->status;
1948
1949         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1950         if (!sent)
1951                 return rp->status;
1952
1953         hci_dev_lock(hdev);
1954         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1955                             sent->bdaddr_type);
1956         hci_dev_unlock(hdev);
1957
1958         return rp->status;
1959 }
1960
1961 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1962                                           struct sk_buff *skb)
1963 {
1964         struct hci_rp_le_read_supported_states *rp = data;
1965
1966         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1967
1968         if (rp->status)
1969                 return rp->status;
1970
1971         memcpy(hdev->le_states, rp->le_states, 8);
1972
1973         return rp->status;
1974 }
1975
/* Completion handler for HCI_OP_LE_READ_DEF_DATA_LEN: caches the
 * controller's suggested default LE data length parameters in hdev.
 */
static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

#ifdef TIZEN_BT
	/* The Tizen build always notifies mgmt below (even on failure),
	 * so it takes the lock unconditionally instead of bailing out.
	 */
	hci_dev_lock(hdev);
#else
	if (rp->status)
		return rp->status;
#endif

	/* NOTE(review): on the TIZEN_BT path these fields are written even
	 * when rp->status is non-zero, i.e. from a failed response —
	 * confirm that is intentional.
	 */
	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

#ifdef TIZEN_BT
	mgmt_le_read_host_suggested_data_length_complete(hdev, rp->status);

	hci_dev_unlock(hdev);
#endif

	return rp->status;
}
2001
/* Completion handler for HCI_OP_LE_WRITE_DEF_DATA_LEN: on success, mirror
 * the default data length values that were sent to the controller.
 *
 * Note the preprocessor-interleaved control flow: on TIZEN_BT both the
 * error path and the missing-command path jump to the unblock label so
 * that the mgmt layer is always told the command finished.
 */
static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
#ifndef TIZEN_BT
		return rp->status;
#else
		goto unblock;
#endif

	/* Look up the parameters of the command being completed */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
#ifndef TIZEN_BT
		return rp->status;
#else
		goto unblock;
#endif

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
#ifdef TIZEN_BT
unblock:
	mgmt_le_write_host_suggested_data_length_complete(hdev, rp->status);
	return rp->status;
#endif
}
2035
2036 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2037                                        struct sk_buff *skb)
2038 {
2039         struct hci_cp_le_add_to_resolv_list *sent;
2040         struct hci_ev_status *rp = data;
2041
2042         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2043
2044         if (rp->status)
2045                 return rp->status;
2046
2047         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2048         if (!sent)
2049                 return rp->status;
2050
2051         hci_dev_lock(hdev);
2052         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2053                                 sent->bdaddr_type, sent->peer_irk,
2054                                 sent->local_irk);
2055         hci_dev_unlock(hdev);
2056
2057         return rp->status;
2058 }
2059
2060 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2061                                          struct sk_buff *skb)
2062 {
2063         struct hci_cp_le_del_from_resolv_list *sent;
2064         struct hci_ev_status *rp = data;
2065
2066         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2067
2068         if (rp->status)
2069                 return rp->status;
2070
2071         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2072         if (!sent)
2073                 return rp->status;
2074
2075         hci_dev_lock(hdev);
2076         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2077                             sent->bdaddr_type);
2078         hci_dev_unlock(hdev);
2079
2080         return rp->status;
2081 }
2082
2083 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2084                                       struct sk_buff *skb)
2085 {
2086         struct hci_ev_status *rp = data;
2087
2088         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2089
2090         if (rp->status)
2091                 return rp->status;
2092
2093         hci_dev_lock(hdev);
2094         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2095         hci_dev_unlock(hdev);
2096
2097         return rp->status;
2098 }
2099
2100 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2101                                           struct sk_buff *skb)
2102 {
2103         struct hci_rp_le_read_resolv_list_size *rp = data;
2104
2105         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2106
2107         if (rp->status)
2108                 return rp->status;
2109
2110         hdev->le_resolv_list_size = rp->size;
2111
2112         return rp->status;
2113 }
2114
2115 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2116                                                struct sk_buff *skb)
2117 {
2118         struct hci_ev_status *rp = data;
2119         __u8 *sent;
2120
2121         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2122
2123         if (rp->status)
2124                 return rp->status;
2125
2126         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2127         if (!sent)
2128                 return rp->status;
2129
2130         hci_dev_lock(hdev);
2131
2132         if (*sent)
2133                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2134         else
2135                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2136
2137         hci_dev_unlock(hdev);
2138
2139         return rp->status;
2140 }
2141
/* Completion handler for HCI_OP_LE_READ_MAX_DATA_LEN: caches the
 * controller's maximum LE data length / time values in hdev.
 */
static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

#ifndef TIZEN_BT
	if (rp->status)
		return rp->status;
#else
	/* Tizen reports the result to mgmt below even on failure, so
	 * there is no early return on this path.
	 */
	hci_dev_lock(hdev);
#endif

	/* NOTE(review): with TIZEN_BT these fields are written even when
	 * rp->status != 0, i.e. from a failed response — confirm intended.
	 */
	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

#ifdef TIZEN_BT
	mgmt_le_read_maximum_data_length_complete(hdev, rp->status);
	hci_dev_unlock(hdev);
#endif

	return rp->status;
}
2168
2169 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2170                                          struct sk_buff *skb)
2171 {
2172         struct hci_cp_write_le_host_supported *sent;
2173         struct hci_ev_status *rp = data;
2174
2175         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2176
2177         if (rp->status)
2178                 return rp->status;
2179
2180         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2181         if (!sent)
2182                 return rp->status;
2183
2184         hci_dev_lock(hdev);
2185
2186         if (sent->le) {
2187                 hdev->features[1][0] |= LMP_HOST_LE;
2188                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2189         } else {
2190                 hdev->features[1][0] &= ~LMP_HOST_LE;
2191                 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2192                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2193         }
2194
2195         if (sent->simul)
2196                 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2197         else
2198                 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2199
2200         hci_dev_unlock(hdev);
2201
2202         return rp->status;
2203 }
2204
2205 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2206                                struct sk_buff *skb)
2207 {
2208         struct hci_cp_le_set_adv_param *cp;
2209         struct hci_ev_status *rp = data;
2210
2211         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2212
2213         if (rp->status)
2214                 return rp->status;
2215
2216         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2217         if (!cp)
2218                 return rp->status;
2219
2220         hci_dev_lock(hdev);
2221         hdev->adv_addr_type = cp->own_address_type;
2222         hci_dev_unlock(hdev);
2223
2224         return rp->status;
2225 }
2226
2227 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2228                                    struct sk_buff *skb)
2229 {
2230         struct hci_rp_le_set_ext_adv_params *rp = data;
2231         struct hci_cp_le_set_ext_adv_params *cp;
2232         struct adv_info *adv_instance;
2233
2234         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2235
2236         if (rp->status)
2237                 return rp->status;
2238
2239         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2240         if (!cp)
2241                 return rp->status;
2242
2243         hci_dev_lock(hdev);
2244         hdev->adv_addr_type = cp->own_addr_type;
2245         if (!cp->handle) {
2246                 /* Store in hdev for instance 0 */
2247                 hdev->adv_tx_power = rp->tx_power;
2248         } else {
2249                 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2250                 if (adv_instance)
2251                         adv_instance->tx_power = rp->tx_power;
2252         }
2253         /* Update adv data as tx power is known now */
2254         hci_update_adv_data(hdev, cp->handle);
2255
2256         hci_dev_unlock(hdev);
2257
2258         return rp->status;
2259 }
2260
2261 #ifdef TIZEN_BT
/* Completion handler for the Tizen vendor command that enables RSSI
 * monitoring; forwards the raw response to the mgmt layer.
 */
static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb)
{
	struct hci_cc_rsp_enable_rssi *rp = data;

	BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
	       hdev->name, rp->status, rp->le_ext_opcode);

	mgmt_enable_rssi_cc(hdev, rp, rp->status);

	return rp->status;
}
2274
/* Completion handler for the Tizen vendor "get raw RSSI" command;
 * relays the (handle, RSSI) response to the mgmt layer.
 */
static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_cc_rp_get_raw_rssi *rp = data;

	BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
	       hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);

	mgmt_raw_rssi_response(hdev, rp, rp->status);

	return rp->status;
}
2287
/* Vendor sub-event: an RSSI link alert fired; relay it to mgmt.
 * NOTE(review): skb->data is dereferenced without a length check here —
 * assumes the caller validated the event size; confirm.
 */
static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;

	BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);

	mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
			    ev->rssi_dbm);
}
2298
2299 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
2300                                               struct sk_buff *skb)
2301 {
2302         struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
2303         __u8 event_le_ext_sub_code;
2304
2305         BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
2306                LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
2307
2308         skb_pull(skb, sizeof(*ev));
2309         event_le_ext_sub_code = ev->event_le_ext_sub_code;
2310
2311         switch (event_le_ext_sub_code) {
2312         case LE_RSSI_LINK_ALERT:
2313                 hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
2314                 break;
2315
2316         default:
2317                 break;
2318         }
2319 }
2320
/* Vendor sub-event: a multi-advertising instance changed state; forward
 * the instance, reason and connection handle to the mgmt layer.
 */
static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
						  struct sk_buff *skb)
{
	struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;

	BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");

	mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
					ev->state_change_reason,
					ev->connection_handle);
}
2332
2333 static void hci_vendor_specific_evt(struct hci_dev *hdev, void *data,
2334                                     struct sk_buff *skb)
2335 {
2336         struct hci_ev_vendor_specific *ev = (void *)skb->data;
2337         __u8 event_sub_code;
2338
2339         BT_DBG("hci_vendor_specific_evt");
2340
2341         skb_pull(skb, sizeof(*ev));
2342         event_sub_code = ev->event_sub_code;
2343
2344         switch (event_sub_code) {
2345         case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
2346                 hci_vendor_specific_group_ext_evt(hdev, skb);
2347                 break;
2348
2349         case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
2350                 hci_vendor_multi_adv_state_change_evt(hdev, skb);
2351                 break;
2352
2353         default:
2354                 break;
2355         }
2356 }
2357
/* Tizen handler for the LE Data Length Change event: copies the
 * negotiated TX/RX length and time values onto the connection object
 * and notifies the mgmt layer.
 */
static void hci_le_data_length_changed_complete_evt(struct hci_dev *hdev,
						    void *data,
						    struct sk_buff *skb)
{
	struct hci_ev_le_data_len_change *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->tx_len = le16_to_cpu(ev->tx_len);
		conn->tx_time = le16_to_cpu(ev->tx_time);
		conn->rx_len = le16_to_cpu(ev->rx_len);
		conn->rx_time = le16_to_cpu(ev->rx_time);

		mgmt_le_data_length_change_complete(hdev, &conn->dst,
					    conn->tx_len, conn->tx_time,
					    conn->rx_len, conn->rx_time);
	}

	hci_dev_unlock(hdev);
}
2383 #endif
2384
2385 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2386                            struct sk_buff *skb)
2387 {
2388         struct hci_rp_read_rssi *rp = data;
2389         struct hci_conn *conn;
2390
2391         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2392
2393         if (rp->status)
2394                 return rp->status;
2395
2396         hci_dev_lock(hdev);
2397
2398         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2399         if (conn)
2400                 conn->rssi = rp->rssi;
2401
2402         hci_dev_unlock(hdev);
2403
2404         return rp->status;
2405 }
2406
2407 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2408                                struct sk_buff *skb)
2409 {
2410         struct hci_cp_read_tx_power *sent;
2411         struct hci_rp_read_tx_power *rp = data;
2412         struct hci_conn *conn;
2413
2414         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2415
2416         if (rp->status)
2417                 return rp->status;
2418
2419         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2420         if (!sent)
2421                 return rp->status;
2422
2423         hci_dev_lock(hdev);
2424
2425         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2426         if (!conn)
2427                 goto unlock;
2428
2429         switch (sent->type) {
2430         case 0x00:
2431                 conn->tx_power = rp->tx_power;
2432                 break;
2433         case 0x01:
2434                 conn->max_tx_power = rp->tx_power;
2435                 break;
2436         }
2437
2438 unlock:
2439         hci_dev_unlock(hdev);
2440         return rp->status;
2441 }
2442
2443 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2444                                       struct sk_buff *skb)
2445 {
2446         struct hci_ev_status *rp = data;
2447         u8 *mode;
2448
2449         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2450
2451         if (rp->status)
2452                 return rp->status;
2453
2454         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2455         if (mode)
2456                 hdev->ssp_debug_mode = *mode;
2457
2458         return rp->status;
2459 }
2460
2461 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2462 {
2463         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2464
2465         if (status) {
2466                 hci_conn_check_pending(hdev);
2467                 return;
2468         }
2469
2470         set_bit(HCI_INQUIRY, &hdev->flags);
2471 }
2472
/* Command status handler for HCI_OP_CREATE_CONN.
 *
 * On failure the pending connection object is either kept for a retry
 * or torn down; on success a connection object is created if one does
 * not already exist.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed per the HCI error
			 * code table) is treated as transient: keep the
			 * connection around (BT_CONNECT2) for up to two
			 * attempts before giving up on it.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
2510
2511 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2512 {
2513         struct hci_cp_add_sco *cp;
2514         struct hci_conn *acl, *sco;
2515         __u16 handle;
2516
2517         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2518
2519         if (!status)
2520                 return;
2521
2522         cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2523         if (!cp)
2524                 return;
2525
2526         handle = __le16_to_cpu(cp->handle);
2527
2528         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2529
2530         hci_dev_lock(hdev);
2531
2532         acl = hci_conn_hash_lookup_handle(hdev, handle);
2533         if (acl) {
2534                 sco = acl->link;
2535                 if (sco) {
2536                         sco->state = BT_CLOSED;
2537
2538                         hci_connect_cfm(sco, status);
2539                         hci_conn_del(sco);
2540                 }
2541         }
2542
2543         hci_dev_unlock(hdev);
2544 }
2545
2546 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2547 {
2548         struct hci_cp_auth_requested *cp;
2549         struct hci_conn *conn;
2550
2551         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2552
2553         if (!status)
2554                 return;
2555
2556         cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2557         if (!cp)
2558                 return;
2559
2560         hci_dev_lock(hdev);
2561
2562         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2563         if (conn) {
2564                 if (conn->state == BT_CONFIG) {
2565                         hci_connect_cfm(conn, status);
2566                         hci_conn_drop(conn);
2567                 }
2568         }
2569
2570         hci_dev_unlock(hdev);
2571 }
2572
2573 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2574 {
2575         struct hci_cp_set_conn_encrypt *cp;
2576         struct hci_conn *conn;
2577
2578         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2579
2580         if (!status)
2581                 return;
2582
2583         cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2584         if (!cp)
2585                 return;
2586
2587         hci_dev_lock(hdev);
2588
2589         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2590         if (conn) {
2591                 if (conn->state == BT_CONFIG) {
2592                         hci_connect_cfm(conn, status);
2593                         hci_conn_drop(conn);
2594                 }
2595         }
2596
2597         hci_dev_unlock(hdev);
2598 }
2599
2600 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2601                                     struct hci_conn *conn)
2602 {
2603         if (conn->state != BT_CONFIG || !conn->out)
2604                 return 0;
2605
2606         if (conn->pending_sec_level == BT_SECURITY_SDP)
2607                 return 0;
2608
2609         /* Only request authentication for SSP connections or non-SSP
2610          * devices with sec_level MEDIUM or HIGH or if MITM protection
2611          * is requested.
2612          */
2613         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2614             conn->pending_sec_level != BT_SECURITY_FIPS &&
2615             conn->pending_sec_level != BT_SECURITY_HIGH &&
2616             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2617                 return 0;
2618
2619         return 1;
2620 }
2621
2622 static int hci_resolve_name(struct hci_dev *hdev,
2623                                    struct inquiry_entry *e)
2624 {
2625         struct hci_cp_remote_name_req cp;
2626
2627         memset(&cp, 0, sizeof(cp));
2628
2629         bacpy(&cp.bdaddr, &e->data.bdaddr);
2630         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2631         cp.pscan_mode = e->data.pscan_mode;
2632         cp.clock_offset = e->data.clock_offset;
2633
2634         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2635 }
2636
2637 static bool hci_resolve_next_name(struct hci_dev *hdev)
2638 {
2639         struct discovery_state *discov = &hdev->discovery;
2640         struct inquiry_entry *e;
2641
2642         if (list_empty(&discov->resolve))
2643                 return false;
2644
2645         /* We should stop if we already spent too much time resolving names. */
2646         if (time_after(jiffies, discov->name_resolve_timeout)) {
2647                 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2648                 return false;
2649         }
2650
2651         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2652         if (!e)
2653                 return false;
2654
2655         if (hci_resolve_name(hdev, e) == 0) {
2656                 e->name_state = NAME_PENDING;
2657                 return true;
2658         }
2659
2660         return false;
2661 }
2662
/* Handle the completion of a remote name request: push the name (or its
 * absence) to the mgmt layer and drive the discovery state machine to
 * either resolve the next pending name or stop discovery.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

#ifdef TIZEN_BT
	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
		if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, conn, name, name_len);
		else
			mgmt_device_name_update(hdev, bdaddr, name, name_len);
	}
#else
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);
#endif

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);

	/* A NULL name means the request failed: record that the name is
	 * not known rather than leaving the entry pending.
	 */
	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2718
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * A zero status means the request is in flight and the Remote Name
 * Request Complete event will finish the job; only failures are
 * processed here.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Let the discovery code record the failure (NULL name) and move
	 * on to the next pending name, if any.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Kick off authentication ourselves if none is pending yet */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2761
2762 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2763 {
2764         struct hci_cp_read_remote_features *cp;
2765         struct hci_conn *conn;
2766
2767         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2768
2769         if (!status)
2770                 return;
2771
2772         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2773         if (!cp)
2774                 return;
2775
2776         hci_dev_lock(hdev);
2777
2778         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2779         if (conn) {
2780                 if (conn->state == BT_CONFIG) {
2781                         hci_connect_cfm(conn, status);
2782                         hci_conn_drop(conn);
2783                 }
2784         }
2785
2786         hci_dev_unlock(hdev);
2787 }
2788
2789 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2790 {
2791         struct hci_cp_read_remote_ext_features *cp;
2792         struct hci_conn *conn;
2793
2794         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2795
2796         if (!status)
2797                 return;
2798
2799         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2800         if (!cp)
2801                 return;
2802
2803         hci_dev_lock(hdev);
2804
2805         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2806         if (conn) {
2807                 if (conn->state == BT_CONFIG) {
2808                         hci_connect_cfm(conn, status);
2809                         hci_conn_drop(conn);
2810                 }
2811         }
2812
2813         hci_dev_unlock(hdev);
2814 }
2815
2816 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2817 {
2818         struct hci_cp_setup_sync_conn *cp;
2819         struct hci_conn *acl, *sco;
2820         __u16 handle;
2821
2822         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2823
2824         if (!status)
2825                 return;
2826
2827         cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2828         if (!cp)
2829                 return;
2830
2831         handle = __le16_to_cpu(cp->handle);
2832
2833         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2834
2835         hci_dev_lock(hdev);
2836
2837         acl = hci_conn_hash_lookup_handle(hdev, handle);
2838         if (acl) {
2839                 sco = acl->link;
2840                 if (sco) {
2841                         sco->state = BT_CLOSED;
2842
2843                         hci_connect_cfm(sco, status);
2844                         hci_conn_del(sco);
2845                 }
2846         }
2847
2848         hci_dev_unlock(hdev);
2849 }
2850
2851 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2852 {
2853         struct hci_cp_enhanced_setup_sync_conn *cp;
2854         struct hci_conn *acl, *sco;
2855         __u16 handle;
2856
2857         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2858
2859         if (!status)
2860                 return;
2861
2862         cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2863         if (!cp)
2864                 return;
2865
2866         handle = __le16_to_cpu(cp->handle);
2867
2868         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2869
2870         hci_dev_lock(hdev);
2871
2872         acl = hci_conn_hash_lookup_handle(hdev, handle);
2873         if (acl) {
2874                 sco = acl->link;
2875                 if (sco) {
2876                         sco->state = BT_CLOSED;
2877
2878                         hci_connect_cfm(sco, status);
2879                         hci_conn_del(sco);
2880                 }
2881         }
2882
2883         hci_dev_unlock(hdev);
2884 }
2885
2886 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2887 {
2888         struct hci_cp_sniff_mode *cp;
2889         struct hci_conn *conn;
2890
2891         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2892
2893         if (!status)
2894                 return;
2895
2896         cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2897         if (!cp)
2898                 return;
2899
2900         hci_dev_lock(hdev);
2901
2902         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2903         if (conn) {
2904                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2905
2906                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2907                         hci_sco_setup(conn, status);
2908         }
2909
2910         hci_dev_unlock(hdev);
2911 }
2912
2913 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2914 {
2915         struct hci_cp_exit_sniff_mode *cp;
2916         struct hci_conn *conn;
2917
2918         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2919
2920         if (!status)
2921                 return;
2922
2923         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2924         if (!cp)
2925                 return;
2926
2927         hci_dev_lock(hdev);
2928
2929         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2930         if (conn) {
2931                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2932
2933                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2934                         hci_sco_setup(conn, status);
2935         }
2936
2937         hci_dev_unlock(hdev);
2938 }
2939
/* Command status for HCI_Disconnect.
 *
 * In the normal case (status 0x00, device not suspended) nothing is
 * done here: the connection is cleaned up when the corresponding
 * HCI_EV_DISCONN_COMPLETE event arrives. Otherwise the connection is
 * cleaned up immediately, informing mgmt and upper layers.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		/* The command failed: report it to mgmt, re-enable
		 * advertising for a peripheral LE link that suppressed it,
		 * then fall through to the common cleanup.
		 */
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		goto done;
	}

	/* Suspended-but-successful path: do the same bookkeeping the
	 * disconn-complete handler would have done.
	 */
	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		/* Drop a link key that was flagged as one-time use. */
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-queue auto-connect parameters so the device can reconnect. */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only re-connect after an actual link loss. */
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
3018
3019 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
3020 {
3021         /* When using controller based address resolution, then the new
3022          * address types 0x02 and 0x03 are used. These types need to be
3023          * converted back into either public address or random address type
3024          */
3025         switch (type) {
3026         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3027                 if (resolved)
3028                         *resolved = true;
3029                 return ADDR_LE_DEV_PUBLIC;
3030         case ADDR_LE_DEV_RANDOM_RESOLVED:
3031                 if (resolved)
3032                         *resolved = true;
3033                 return ADDR_LE_DEV_RANDOM;
3034         }
3035
3036         if (resolved)
3037                 *resolved = false;
3038         return type;
3039 }
3040
3041 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
3042                               u8 peer_addr_type, u8 own_address_type,
3043                               u8 filter_policy)
3044 {
3045         struct hci_conn *conn;
3046
3047         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
3048                                        peer_addr_type);
3049         if (!conn)
3050                 return;
3051
3052         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
3053
3054         /* Store the initiator and responder address information which
3055          * is needed for SMP. These values will not change during the
3056          * lifetime of the connection.
3057          */
3058         conn->init_addr_type = own_address_type;
3059         if (own_address_type == ADDR_LE_DEV_RANDOM)
3060                 bacpy(&conn->init_addr, &hdev->random_addr);
3061         else
3062                 bacpy(&conn->init_addr, &hdev->bdaddr);
3063
3064         conn->resp_addr_type = peer_addr_type;
3065         bacpy(&conn->resp_addr, peer_addr);
3066
3067         /* We don't want the connection attempt to stick around
3068          * indefinitely since LE doesn't have a page timeout concept
3069          * like BR/EDR. Set a timer for any connection that doesn't use
3070          * the accept list for connecting.
3071          */
3072         if (filter_policy == HCI_LE_USE_PEER_ADDR)
3073                 queue_delayed_work(conn->hdev->workqueue,
3074                                    &conn->le_conn_timeout,
3075                                    conn->conn_timeout);
3076 }
3077
3078 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
3079 {
3080         struct hci_cp_le_create_conn *cp;
3081
3082         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3083
3084         /* All connection failure handling is taken care of by the
3085          * hci_conn_failed function which is triggered by the HCI
3086          * request completion callbacks used for connecting.
3087          */
3088         if (status)
3089                 return;
3090
3091         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
3092         if (!cp)
3093                 return;
3094
3095         hci_dev_lock(hdev);
3096
3097         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3098                           cp->own_address_type, cp->filter_policy);
3099
3100         hci_dev_unlock(hdev);
3101 }
3102
3103 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
3104 {
3105         struct hci_cp_le_ext_create_conn *cp;
3106
3107         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3108
3109         /* All connection failure handling is taken care of by the
3110          * hci_conn_failed function which is triggered by the HCI
3111          * request completion callbacks used for connecting.
3112          */
3113         if (status)
3114                 return;
3115
3116         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
3117         if (!cp)
3118                 return;
3119
3120         hci_dev_lock(hdev);
3121
3122         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3123                           cp->own_addr_type, cp->filter_policy);
3124
3125         hci_dev_unlock(hdev);
3126 }
3127
3128 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
3129 {
3130         struct hci_cp_le_read_remote_features *cp;
3131         struct hci_conn *conn;
3132
3133         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3134
3135         if (!status)
3136                 return;
3137
3138         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
3139         if (!cp)
3140                 return;
3141
3142         hci_dev_lock(hdev);
3143
3144         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3145         if (conn) {
3146                 if (conn->state == BT_CONFIG) {
3147                         hci_connect_cfm(conn, status);
3148                         hci_conn_drop(conn);
3149                 }
3150         }
3151
3152         hci_dev_unlock(hdev);
3153 }
3154
3155 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3156 {
3157         struct hci_cp_le_start_enc *cp;
3158         struct hci_conn *conn;
3159
3160         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3161
3162         if (!status)
3163                 return;
3164
3165         hci_dev_lock(hdev);
3166
3167         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3168         if (!cp)
3169                 goto unlock;
3170
3171         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3172         if (!conn)
3173                 goto unlock;
3174
3175         if (conn->state != BT_CONNECTED)
3176                 goto unlock;
3177
3178         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3179         hci_conn_drop(conn);
3180
3181 unlock:
3182         hci_dev_unlock(hdev);
3183 }
3184
3185 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3186 {
3187         struct hci_cp_switch_role *cp;
3188         struct hci_conn *conn;
3189
3190         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3191
3192         if (!status)
3193                 return;
3194
3195         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3196         if (!cp)
3197                 return;
3198
3199         hci_dev_lock(hdev);
3200
3201         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3202         if (conn)
3203                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3204
3205         hci_dev_unlock(hdev);
3206 }
3207
/* Handle HCI_Inquiry_Complete.
 *
 * Clears the HCI_INQUIRY flag (waking any waiters), then - when mgmt
 * drives discovery - either continues with remote name resolution or
 * marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state below is only tracked when mgmt is in use. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Entries still need their names resolved: pick the next one and
	 * try to resolve it; on success, mark discovery as resolving and
	 * arm the resolution timeout.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
3268
/* Handle HCI_Inquiry_Result.
 *
 * This event variant carries neither RSSI nor extended inquiry data.
 * Each reported device is fed into the inquiry cache and announced to
 * mgmt as a found device.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
				   struct sk_buff *skb)
{
	struct hci_ev_inquiry_result *ev = edata;
	struct inquiry_data data;
	int i;

	/* Validate that the skb really holds ev->num result records. */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Results of an ongoing periodic inquiry are not processed. */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct inquiry_info *info = &ev->info[i];
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode     = info->pscan_rep_mode;
		data.pscan_period_mode  = info->pscan_period_mode;
		data.pscan_mode         = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset       = info->clock_offset;
		/* No RSSI/SSP information in this event format. */
		data.rssi               = HCI_RSSI_INVALID;
		data.ssp_mode           = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
3312
/* Handle HCI_Connection_Complete: a BR/EDR ACL, SCO or eSCO connection
 * attempt finished, successfully or not.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* A SCO completion may match a connection that was
			 * set up as eSCO; anything else without a pending
			 * conn is ignored.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			/* Controller completed it as plain SCO. */
			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		conn->handle = __le16_to_cpu(ev->handle);
		/* Reject handles outside the valid HCI range. */
		if (conn->handle > HCI_CONN_HANDLE_MAX) {
			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
				   conn->handle, HCI_CONN_HANDLE_MAX);
			status = HCI_ERROR_INVALID_PARAMETERS;
			goto done;
		}

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) link with no stored link
			 * key gets the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}

#ifdef TIZEN_BT
		/* TIZEN patch: when local side is central, raise the link
		 * supervision timeout.
		 */
		if (get_link_mode(conn) & HCI_LM_MASTER)
			hci_conn_change_supervision_timeout(conn,
					LINK_SUPERVISION_TIMEOUT);
#endif
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		/* Notify the driver about CVSD air mode, then confirm the
		 * SCO connection to upper layers.
		 */
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
3453
3454 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3455 {
3456         struct hci_cp_reject_conn_req cp;
3457
3458         bacpy(&cp.bdaddr, bdaddr);
3459         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3460         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3461 }
3462
/* Handle HCI_Connection_Request: an incoming BR/EDR ACL/SCO/eSCO
 * connection. Decide whether to reject, accept immediately, or defer
 * the decision to the protocol layer.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Let the protocol layer veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	/* Devices on the reject list are always refused. */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class for this peer. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

#ifdef TIZEN_BT
		/* TIZEN patch: refuse a second (e)SCO link while one is
		 * already present, citing limited resources.
		 */
		if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
		    hci_conn_hash_lookup_sco(hdev)) {
			struct hci_cp_reject_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
			hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
				     sizeof(cp), &cp);
			hci_dev_unlock(hdev);
			return;
		}
#endif

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	/* ACL requests (and sync requests on non-eSCO controllers that
	 * are not deferred) are accepted right away; eSCO-capable sync
	 * requests use the synchronous accept command; deferred requests
	 * are handed to the protocol layer in BT_CONNECT2.
	 */
	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3574
3575 static u8 hci_to_mgmt_reason(u8 err)
3576 {
3577         switch (err) {
3578         case HCI_ERROR_CONNECTION_TIMEOUT:
3579                 return MGMT_DEV_DISCONN_TIMEOUT;
3580         case HCI_ERROR_REMOTE_USER_TERM:
3581         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3582         case HCI_ERROR_REMOTE_POWER_OFF:
3583                 return MGMT_DEV_DISCONN_REMOTE;
3584         case HCI_ERROR_LOCAL_HOST_TERM:
3585                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3586         default:
3587                 return MGMT_DEV_DISCONN_UNKNOWN;
3588         }
3589 }
3590
3591 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3592                                      struct sk_buff *skb)
3593 {
3594         struct hci_ev_disconn_complete *ev = data;
3595         u8 reason;
3596         struct hci_conn_params *params;
3597         struct hci_conn *conn;
3598         bool mgmt_connected;
3599
3600         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3601
3602         hci_dev_lock(hdev);
3603
3604         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3605         if (!conn)
3606                 goto unlock;
3607
3608         if (ev->status) {
3609                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3610                                        conn->dst_type, ev->status);
3611                 goto unlock;
3612         }
3613
3614         conn->state = BT_CLOSED;
3615
3616         mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3617
3618         if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3619                 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3620         else
3621                 reason = hci_to_mgmt_reason(ev->reason);
3622
3623         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3624                                 reason, mgmt_connected);
3625
3626         if (conn->type == ACL_LINK) {
3627                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3628                         hci_remove_link_key(hdev, &conn->dst);
3629
3630                 hci_update_scan(hdev);
3631         }
3632
3633         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3634         if (params) {
3635                 switch (params->auto_connect) {
3636                 case HCI_AUTO_CONN_LINK_LOSS:
3637                         if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3638                                 break;
3639                         fallthrough;
3640
3641                 case HCI_AUTO_CONN_DIRECT:
3642                 case HCI_AUTO_CONN_ALWAYS:
3643                         list_del_init(&params->action);
3644                         list_add(&params->action, &hdev->pend_le_conns);
3645                         hci_update_passive_scan(hdev);
3646                         break;
3647
3648                 default:
3649                         break;
3650                 }
3651         }
3652
3653         hci_disconn_cfm(conn, ev->reason);
3654
3655         /* Re-enable advertising if necessary, since it might
3656          * have been disabled by the connection. From the
3657          * HCI_LE_Set_Advertise_Enable command description in
3658          * the core specification (v4.0):
3659          * "The Controller shall continue advertising until the Host
3660          * issues an LE_Set_Advertise_Enable command with
3661          * Advertising_Enable set to 0x00 (Advertising is disabled)
3662          * or until a connection is created or until the Advertising
3663          * is timed out due to Directed Advertising."
3664          */
3665         if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3666                 hdev->cur_adv_instance = conn->adv_instance;
3667                 hci_enable_advertising(hdev);
3668         }
3669
3670         hci_conn_del(conn);
3671
3672 #ifdef TIZEN_BT
3673         if (conn->type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
3674                 int iscan;
3675                 int pscan;
3676
3677                 iscan = test_bit(HCI_ISCAN, &hdev->flags);
3678                 pscan = test_bit(HCI_PSCAN, &hdev->flags);
3679                 if (!iscan && !pscan) {
3680                         u8 scan_enable = SCAN_PAGE;
3681
3682                         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3683                                      sizeof(scan_enable), &scan_enable);
3684                 }
3685         }
3686 #endif
3687
3688 unlock:
3689         hci_dev_unlock(hdev);
3690 }
3691
3692 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3693                                   struct sk_buff *skb)
3694 {
3695         struct hci_ev_auth_complete *ev = data;
3696         struct hci_conn *conn;
3697
3698         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3699
3700         hci_dev_lock(hdev);
3701
3702         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3703         if (!conn)
3704                 goto unlock;
3705
3706 #ifdef TIZEN_BT
3707         /*  PIN or Key Missing patch */
3708         BT_DBG("remote_auth %x, remote_cap %x, auth_type %x, io_capability %x",
3709                conn->remote_auth, conn->remote_cap,
3710                conn->auth_type, conn->io_capability);
3711
3712         if (ev->status == 0x06 && hci_conn_ssp_enabled(conn)) {
3713                 struct hci_cp_auth_requested cp;
3714
3715                 BT_DBG("Pin or key missing");
3716                 hci_remove_link_key(hdev, &conn->dst);
3717                 cp.handle = cpu_to_le16(conn->handle);
3718                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
3719                              sizeof(cp), &cp);
3720                 goto unlock;
3721         }
3722 #endif
3723
3724         if (!ev->status) {
3725                 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3726
3727                 if (!hci_conn_ssp_enabled(conn) &&
3728                     test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3729                         bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3730                 } else {
3731                         set_bit(HCI_CONN_AUTH, &conn->flags);
3732                         conn->sec_level = conn->pending_sec_level;
3733                 }
3734         } else {
3735                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3736                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3737
3738                 mgmt_auth_failed(conn, ev->status);
3739         }
3740
3741         clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3742         clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3743
3744         if (conn->state == BT_CONFIG) {
3745                 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3746                         struct hci_cp_set_conn_encrypt cp;
3747                         cp.handle  = ev->handle;
3748                         cp.encrypt = 0x01;
3749                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3750                                      &cp);
3751                 } else {
3752                         conn->state = BT_CONNECTED;
3753                         hci_connect_cfm(conn, ev->status);
3754                         hci_conn_drop(conn);
3755                 }
3756         } else {
3757                 hci_auth_cfm(conn, ev->status);
3758
3759                 hci_conn_hold(conn);
3760                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3761                 hci_conn_drop(conn);
3762         }
3763
3764         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3765                 if (!ev->status) {
3766                         struct hci_cp_set_conn_encrypt cp;
3767                         cp.handle  = ev->handle;
3768                         cp.encrypt = 0x01;
3769                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3770                                      &cp);
3771                 } else {
3772                         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3773                         hci_encrypt_cfm(conn, ev->status);
3774                 }
3775         }
3776
3777 unlock:
3778         hci_dev_unlock(hdev);
3779 }
3780
/* Handle HCI Remote Name Request Complete event.
 *
 * Feeds the resolved name (or the failure) into the pending MGMT name
 * lookup, then requests authentication on the matching ACL connection
 * when outgoing authentication is still needed.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
                                struct sk_buff *skb)
{
        struct hci_ev_remote_name *ev = data;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        /* NOTE(review): presumably re-triggers connection attempts deferred
         * while the name request was in flight — see hci_conn_check_pending().
         */
        hci_conn_check_pending(hdev);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

        /* Without the management interface there is no pending name lookup
         * to resolve; go straight to the authentication check.
         */
        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                goto check_auth;

        if (ev->status == 0)
                hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
                                       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
        else
                hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
        if (!conn)
                goto unlock;

        if (!hci_outgoing_auth_needed(hdev, conn))
                goto unlock;

        /* Only send one Authentication Requested per connection */
        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_auth_requested cp;

                set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

                cp.handle = __cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
        }

unlock:
        hci_dev_unlock(hdev);
}
3823
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's encryption/authentication flags from the event,
 * then continues link setup: a failure on a BT_CONNECTED link tears the
 * connection down, a newly-encrypted ACL link triggers an encryption key
 * size read, and qualifying AES-CCM links get the authenticated payload
 * timeout configured before upper layers are notified via hci_encrypt_cfm().
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_encrypt_change *ev = data;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (!conn)
                goto unlock;

        if (!ev->status) {
                if (ev->encrypt) {
                        /* Encryption implies authentication */
                        set_bit(HCI_CONN_AUTH, &conn->flags);
                        set_bit(HCI_CONN_ENCRYPT, &conn->flags);
                        conn->sec_level = conn->pending_sec_level;

                        /* P-256 authentication key implies FIPS */
                        if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
                                set_bit(HCI_CONN_FIPS, &conn->flags);

                        /* encrypt == 0x02 marks AES-CCM on BR/EDR; LE links
                         * are treated as AES-CCM unconditionally here.
                         */
                        if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
                            conn->type == LE_LINK)
                                set_bit(HCI_CONN_AES_CCM, &conn->flags);
                } else {
                        clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
                        clear_bit(HCI_CONN_AES_CCM, &conn->flags);
                }
        }

        /* We should disregard the current RPA and generate a new one
         * whenever the encryption procedure fails.
         */
        if (ev->status && conn->type == LE_LINK) {
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                hci_adv_instances_set_rpa_expired(hdev, true);
        }

        clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

        /* Check link security requirements are met. A policy violation is
         * treated exactly like an authentication failure from here on:
         * ev->status is deliberately overwritten.
         */
        if (!hci_conn_check_link_mode(conn))
                ev->status = HCI_ERROR_AUTH_FAILURE;

        if (ev->status && conn->state == BT_CONNECTED) {
                if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
                        set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

                /* Notify upper layers so they can cleanup before
                 * disconnecting.
                 */
                hci_encrypt_cfm(conn, ev->status);
                hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
                hci_conn_drop(conn);
                goto unlock;
        }

        /* Try reading the encryption key size for encrypted ACL links.
         * When the command is sent, the encrypt_cfm is deferred (goto
         * unlock) until the response arrives — handled elsewhere.
         */
        if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
                struct hci_cp_read_enc_key_size cp;

                /* Only send HCI_Read_Encryption_Key_Size if the
                 * controller really supports it. If it doesn't, assume
                 * the default size (16).
                 */
                if (!(hdev->commands[20] & 0x10)) {
                        conn->enc_key_size = HCI_LINK_KEY_SIZE;
                        goto notify;
                }

                cp.handle = cpu_to_le16(conn->handle);
                if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
                                 sizeof(cp), &cp)) {
                        bt_dev_err(hdev, "sending read key size failed");
                        conn->enc_key_size = HCI_LINK_KEY_SIZE;
                        goto notify;
                }

                goto unlock;
        }

        /* Set the default Authenticated Payload Timeout after
         * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
         * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
         * sent when the link is active and Encryption is enabled, the conn
         * type can be either LE or ACL and controller must support LMP Ping.
         * Ensure for AES-CCM encryption as well.
         */
        if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
            test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
            ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
             (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
                struct hci_cp_write_auth_payload_to cp;

                cp.handle = cpu_to_le16(conn->handle);
                cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
                hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
                             sizeof(cp), &cp);
        }

notify:
        hci_encrypt_cfm(conn, ev->status);

unlock:
        hci_dev_unlock(hdev);
}
3934
3935 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3936                                              struct sk_buff *skb)
3937 {
3938         struct hci_ev_change_link_key_complete *ev = data;
3939         struct hci_conn *conn;
3940
3941         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3942
3943         hci_dev_lock(hdev);
3944
3945         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3946         if (conn) {
3947                 if (!ev->status)
3948                         set_bit(HCI_CONN_SECURE, &conn->flags);
3949
3950                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3951
3952                 hci_key_change_cfm(conn, ev->status);
3953         }
3954
3955         hci_dev_unlock(hdev);
3956 }
3957
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Caches feature page 0 on the connection and, while the connection is
 * still in BT_CONFIG, continues setup: fetch extended features when both
 * sides support them, otherwise resolve the remote name (or tell MGMT the
 * device is connected) and finish setup if no authentication is required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_remote_features *ev = data;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (!conn)
                goto unlock;

        /* Cache page 0 of the remote features on the connection */
        if (!ev->status)
                memcpy(conn->features[0], ev->features, 8);

        /* Everything below only applies while connection setup is still
         * in progress.
         */
        if (conn->state != BT_CONFIG)
                goto unlock;

        /* Both sides support extended features: fetch page 1 and continue
         * setup from that event's handler instead.
         */
        if (!ev->status && lmp_ext_feat_capable(hdev) &&
            lmp_ext_feat_capable(conn)) {
                struct hci_cp_read_remote_ext_features cp;
                cp.handle = ev->handle;
                cp.page = 0x01;
                hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
                             sizeof(cp), &cp);
                goto unlock;
        }

        /* Resolve the remote name unless MGMT already knows about the
         * connection, in which case just report it connected.
         */
        if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
                struct hci_cp_remote_name_req cp;
                memset(&cp, 0, sizeof(cp));
                bacpy(&cp.bdaddr, &conn->dst);
                cp.pscan_rep_mode = 0x02;
                hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
        } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                mgmt_device_connected(hdev, conn, NULL, 0);

        /* No outgoing authentication needed: setup is complete */
        if (!hci_outgoing_auth_needed(hdev, conn)) {
                conn->state = BT_CONNECTED;
                hci_connect_cfm(conn, ev->status);
                hci_conn_drop(conn);
        }

unlock:
        hci_dev_unlock(hdev);
}
4006
4007 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
4008 {
4009         cancel_delayed_work(&hdev->cmd_timer);
4010
4011         rcu_read_lock();
4012         if (!test_bit(HCI_RESET, &hdev->flags)) {
4013                 if (ncmd) {
4014                         cancel_delayed_work(&hdev->ncmd_timer);
4015                         atomic_set(&hdev->cmd_cnt, 1);
4016                 } else {
4017                         if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4018                                 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
4019                                                    HCI_NCMD_TIMEOUT);
4020                 }
4021         }
4022         rcu_read_unlock();
4023 }
4024
4025 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
4026                                         struct sk_buff *skb)
4027 {
4028         struct hci_rp_le_read_buffer_size_v2 *rp = data;
4029
4030         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4031
4032         if (rp->status)
4033                 return rp->status;
4034
4035         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
4036         hdev->le_pkts  = rp->acl_max_pkt;
4037         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
4038         hdev->iso_pkts = rp->iso_max_pkt;
4039
4040         hdev->le_cnt  = hdev->le_pkts;
4041         hdev->iso_cnt = hdev->iso_pkts;
4042
4043         BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
4044                hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
4045
4046         return rp->status;
4047 }
4048
/* Handle Command Complete for HCI_OP_LE_SET_CIG_PARAMS.
 *
 * On failure, every connection in the CIG is closed. On success, the
 * controller-assigned CIS handles are distributed to the not-yet-connected
 * ISO links of the CIG, and CIS creation is kicked off for links whose
 * underlying LE link is already connected.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_rp_le_set_cig_params *rp = data;
        struct hci_conn *conn;
        int i = 0;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        if (rp->status) {
                /* Tear down every connection belonging to this CIG */
                while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
                        conn->state = BT_CLOSED;
                        hci_connect_cfm(conn, rp->status);
                        hci_conn_del(conn);
                }
                goto unlock;
        }

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
                if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
                    conn->state == BT_CONNECTED)
                        continue;

                /* NOTE(review): rp->num_handles is only compared after the
                 * increment below; confirm the variable-length CC check
                 * guarantees rp->handle[] holds num_handles entries for any
                 * controller-supplied num_handles.
                 */
                conn->handle = __le16_to_cpu(rp->handle[i++]);

                bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
                           conn->handle, conn->link);

                /* Create CIS if LE is already connected */
                if (conn->link && conn->link->state == BT_CONNECTED) {
                        /* NOTE(review): the RCU read lock is dropped around
                         * hci_le_create_cis() while the list walk continues
                         * afterwards; this relies on the entry staying valid
                         * (hdev lock is still held) — confirm.
                         */
                        rcu_read_unlock();
                        hci_le_create_cis(conn->link);
                        rcu_read_lock();
                }

                if (i == rp->num_handles)
                        break;
        }

        rcu_read_unlock();

unlock:
        hci_dev_unlock(hdev);

        return rp->status;
}
4099
4100 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
4101                                    struct sk_buff *skb)
4102 {
4103         struct hci_rp_le_setup_iso_path *rp = data;
4104         struct hci_cp_le_setup_iso_path *cp;
4105         struct hci_conn *conn;
4106
4107         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4108
4109         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
4110         if (!cp)
4111                 return rp->status;
4112
4113         hci_dev_lock(hdev);
4114
4115         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
4116         if (!conn)
4117                 goto unlock;
4118
4119         if (rp->status) {
4120                 hci_connect_cfm(conn, rp->status);
4121                 hci_conn_del(conn);
4122                 goto unlock;
4123         }
4124
4125         switch (cp->direction) {
4126         /* Input (Host to Controller) */
4127         case 0x00:
4128                 /* Only confirm connection if output only */
4129                 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
4130                         hci_connect_cfm(conn, rp->status);
4131                 break;
4132         /* Output (Controller to Host) */
4133         case 0x01:
4134                 /* Confirm connection since conn->iso_qos is always configured
4135                  * last.
4136                  */
4137                 hci_connect_cfm(conn, rp->status);
4138                 break;
4139         }
4140
4141 unlock:
4142         hci_dev_unlock(hdev);
4143         return rp->status;
4144 }
4145
/* Command Status handler for HCI_OP_LE_CREATE_BIG: only logs the status.
 * NOTE(review): presumably BIG creation results are delivered via a
 * dedicated LE meta event handled outside this hunk — confirm.
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
        bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
4150
/* Handle Command Complete for HCI_OP_LE_SET_PER_ADV_PARAMS.
 *
 * Currently only validates that the command data is still available;
 * no state is recorded yet (see TODO below).
 */
static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_le_set_per_adv_params *cp;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* Recover the parameters of the command that just completed */
        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
        if (!cp)
                return rp->status;

        /* TODO: set the conn state */
        return rp->status;
}
4169
4170 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
4171                                        struct sk_buff *skb)
4172 {
4173         struct hci_ev_status *rp = data;
4174         __u8 *sent;
4175
4176         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4177
4178         if (rp->status)
4179                 return rp->status;
4180
4181         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
4182         if (!sent)
4183                 return rp->status;
4184
4185         hci_dev_lock(hdev);
4186
4187         if (*sent)
4188                 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
4189         else
4190                 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4191
4192         hci_dev_unlock(hdev);
4193
4194         return rp->status;
4195 }
4196
/* Table-entry helper: handler whose response payload may vary between
 * _min and _max bytes (bounds enforced before the handler runs).
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
        .op = _op, \
        .func = _func, \
        .min_len = _min, \
        .max_len = _max, \
}

/* Table-entry helper: handler with a fixed-size response payload */
#define HCI_CC(_op, _func, _len) \
        HCI_CC_VL(_op, _func, _len, _len)

/* Table-entry helper: handler whose response carries only a status byte */
#define HCI_CC_STATUS(_op, _func) \
        HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4210
/* Dispatch table mapping HCI Command Complete opcodes to their handlers,
 * together with the response payload length bounds that are validated
 * before each handler is invoked.
 */
static const struct hci_cc {
        u16  op;        /* HCI opcode of the completed command */
        u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
        u16  min_len;   /* minimum acceptable response length */
        u16  max_len;   /* lengths above this only produce a warning */
} hci_cc_table[] = {
        HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
        HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
        HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
        HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
                      hci_cc_remote_name_req_cancel),
        HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
               sizeof(struct hci_rp_role_discovery)),
        HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
               sizeof(struct hci_rp_read_link_policy)),
        HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
               sizeof(struct hci_rp_write_link_policy)),
        HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
               sizeof(struct hci_rp_read_def_link_policy)),
        HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
                      hci_cc_write_def_link_policy),
        HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
        HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
               sizeof(struct hci_rp_read_stored_link_key)),
        HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
               sizeof(struct hci_rp_delete_stored_link_key)),
        HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
        HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
               sizeof(struct hci_rp_read_local_name)),
        HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
        HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
        HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
        HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
        HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
               sizeof(struct hci_rp_read_class_of_dev)),
        HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
        HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
               sizeof(struct hci_rp_read_voice_setting)),
        HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
        HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
               sizeof(struct hci_rp_read_num_supported_iac)),
        HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
        HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
        HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
               sizeof(struct hci_rp_read_auth_payload_to)),
        HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
               sizeof(struct hci_rp_write_auth_payload_to)),
        HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
               sizeof(struct hci_rp_read_local_version)),
        HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
               sizeof(struct hci_rp_read_local_commands)),
        HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
               sizeof(struct hci_rp_read_local_features)),
        HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
               sizeof(struct hci_rp_read_local_ext_features)),
        HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
               sizeof(struct hci_rp_read_buffer_size)),
        HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
               sizeof(struct hci_rp_read_bd_addr)),
        HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
               sizeof(struct hci_rp_read_local_pairing_opts)),
        HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
               sizeof(struct hci_rp_read_page_scan_activity)),
        HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                      hci_cc_write_page_scan_activity),
        HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
               sizeof(struct hci_rp_read_page_scan_type)),
        HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
        HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
               sizeof(struct hci_rp_read_data_block_size)),
        HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
               sizeof(struct hci_rp_read_flow_control_mode)),
        HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
               sizeof(struct hci_rp_read_local_amp_info)),
        HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
               sizeof(struct hci_rp_read_clock)),
        HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
               sizeof(struct hci_rp_read_enc_key_size)),
        HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
               sizeof(struct hci_rp_read_inq_rsp_tx_power)),
        HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
               hci_cc_read_def_err_data_reporting,
               sizeof(struct hci_rp_read_def_err_data_reporting)),
        HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                      hci_cc_write_def_err_data_reporting),
        HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
               sizeof(struct hci_rp_pin_code_reply)),
        HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
               sizeof(struct hci_rp_pin_code_neg_reply)),
        HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
               sizeof(struct hci_rp_read_local_oob_data)),
        HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
               sizeof(struct hci_rp_read_local_oob_ext_data)),
        HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
               sizeof(struct hci_rp_le_read_buffer_size)),
        HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
               sizeof(struct hci_rp_le_read_local_features)),
        HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
               sizeof(struct hci_rp_le_read_adv_tx_power)),
        HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
               sizeof(struct hci_rp_user_confirm_reply)),
        HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
               sizeof(struct hci_rp_user_confirm_reply)),
        HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
               sizeof(struct hci_rp_user_confirm_reply)),
        HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
               sizeof(struct hci_rp_user_confirm_reply)),
        HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
        HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
        HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
        HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
        HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
               hci_cc_le_read_accept_list_size,
               sizeof(struct hci_rp_le_read_accept_list_size)),
        HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
        HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
                      hci_cc_le_add_to_accept_list),
        HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
                      hci_cc_le_del_from_accept_list),
        HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
               sizeof(struct hci_rp_le_read_supported_states)),
        HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
               sizeof(struct hci_rp_le_read_def_data_len)),
        HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
                      hci_cc_le_write_def_data_len),
        HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
                      hci_cc_le_add_to_resolv_list),
        HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                      hci_cc_le_del_from_resolv_list),
        HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
                      hci_cc_le_clear_resolv_list),
        HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
               sizeof(struct hci_rp_le_read_resolv_list_size)),
        HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
                      hci_cc_le_set_addr_resolution_enable),
        HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
               sizeof(struct hci_rp_le_read_max_data_len)),
        HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
                      hci_cc_write_le_host_supported),
        HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
        HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
               sizeof(struct hci_rp_read_rssi)),
        HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
               sizeof(struct hci_rp_read_tx_power)),
        HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
        HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                      hci_cc_le_set_ext_scan_param),
        HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                      hci_cc_le_set_ext_scan_enable),
        HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
        HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
               hci_cc_le_read_num_adv_sets,
               sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
        HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
               sizeof(struct hci_rp_le_set_ext_adv_params)),
        HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
                      hci_cc_le_set_ext_adv_enable),
        HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
                      hci_cc_le_set_adv_set_random_addr),
        HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
        HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
        HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
        HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
                      hci_cc_le_set_per_adv_enable),
        HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
               sizeof(struct hci_rp_le_read_transmit_power)),
#ifdef TIZEN_BT
        /* Tizen vendor-specific RSSI monitoring commands */
        HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
               sizeof(struct hci_cc_rsp_enable_rssi)),
        HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
               sizeof(struct hci_cc_rp_get_raw_rssi)),
#endif
        HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
        HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
               sizeof(struct hci_rp_le_read_buffer_size_v2)),
        HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
                  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
        HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
               sizeof(struct hci_rp_le_setup_iso_path)),
};
4391
4392 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4393                       struct sk_buff *skb)
4394 {
4395         void *data;
4396
4397         if (skb->len < cc->min_len) {
4398                 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4399                            cc->op, skb->len, cc->min_len);
4400                 return HCI_ERROR_UNSPECIFIED;
4401         }
4402
4403         /* Just warn if the length is over max_len size it still be possible to
4404          * partially parse the cc so leave to callback to decide if that is
4405          * acceptable.
4406          */
4407         if (skb->len > cc->max_len)
4408                 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4409                             cc->op, skb->len, cc->max_len);
4410
4411         data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4412         if (!data)
4413                 return HCI_ERROR_UNSPECIFIED;
4414
4415         return cc->func(hdev, data, skb);
4416 }
4417
/* Handle HCI Command Complete event.
 *
 * Looks up the completed opcode in hci_cc_table and runs its handler to
 * obtain the command status, updates command-credit accounting, and
 * forwards completion to any request waiting on this opcode.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
                                 struct sk_buff *skb, u16 *opcode, u8 *status,
                                 hci_req_complete_t *req_complete,
                                 hci_req_complete_skb_t *req_complete_skb)
{
        struct hci_ev_cmd_complete *ev = data;
        int i;

        *opcode = __le16_to_cpu(ev->opcode);

        bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

        for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
                if (hci_cc_table[i].op == *opcode) {
                        *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
                        break;
                }
        }

        if (i == ARRAY_SIZE(hci_cc_table)) {
                /* Unknown opcode, assume byte 0 contains the status, so
                 * that e.g. __hci_cmd_sync() properly returns errors
                 * for vendor specific commands send by HCI drivers.
                 * If a vendor doesn't actually follow this convention we may
                 * need to introduce a vendor CC table in order to properly set
                 * the status.
                 * NOTE(review): this read assumes the event carries at least
                 * one parameter byte — confirm skb->len > 0 is guaranteed by
                 * the event dispatch path.
                 */
                *status = skb->data[0];
        }

        handle_cmd_cnt_and_timer(hdev, ev->ncmd);

        hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
                             req_complete_skb);

        if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
                bt_dev_err(hdev,
                           "unexpected event for opcode 0x%4.4x", *opcode);
                return;
        }

        /* Resume sending queued commands if the controller has credit */
        if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
                queue_work(hdev->workqueue, &hdev->cmd_work);
}
4462
/* Handle Command Status for HCI_OP_LE_CREATE_CIS.
 *
 * Only a failure status needs work here: every connection object that
 * was created for the requested CIS handles must be torn down, since no
 * CIS Established event will follow.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
        struct hci_cp_le_create_cis *cp;
        int i;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Success: setup continues via the CIS Established event. */
        if (!status)
                return;

        /* Recover the parameters of the command this status refers to. */
        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        /* Remove connection if command failed */
        /* NOTE: the loop counts num_cis down (mutating the sent-command
         * copy) while i walks cp->cis[] upwards, so it visits each of
         * the originally requested CIS entries exactly once.
         */
        for (i = 0; cp->num_cis; cp->num_cis--, i++) {
                struct hci_conn *conn;
                u16 handle;

                handle = __le16_to_cpu(cp->cis[i].cis_handle);

                conn = hci_conn_hash_lookup_handle(hdev, handle);
                if (conn) {
                        conn->state = BT_CLOSED;
                        hci_connect_cfm(conn, status);
                        hci_conn_del(conn);
                }
        }

        hci_dev_unlock(hdev);
}
4496
/* Helper to populate one entry of hci_cs_table below. */
#define HCI_CS(_op, _func) \
{ \
        .op = _op, \
        .func = _func, \
}

/* Dispatch table mapping HCI command opcodes to their Command Status
 * handlers; searched linearly by hci_cmd_status_evt().
 */
static const struct hci_cs {
        u16  op;        /* HCI command opcode this entry applies to */
        void (*func)(struct hci_dev *hdev, __u8 status); /* status handler */
} hci_cs_table[] = {
        HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
        HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
        HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
        HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
        HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
        HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
        HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
        HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
        HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
               hci_cs_read_remote_ext_features),
        HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
        HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
               hci_cs_enhanced_setup_sync_conn),
        HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
        HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
        HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
        HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
        HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
        HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
        HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
        HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
        HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4530
/* Handle HCI_EV_CMD_STATUS: dispatch the status to the per-opcode
 * handler in hci_cs_table, update the command credit/timer state and
 * complete the pending request where appropriate.
 *
 * @opcode/@status are out-parameters reported back to the caller;
 * @req_complete/@req_complete_skb receive the completion callbacks of
 * the request that finished, if any.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
                               struct sk_buff *skb, u16 *opcode, u8 *status,
                               hci_req_complete_t *req_complete,
                               hci_req_complete_skb_t *req_complete_skb)
{
        struct hci_ev_cmd_status *ev = data;
        int i;

        *opcode = __le16_to_cpu(ev->opcode);
        *status = ev->status;

        bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

        /* Linear search; the table is small and this is not a hot path. */
        for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
                if (hci_cs_table[i].op == *opcode) {
                        hci_cs_table[i].func(hdev, ev->status);
                        break;
                }
        }

        handle_cmd_cnt_and_timer(hdev, ev->ncmd);

        /* Indicate request completion if the command failed. Also, if
         * we're not waiting for a special event and we get a success
         * command status we should try to flag the request as completed
         * (since for this kind of commands there will not be a command
         * complete event).
         */
        if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
                hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
                                     req_complete_skb);
                if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
                        bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
                                   *opcode);
                        return;
                }
        }

        /* Kick the command queue if credits are available again. */
        if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
                queue_work(hdev->workqueue, &hdev->cmd_work);
}
4572
/* Handle HCI_EV_HARDWARE_ERROR: record the controller's error code and
 * schedule the error-reset work to recover the device.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_hardware_error *ev = data;

        bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);

#ifdef TIZEN_BT
        /* Tizen: also report the hardware error to the management layer. */
        hci_dev_lock(hdev);
        mgmt_hardware_error(hdev, ev->code);
        hci_dev_unlock(hdev);
#endif
        /* Stash the code before queueing; presumably the error_reset
         * worker consumes hw_error_code — keep the store first.
         */
        hdev->hw_error_code = ev->code;

        queue_work(hdev->req_workqueue, &hdev->error_reset);
}
4589
4590 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4591                                 struct sk_buff *skb)
4592 {
4593         struct hci_ev_role_change *ev = data;
4594         struct hci_conn *conn;
4595
4596         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4597
4598         hci_dev_lock(hdev);
4599
4600         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4601         if (conn) {
4602                 if (!ev->status)
4603                         conn->role = ev->role;
4604
4605                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4606
4607                 hci_role_switch_cfm(conn, ev->status, ev->role);
4608 #ifdef TIZEN_BT
4609                 if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
4610                         hci_conn_change_supervision_timeout(conn,
4611                                         LINK_SUPERVISION_TIMEOUT);
4612 #endif
4613         }
4614
4615         hci_dev_unlock(hdev);
4616 }
4617
4618 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4619                                   struct sk_buff *skb)
4620 {
4621         struct hci_ev_num_comp_pkts *ev = data;
4622         int i;
4623
4624         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4625                              flex_array_size(ev, handles, ev->num)))
4626                 return;
4627
4628         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4629                 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4630                 return;
4631         }
4632
4633         bt_dev_dbg(hdev, "num %d", ev->num);
4634
4635         for (i = 0; i < ev->num; i++) {
4636                 struct hci_comp_pkts_info *info = &ev->handles[i];
4637                 struct hci_conn *conn;
4638                 __u16  handle, count;
4639
4640                 handle = __le16_to_cpu(info->handle);
4641                 count  = __le16_to_cpu(info->count);
4642
4643                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4644                 if (!conn)
4645                         continue;
4646
4647                 conn->sent -= count;
4648
4649                 switch (conn->type) {
4650                 case ACL_LINK:
4651                         hdev->acl_cnt += count;
4652                         if (hdev->acl_cnt > hdev->acl_pkts)
4653                                 hdev->acl_cnt = hdev->acl_pkts;
4654                         break;
4655
4656                 case LE_LINK:
4657                         if (hdev->le_pkts) {
4658                                 hdev->le_cnt += count;
4659                                 if (hdev->le_cnt > hdev->le_pkts)
4660                                         hdev->le_cnt = hdev->le_pkts;
4661                         } else {
4662                                 hdev->acl_cnt += count;
4663                                 if (hdev->acl_cnt > hdev->acl_pkts)
4664                                         hdev->acl_cnt = hdev->acl_pkts;
4665                         }
4666                         break;
4667
4668                 case SCO_LINK:
4669                         hdev->sco_cnt += count;
4670                         if (hdev->sco_cnt > hdev->sco_pkts)
4671                                 hdev->sco_cnt = hdev->sco_pkts;
4672                         break;
4673
4674                 case ISO_LINK:
4675                         if (hdev->iso_pkts) {
4676                                 hdev->iso_cnt += count;
4677                                 if (hdev->iso_cnt > hdev->iso_pkts)
4678                                         hdev->iso_cnt = hdev->iso_pkts;
4679                         } else if (hdev->le_pkts) {
4680                                 hdev->le_cnt += count;
4681                                 if (hdev->le_cnt > hdev->le_pkts)
4682                                         hdev->le_cnt = hdev->le_pkts;
4683                         } else {
4684                                 hdev->acl_cnt += count;
4685                                 if (hdev->acl_cnt > hdev->acl_pkts)
4686                                         hdev->acl_cnt = hdev->acl_pkts;
4687                         }
4688                         break;
4689
4690                 default:
4691                         bt_dev_err(hdev, "unknown type %d conn %p",
4692                                    conn->type, conn);
4693                         break;
4694                 }
4695         }
4696
4697         queue_work(hdev->workqueue, &hdev->tx_work);
4698 }
4699
4700 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4701                                                  __u16 handle)
4702 {
4703         struct hci_chan *chan;
4704
4705         switch (hdev->dev_type) {
4706         case HCI_PRIMARY:
4707                 return hci_conn_hash_lookup_handle(hdev, handle);
4708         case HCI_AMP:
4709                 chan = hci_chan_lookup_handle(hdev, handle);
4710                 if (chan)
4711                         return chan->conn;
4712                 break;
4713         default:
4714                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4715                 break;
4716         }
4717
4718         return NULL;
4719 }
4720
4721 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4722                                     struct sk_buff *skb)
4723 {
4724         struct hci_ev_num_comp_blocks *ev = data;
4725         int i;
4726
4727         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4728                              flex_array_size(ev, handles, ev->num_hndl)))
4729                 return;
4730
4731         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4732                 bt_dev_err(hdev, "wrong event for mode %d",
4733                            hdev->flow_ctl_mode);
4734                 return;
4735         }
4736
4737         bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4738                    ev->num_hndl);
4739
4740         for (i = 0; i < ev->num_hndl; i++) {
4741                 struct hci_comp_blocks_info *info = &ev->handles[i];
4742                 struct hci_conn *conn = NULL;
4743                 __u16  handle, block_count;
4744
4745                 handle = __le16_to_cpu(info->handle);
4746                 block_count = __le16_to_cpu(info->blocks);
4747
4748                 conn = __hci_conn_lookup_handle(hdev, handle);
4749                 if (!conn)
4750                         continue;
4751
4752                 conn->sent -= block_count;
4753
4754                 switch (conn->type) {
4755                 case ACL_LINK:
4756                 case AMP_LINK:
4757                         hdev->block_cnt += block_count;
4758                         if (hdev->block_cnt > hdev->num_blocks)
4759                                 hdev->block_cnt = hdev->num_blocks;
4760                         break;
4761
4762                 default:
4763                         bt_dev_err(hdev, "unknown type %d conn %p",
4764                                    conn->type, conn);
4765                         break;
4766                 }
4767         }
4768
4769         queue_work(hdev->workqueue, &hdev->tx_work);
4770 }
4771
4772 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4773                                 struct sk_buff *skb)
4774 {
4775         struct hci_ev_mode_change *ev = data;
4776         struct hci_conn *conn;
4777
4778         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4779
4780         hci_dev_lock(hdev);
4781
4782         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4783         if (conn) {
4784                 conn->mode = ev->mode;
4785
4786                 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4787                                         &conn->flags)) {
4788                         if (conn->mode == HCI_CM_ACTIVE)
4789                                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4790                         else
4791                                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4792                 }
4793
4794                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4795                         hci_sco_setup(conn, ev->status);
4796         }
4797
4798         hci_dev_unlock(hdev);
4799 }
4800
4801 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4802                                      struct sk_buff *skb)
4803 {
4804         struct hci_ev_pin_code_req *ev = data;
4805         struct hci_conn *conn;
4806
4807         bt_dev_dbg(hdev, "");
4808
4809         hci_dev_lock(hdev);
4810
4811         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4812         if (!conn)
4813                 goto unlock;
4814
4815         if (conn->state == BT_CONNECTED) {
4816                 hci_conn_hold(conn);
4817                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4818                 hci_conn_drop(conn);
4819         }
4820
4821         if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4822             !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4823                 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4824                              sizeof(ev->bdaddr), &ev->bdaddr);
4825         } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4826                 u8 secure;
4827
4828                 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4829                         secure = 1;
4830                 else
4831                         secure = 0;
4832
4833                 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4834         }
4835
4836 unlock:
4837         hci_dev_unlock(hdev);
4838 }
4839
4840 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4841 {
4842         if (key_type == HCI_LK_CHANGED_COMBINATION)
4843                 return;
4844
4845         conn->pin_length = pin_len;
4846         conn->key_type = key_type;
4847
4848         switch (key_type) {
4849         case HCI_LK_LOCAL_UNIT:
4850         case HCI_LK_REMOTE_UNIT:
4851         case HCI_LK_DEBUG_COMBINATION:
4852                 return;
4853         case HCI_LK_COMBINATION:
4854                 if (pin_len == 16)
4855                         conn->pending_sec_level = BT_SECURITY_HIGH;
4856                 else
4857                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4858                 break;
4859         case HCI_LK_UNAUTH_COMBINATION_P192:
4860         case HCI_LK_UNAUTH_COMBINATION_P256:
4861                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4862                 break;
4863         case HCI_LK_AUTH_COMBINATION_P192:
4864                 conn->pending_sec_level = BT_SECURITY_HIGH;
4865                 break;
4866         case HCI_LK_AUTH_COMBINATION_P256:
4867                 conn->pending_sec_level = BT_SECURITY_FIPS;
4868                 break;
4869         }
4870 }
4871
/* Handle HCI_EV_LINK_KEY_REQ: look up a stored link key for the peer
 * and reply with it, or send a negative reply when no acceptable key
 * exists.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_ev_link_key_req *ev = data;
        struct hci_cp_link_key_reply cp;
        struct hci_conn *conn;
        struct link_key *key;

        bt_dev_dbg(hdev, "");

        /* The kernel key store is only used with the management interface. */
        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        hci_dev_lock(hdev);

        key = hci_find_link_key(hdev, &ev->bdaddr);
        if (!key) {
                bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
                goto not_found;
        }

        bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
        if (conn) {
                clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

                /* Reject an unauthenticated key when bit 0 of the
                 * connection's auth requirements is set, unless the
                 * requirements are still unknown (0xff).
                 */
                if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
                     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
                    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
                        bt_dev_dbg(hdev, "ignoring unauthenticated key");
                        goto not_found;
                }

                /* A combination key from a short PIN is too weak when
                 * high or FIPS security is pending.
                 */
                if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
                    (conn->pending_sec_level == BT_SECURITY_HIGH ||
                     conn->pending_sec_level == BT_SECURITY_FIPS)) {
                        bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
                        goto not_found;
                }

                conn_set_key(conn, key->type, key->pin_len);
        }

        bacpy(&cp.bdaddr, &ev->bdaddr);
        memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

        hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

        hci_dev_unlock(hdev);

        return;

not_found:
        hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
        hci_dev_unlock(hdev);
}
4929
/* Handle HCI_EV_LINK_KEY_NOTIFY: store the link key produced during
 * pairing and notify the management interface.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_link_key_notify *ev = data;
        struct hci_conn *conn;
        struct link_key *key;
        bool persistent;
        u8 pin_len = 0; /* this event carries no PIN length */

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
        if (!conn)
                goto unlock;

        /* hold/drop around the update — presumably re-arms the idle
         * disconnect timer with the new timeout; confirm in hci_conn.c.
         */
        hci_conn_hold(conn);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
        hci_conn_drop(conn);

        set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
        conn_set_key(conn, ev->key_type, conn->pin_length);

        /* Keys are only persisted when the management interface is used. */
        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                goto unlock;

        key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
                                ev->key_type, pin_len, &persistent);
        if (!key)
                goto unlock;

        /* Update connection information since adding the key will have
         * fixed up the type in the case of changed combination keys.
         */
        if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
                conn_set_key(conn, key->type, key->pin_len);

        mgmt_new_link_key(hdev, key, persistent);

        /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
         * is set. If it's not set simply remove the key from the kernel
         * list (we've still notified user space about it but with
         * store_hint being 0).
         */
        if (key->type == HCI_LK_DEBUG_COMBINATION &&
            !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
                goto unlock;
        }

        /* Flag non-persistent keys for flushing on disconnect. */
        if (persistent)
                clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
        else
                set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
        hci_dev_unlock(hdev);
}
4990
4991 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4992                                  struct sk_buff *skb)
4993 {
4994         struct hci_ev_clock_offset *ev = data;
4995         struct hci_conn *conn;
4996
4997         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4998
4999         hci_dev_lock(hdev);
5000
5001         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5002         if (conn && !ev->status) {
5003                 struct inquiry_entry *ie;
5004
5005                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
5006                 if (ie) {
5007                         ie->data.clock_offset = ev->clock_offset;
5008                         ie->timestamp = jiffies;
5009                 }
5010         }
5011
5012         hci_dev_unlock(hdev);
5013 }
5014
5015 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
5016                                     struct sk_buff *skb)
5017 {
5018         struct hci_ev_pkt_type_change *ev = data;
5019         struct hci_conn *conn;
5020
5021         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5022
5023         hci_dev_lock(hdev);
5024
5025         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5026         if (conn && !ev->status)
5027                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
5028
5029         hci_dev_unlock(hdev);
5030 }
5031
5032 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
5033                                    struct sk_buff *skb)
5034 {
5035         struct hci_ev_pscan_rep_mode *ev = data;
5036         struct inquiry_entry *ie;
5037
5038         bt_dev_dbg(hdev, "");
5039
5040         hci_dev_lock(hdev);
5041
5042         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5043         if (ie) {
5044                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
5045                 ie->timestamp = jiffies;
5046         }
5047
5048         hci_dev_unlock(hdev);
5049 }
5050
5051 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
5052                                              struct sk_buff *skb)
5053 {
5054         struct hci_ev_inquiry_result_rssi *ev = edata;
5055         struct inquiry_data data;
5056         int i;
5057
5058         bt_dev_dbg(hdev, "num_rsp %d", ev->num);
5059
5060         if (!ev->num)
5061                 return;
5062
5063         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5064                 return;
5065
5066         hci_dev_lock(hdev);
5067
5068         if (skb->len == array_size(ev->num,
5069                                    sizeof(struct inquiry_info_rssi_pscan))) {
5070                 struct inquiry_info_rssi_pscan *info;
5071
5072                 for (i = 0; i < ev->num; i++) {
5073                         u32 flags;
5074
5075                         info = hci_ev_skb_pull(hdev, skb,
5076                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
5077                                                sizeof(*info));
5078                         if (!info) {
5079                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
5080                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
5081                                 goto unlock;
5082                         }
5083
5084                         bacpy(&data.bdaddr, &info->bdaddr);
5085                         data.pscan_rep_mode     = info->pscan_rep_mode;
5086                         data.pscan_period_mode  = info->pscan_period_mode;
5087                         data.pscan_mode         = info->pscan_mode;
5088                         memcpy(data.dev_class, info->dev_class, 3);
5089                         data.clock_offset       = info->clock_offset;
5090                         data.rssi               = info->rssi;
5091                         data.ssp_mode           = 0x00;
5092
5093                         flags = hci_inquiry_cache_update(hdev, &data, false);
5094
5095                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5096                                           info->dev_class, info->rssi,
5097                                           flags, NULL, 0, NULL, 0, 0);
5098                 }
5099         } else if (skb->len == array_size(ev->num,
5100                                           sizeof(struct inquiry_info_rssi))) {
5101                 struct inquiry_info_rssi *info;
5102
5103                 for (i = 0; i < ev->num; i++) {
5104                         u32 flags;
5105
5106                         info = hci_ev_skb_pull(hdev, skb,
5107                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
5108                                                sizeof(*info));
5109                         if (!info) {
5110                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
5111                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
5112                                 goto unlock;
5113                         }
5114
5115                         bacpy(&data.bdaddr, &info->bdaddr);
5116                         data.pscan_rep_mode     = info->pscan_rep_mode;
5117                         data.pscan_period_mode  = info->pscan_period_mode;
5118                         data.pscan_mode         = 0x00;
5119                         memcpy(data.dev_class, info->dev_class, 3);
5120                         data.clock_offset       = info->clock_offset;
5121                         data.rssi               = info->rssi;
5122                         data.ssp_mode           = 0x00;
5123
5124                         flags = hci_inquiry_cache_update(hdev, &data, false);
5125
5126                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5127                                           info->dev_class, info->rssi,
5128                                           flags, NULL, 0, NULL, 0, 0);
5129                 }
5130         } else {
5131                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
5132                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
5133         }
5134 unlock:
5135         hci_dev_unlock(hdev);
5136 }
5137
/* Handle HCI_EV_REMOTE_EXT_FEATURES: cache the reported feature page
 * and, for page 0x01 (remote host features), sync the SSP/SC connection
 * flags.  While the link is in BT_CONFIG, continue connection setup
 * (remote name request, mgmt notification, authentication).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
                                        struct sk_buff *skb)
{
        struct hci_ev_remote_ext_features *ev = data;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (!conn)
                goto unlock;

        /* The page is cached regardless of status; it is only acted on
         * below when the status indicates success.
         */
        if (ev->page < HCI_MAX_PAGES)
                memcpy(conn->features[ev->page], ev->features, 8);

        if (!ev->status && ev->page == 0x01) {
                struct inquiry_entry *ie;

                ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
                if (ie)
                        ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

                if (ev->features[0] & LMP_HOST_SSP) {
                        set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
                } else {
                        /* It is mandatory by the Bluetooth specification that
                         * Extended Inquiry Results are only used when Secure
                         * Simple Pairing is enabled, but some devices violate
                         * this.
                         *
                         * To make these devices work, the internal SSP
                         * enabled flag needs to be cleared if the remote host
                         * features do not indicate SSP support */
                        clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
                }

                if (ev->features[0] & LMP_HOST_SC)
                        set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
        }

        /* Everything below only applies while connection setup is ongoing. */
        if (conn->state != BT_CONFIG)
                goto unlock;

        if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
                struct hci_cp_remote_name_req cp;
                memset(&cp, 0, sizeof(cp));
                bacpy(&cp.bdaddr, &conn->dst);
                cp.pscan_rep_mode = 0x02;
                hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
        } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                mgmt_device_connected(hdev, conn, NULL, 0);

        /* With no outgoing authentication pending, setup is complete. */
        if (!hci_outgoing_auth_needed(hdev, conn)) {
                conn->state = BT_CONNECTED;
                hci_connect_cfm(conn, ev->status);
                hci_conn_drop(conn);
        }

unlock:
        hci_dev_unlock(hdev);
}
5201
/* Handle HCI Synchronous Connection Complete event.
 *
 * Finalizes setup of a SCO/eSCO link on success, retries the setup with
 * a downgraded packet type on a known set of negotiation failures, and
 * tears the connection down on any other error.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		/* Reject controller-assigned handles outside the valid range */
		if (conn->handle > HCI_CONN_HANDLE_MAX) {
			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
				   conn->handle, HCI_CONN_HANDLE_MAX);
			status = HCI_ERROR_INVALID_PARAMETERS;
			conn->state = BT_CLOSED;
			break;
		}

		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For outgoing links, retry once with a less demanding
		 * packet type before declaring the connection closed.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over HCI transport data path which
	 * is zero and non-zero value shall be non-HCI transport data path
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
5317
5318 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5319 {
5320         size_t parsed = 0;
5321
5322         while (parsed < eir_len) {
5323                 u8 field_len = eir[0];
5324
5325                 if (field_len == 0)
5326                         return parsed;
5327
5328                 parsed += field_len + 1;
5329                 eir += field_len + 1;
5330         }
5331
5332         return eir_len;
5333 }
5334
/* Handle HCI Extended Inquiry Result event.
 *
 * Updates the inquiry cache with every reported device and forwards
 * each result, including its EIR data, to user space via mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	/* Make sure the skb actually carries ev->num result entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Results of an ongoing periodic inquiry are not forwarded */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* With mgmt enabled, the name only counts as known when
		 * the EIR data already carries the complete name field.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		/* Forward only the significant part of the EIR data */
		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
5389
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * For LE links this completes a pending security upgrade: on success
 * the pending security level takes effect; on failure an established
 * link is disconnected with an authentication failure.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link means encryption could
	 * not be upgraded; drop the connection.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In BT_CONFIG the connection setup itself is completing;
	 * otherwise report the result of the authentication.
	 */
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5439
5440 static u8 hci_get_auth_req(struct hci_conn *conn)
5441 {
5442 #ifdef TIZEN_BT
5443         if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
5444                 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5445                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5446                         return HCI_AT_GENERAL_BONDING_MITM;
5447         }
5448 #endif
5449
5450         /* If remote requests no-bonding follow that lead */
5451         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5452             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5453                 return conn->remote_auth | (conn->auth_type & 0x01);
5454
5455         /* If both remote and local have enough IO capabilities, require
5456          * MITM protection
5457          */
5458         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5459             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5460                 return conn->remote_auth | 0x01;
5461
5462         /* No MITM protection possible so ignore remote requirement */
5463         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5464 }
5465
5466 static u8 bredr_oob_data_present(struct hci_conn *conn)
5467 {
5468         struct hci_dev *hdev = conn->hdev;
5469         struct oob_data *data;
5470
5471         data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5472         if (!data)
5473                 return 0x00;
5474
5475         if (bredr_sc_enabled(hdev)) {
5476                 /* When Secure Connections is enabled, then just
5477                  * return the present value stored with the OOB
5478                  * data. The stored value contains the right present
5479                  * information. However it can only be trusted when
5480                  * not in Secure Connection Only mode.
5481                  */
5482                 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5483                         return data->present;
5484
5485                 /* When Secure Connections Only mode is enabled, then
5486                  * the P-256 values are required. If they are not
5487                  * available, then do not declare that OOB data is
5488                  * present.
5489                  */
5490                 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5491                     !memcmp(data->hash256, ZERO_KEY, 16))
5492                         return 0x00;
5493
5494                 return 0x02;
5495         }
5496
5497         /* When Secure Connections is not enabled or actually
5498          * not supported by the hardware, then check that if
5499          * P-192 data values are present.
5500          */
5501         if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5502             !memcmp(data->hash192, ZERO_KEY, 16))
5503                 return 0x00;
5504
5505         return 0x01;
5506 }
5507
/* Handle HCI IO Capability Request event.
 *
 * Replies with our IO capability, authentication requirements and OOB
 * data presence when pairing is allowed, otherwise sends a negative
 * reply rejecting the pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive while pairing is in progress */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			/* As acceptors, derive the requirements from what
			 * the remote requested and our configuration.
			 */
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5577
5578 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5579                                   struct sk_buff *skb)
5580 {
5581         struct hci_ev_io_capa_reply *ev = data;
5582         struct hci_conn *conn;
5583
5584         bt_dev_dbg(hdev, "");
5585
5586         hci_dev_lock(hdev);
5587
5588         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5589         if (!conn)
5590                 goto unlock;
5591
5592         conn->remote_cap = ev->capability;
5593         conn->remote_auth = ev->authentication;
5594
5595 unlock:
5596         hci_dev_unlock(hdev);
5597 }
5598
/* Handle HCI User Confirmation Request event.
 *
 * Decides whether to auto-accept the numeric comparison, reject it, or
 * defer the decision to user space via mgmt_user_confirm_request()
 * (optionally with confirm_hint set for authorization-only prompts).
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement encodes MITM */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the auto-accept to a delayed
		 * work item instead of replying immediately.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5683
5684 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5685                                          struct sk_buff *skb)
5686 {
5687         struct hci_ev_user_passkey_req *ev = data;
5688
5689         bt_dev_dbg(hdev, "");
5690
5691         if (hci_dev_test_flag(hdev, HCI_MGMT))
5692                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5693 }
5694
5695 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5696                                         struct sk_buff *skb)
5697 {
5698         struct hci_ev_user_passkey_notify *ev = data;
5699         struct hci_conn *conn;
5700
5701         bt_dev_dbg(hdev, "");
5702
5703         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5704         if (!conn)
5705                 return;
5706
5707         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5708         conn->passkey_entered = 0;
5709
5710         if (hci_dev_test_flag(hdev, HCI_MGMT))
5711                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5712                                          conn->dst_type, conn->passkey_notify,
5713                                          conn->passkey_entered);
5714 }
5715
5716 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5717                                     struct sk_buff *skb)
5718 {
5719         struct hci_ev_keypress_notify *ev = data;
5720         struct hci_conn *conn;
5721
5722         bt_dev_dbg(hdev, "");
5723
5724         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5725         if (!conn)
5726                 return;
5727
5728         switch (ev->type) {
5729         case HCI_KEYPRESS_STARTED:
5730                 conn->passkey_entered = 0;
5731                 return;
5732
5733         case HCI_KEYPRESS_ENTERED:
5734                 conn->passkey_entered++;
5735                 break;
5736
5737         case HCI_KEYPRESS_ERASED:
5738                 conn->passkey_entered--;
5739                 break;
5740
5741         case HCI_KEYPRESS_CLEARED:
5742                 conn->passkey_entered = 0;
5743                 break;
5744
5745         case HCI_KEYPRESS_COMPLETED:
5746                 return;
5747         }
5748
5749         if (hci_dev_test_flag(hdev, HCI_MGMT))
5750                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5751                                          conn->dst_type, conn->passkey_notify,
5752                                          conn->passkey_entered);
5753 }
5754
5755 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5756                                          struct sk_buff *skb)
5757 {
5758         struct hci_ev_simple_pair_complete *ev = data;
5759         struct hci_conn *conn;
5760
5761         bt_dev_dbg(hdev, "");
5762
5763         hci_dev_lock(hdev);
5764
5765         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5766         if (!conn)
5767                 goto unlock;
5768
5769         /* Reset the authentication requirement to unknown */
5770         conn->remote_auth = 0xff;
5771
5772         /* To avoid duplicate auth_failed events to user space we check
5773          * the HCI_CONN_AUTH_PEND flag which will be set if we
5774          * initiated the authentication. A traditional auth_complete
5775          * event gets always produced as initiator and is also mapped to
5776          * the mgmt_auth_failed event */
5777         if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5778                 mgmt_auth_failed(conn, ev->status);
5779
5780         hci_conn_drop(conn);
5781
5782 unlock:
5783         hci_dev_unlock(hdev);
5784 }
5785
/* Handle HCI Remote Host Supported Features Notification event.
 *
 * Caches the remote host features on an existing ACL connection and
 * records the remote host's SSP support in the inquiry cache.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* Page 1 of the features table holds the host features */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
5807
/* Handle HCI Remote OOB Data Request event.
 *
 * Answers the controller with locally stored out-of-band pairing data
 * for the remote device, or with a negative reply if none is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode the legacy P-192
		 * values are not used and are sent as zeroes.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Without Secure Connections only the P-192 values are
		 * reported.
		 */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5861
5862 #if IS_ENABLED(CONFIG_BT_HS)
5863 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5864                                   struct sk_buff *skb)
5865 {
5866         struct hci_ev_channel_selected *ev = data;
5867         struct hci_conn *hcon;
5868
5869         bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5870
5871         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5872         if (!hcon)
5873                 return;
5874
5875         amp_read_loc_assoc_final_data(hdev, hcon);
5876 }
5877
/* Handle HCI Physical Link Complete event (AMP).
 *
 * On success marks the AMP physical link as connected, copies the peer
 * address from the underlying BR/EDR connection and confirms the
 * physical link; on failure the connection object is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = data;
	struct hci_conn *hcon, *bredr_hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
		   ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only connections managed by an AMP manager are handled here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	/* The AMP link inherits the peer address of the bridged BR/EDR
	 * connection.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5918
/* Handle HCI Logical Link Complete event (AMP).
 *
 * Creates the hci_chan for the new logical link and, when a bridged
 * L2CAP channel is waiting on it, confirms the logical link to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5957
/* Handle HCI Disconnection Logical Link Complete event (AMP): destroy
 * the AMP logical link associated with the reported handle.
 */
static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = data;
	struct hci_chan *hchan;

	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->status);

	/* A failed disconnection leaves the link untouched */
	if (ev->status)
		return;

	hci_dev_lock(hdev);

	/* Only AMP channels are subject to logical link teardown */
	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan || !hchan->amp)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}
5981
5982 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5983                                              struct sk_buff *skb)
5984 {
5985         struct hci_ev_disconn_phy_link_complete *ev = data;
5986         struct hci_conn *hcon;
5987
5988         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5989
5990         if (ev->status)
5991                 return;
5992
5993         hci_dev_lock(hdev);
5994
5995         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5996         if (hcon && hcon->type == AMP_LINK) {
5997                 hcon->state = BT_CLOSED;
5998                 hci_disconn_cfm(hcon, ev->reason);
5999                 hci_conn_del(hcon);
6000         }
6001
6002         hci_dev_unlock(hdev);
6003 }
6004 #endif
6005
/* Record the initiator and responder addresses of a new LE connection.
 *
 * For outgoing connections the peer is the responder; for incoming
 * connections the peer is the initiator. A Local RPA reported by the
 * controller takes precedence over locally tracked addresses.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			/* Without privacy, fall back to the identity address */
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
6058
/* Common handler for both the legacy LE Connection Complete and the
 * Enhanced Connection Complete events.
 *
 * Looks up (or creates) the hci_conn for @bdaddr, resolves the identity
 * address via stored IRKs, validates @handle, and transitions the
 * connection towards BT_CONFIG/BT_CONNECTED, notifying mgmt along the way.
 *
 * @local_rpa is only non-NULL for the enhanced variant of the event.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
#ifdef TIZEN_BT
		/* LE auto connect */
		bacpy(&conn->dst, bdaddr);
#endif
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* Reject handles outside the spec-defined range; treated as a
	 * connection failure below.
	 */
	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
			   HCI_CONN_HANDLE_MAX);
		status = HCI_ERROR_INVALID_PARAMETERS;
	}

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		goto unlock;

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Hold the connection until the remote features procedure
		 * completes.
		 */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* Clear any pending auto-connect action for this device and drop
	 * the reference the params held on the connection attempt.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
6235
6236 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6237                                      struct sk_buff *skb)
6238 {
6239         struct hci_ev_le_conn_complete *ev = data;
6240
6241         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6242
6243         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6244                              NULL, ev->role, le16_to_cpu(ev->handle),
6245                              le16_to_cpu(ev->interval),
6246                              le16_to_cpu(ev->latency),
6247                              le16_to_cpu(ev->supervision_timeout));
6248 }
6249
6250 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6251                                          struct sk_buff *skb)
6252 {
6253         struct hci_ev_le_enh_conn_complete *ev = data;
6254
6255         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6256
6257         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6258                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6259                              le16_to_cpu(ev->interval),
6260                              le16_to_cpu(ev->latency),
6261                              le16_to_cpu(ev->supervision_timeout));
6262 }
6263
/* Handle the HCI LE Advertising Set Terminated event.
 *
 * On error status the advertising instance is removed; if no instance
 * remains enabled the HCI_LE_ADV flag is cleared. On success the
 * terminating set is disabled and, when the termination was caused by a
 * connection, the connection's resp_addr is fixed up with the random
 * address that was actually used by the advertising set.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* If any other instance is still enabled, advertising
		 * continues and the flag must stay set.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* resp_addr only needs fixing up when a random address was
		 * in use and it has not been set yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 is the legacy/default set which uses the
		 * controller-wide random address.
		 */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6333
6334 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6335                                             struct sk_buff *skb)
6336 {
6337         struct hci_ev_le_conn_update_complete *ev = data;
6338         struct hci_conn *conn;
6339
6340         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6341
6342         if (ev->status)
6343                 return;
6344
6345         hci_dev_lock(hdev);
6346
6347         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6348         if (conn) {
6349 #ifdef TIZEN_BT
6350                 if (ev->status) {
6351                         hci_dev_unlock(hdev);
6352                         mgmt_le_conn_update_failed(hdev, &conn->dst,
6353                                 conn->type, conn->dst_type, ev->status);
6354                         return;
6355                 }
6356 #endif
6357                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6358                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6359                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6360         }
6361
6362         hci_dev_unlock(hdev);
6363
6364 #ifdef TIZEN_BT
6365         mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
6366                                 conn->dst_type, conn->le_conn_interval,
6367                                 conn->le_conn_latency, conn->le_supv_timeout);
6368 #endif
6369 }
6370
/* This function requires the caller holds hdev->lock */
/* Decide whether an advertising report for @addr should trigger an
 * outgoing LE connection, and initiate it if so.
 *
 * Returns the hci_conn for the connection attempt, or NULL when no
 * connection should be (or could be) made.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6462
/* Process one (legacy-format) advertising report.
 *
 * Validates the report, resolves RPAs to identity addresses, possibly
 * triggers a pending LE connection, and forwards the report to mgmt as a
 * device-found event, merging ADV_IND/ADV_SCAN_IND with a following
 * SCAN_RSP where applicable (merging is disabled under TIZEN_BT).
 *
 * @direct_addr: non-NULL only for LE Direct Advertising Reports
 * @ext_adv:     report originated from the extended advertising event
 * @ctl_time:    timestamp originates from the controller (unused here)
 * @instant:     time reference passed through to mgmt
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv, bool ctl_time, u64 instant)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	/* Legacy advertising data is limited to 31 bytes. */
	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type);
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

#ifndef TIZEN_BT
		/* Handle all adv packet in platform */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;
#endif

#ifdef TIZEN_BT
		mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, type);
#else
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
#endif
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;

#ifdef TIZEN_BT
	/* Disable adv ind and scan rsp merging */
	mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, type);
#else
	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
#endif
}
6688
/* Handle the HCI LE Advertising Report event.
 *
 * The event payload contains ev->num variable-length reports back to
 * back; each is pulled off the skb and handed to process_adv_report().
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the advertising data plus the trailing RSSI byte. */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= HCI_MAX_AD_LENGTH) {
			/* The RSSI byte immediately follows the adv data. */
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}
6727
6728 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6729 {
6730         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6731                 switch (evt_type) {
6732                 case LE_LEGACY_ADV_IND:
6733                         return LE_ADV_IND;
6734                 case LE_LEGACY_ADV_DIRECT_IND:
6735                         return LE_ADV_DIRECT_IND;
6736                 case LE_LEGACY_ADV_SCAN_IND:
6737                         return LE_ADV_SCAN_IND;
6738                 case LE_LEGACY_NONCONN_IND:
6739                         return LE_ADV_NONCONN_IND;
6740                 case LE_LEGACY_SCAN_RSP_ADV:
6741                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6742                         return LE_ADV_SCAN_RSP;
6743                 }
6744
6745                 goto invalid;
6746         }
6747
6748         if (evt_type & LE_EXT_ADV_CONN_IND) {
6749                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6750                         return LE_ADV_DIRECT_IND;
6751
6752                 return LE_ADV_IND;
6753         }
6754
6755         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6756                 return LE_ADV_SCAN_RSP;
6757
6758         if (evt_type & LE_EXT_ADV_SCAN_IND)
6759                 return LE_ADV_SCAN_IND;
6760
6761         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6762             evt_type & LE_EXT_ADV_DIRECT_IND)
6763                 return LE_ADV_NONCONN_IND;
6764
6765 invalid:
6766         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6767                                evt_type);
6768
6769         return LE_ADV_INVALID;
6770 }
6771
/* Handle an LE Extended Advertising Report event: walk the ev->num
 * variable-length reports in the skb and forward each valid one to
 * process_adv_report() as its legacy-equivalent advertising type.
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	/* One timestamp shared by all reports carried in this event */
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		/* Pull the fixed-size report header; bail out if the skb
		 * is shorter than advertised.
		 */
		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the variable-length advertising data that follows */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		evt_type = __le16_to_cpu(info->type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			/* Reports from non-legacy PDUs are flagged as
			 * extended (second-to-last argument).
			 */
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}
6810
6811 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6812 {
6813         struct hci_cp_le_pa_term_sync cp;
6814
6815         memset(&cp, 0, sizeof(cp));
6816         cp.handle = handle;
6817
6818         return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6819 }
6820
/* Handle an LE Periodic Advertising Sync Established event.  Clears the
 * pending HCI_PA_SYNC flag and, if no ISO listener accepts the sync,
 * terminates it again right away.
 */
static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	/* Sync attempt finished; allow a new one to be started */
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	/* Give the ISO layer a chance to claim the sync; tear it down if
	 * nobody is willing to accept it.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		hci_le_pa_term_sync(hdev, ev->handle);

	hci_dev_unlock(hdev);
}
6843
/* Handle an LE Read Remote Features Complete event: store the remote
 * feature bits and, if the connection was waiting in BT_CONFIG, move it
 * to BT_CONNECTED and notify the upper layers.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			/* Complete connection setup regardless of outcome;
			 * status tells the upper layer whether it succeeded.
			 */
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
6885
/* Handle an LE Long Term Key Request event: look up a matching LTK for
 * the connection and reply with it, or send a negative reply if no
 * usable key is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad up to the reply's full key size */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* STKs are single-use: remove the key once consumed */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No matching key: tell the controller encryption cannot proceed */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6950
6951 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6952                                       u8 reason)
6953 {
6954         struct hci_cp_le_conn_param_req_neg_reply cp;
6955
6956         cp.handle = cpu_to_le16(handle);
6957         cp.reason = reason;
6958
6959         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6960                      &cp);
6961 }
6962
/* Handle an LE Remote Connection Parameter Request event: validate the
 * proposed parameters, optionally cache them for a known peer and notify
 * mgmt, then accept the request.  Invalid requests or requests for an
 * unknown/disconnected handle get a negative reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Range/consistency check of the requested parameters */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* If the peer is known, remember the accepted parameters so
		 * future connections start with them; store_hint tells mgmt
		 * whether userspace should persist them too.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request, echoing the peer's values back unchanged */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
7022
7023 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
7024                                          struct sk_buff *skb)
7025 {
7026         struct hci_ev_le_direct_adv_report *ev = data;
7027         u64 instant = jiffies;
7028         int i;
7029
7030         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
7031                                 flex_array_size(ev, info, ev->num)))
7032                 return;
7033
7034         if (!ev->num)
7035                 return;
7036
7037         hci_dev_lock(hdev);
7038
7039         for (i = 0; i < ev->num; i++) {
7040                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
7041
7042                 process_adv_report(hdev, info->type, &info->bdaddr,
7043                                    info->bdaddr_type, &info->direct_addr,
7044                                    info->direct_addr_type, info->rssi, NULL, 0,
7045                                    false, false, instant);
7046         }
7047
7048         hci_dev_unlock(hdev);
7049 }
7050
7051 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
7052                                   struct sk_buff *skb)
7053 {
7054         struct hci_ev_le_phy_update_complete *ev = data;
7055         struct hci_conn *conn;
7056
7057         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7058
7059         if (ev->status)
7060                 return;
7061
7062         hci_dev_lock(hdev);
7063
7064         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
7065         if (!conn)
7066                 goto unlock;
7067
7068         conn->le_tx_phy = ev->tx_phy;
7069         conn->le_rx_phy = ev->rx_phy;
7070
7071 unlock:
7072         hci_dev_unlock(hdev);
7073 }
7074
/* Handle an LE CIS Established event: for peripheral-role connections
 * copy the negotiated QoS parameters into the connection, then either
 * complete the connection setup or tear it down on error.
 */
static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	u16 handle = __le16_to_cpu(ev->handle);

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != ISO_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->role == HCI_ROLE_SLAVE) {
		__le32 interval;

		memset(&interval, 0, sizeof(interval));

		/* c_latency/p_latency are 3-byte little-endian fields; they
		 * are widened to 32 bits via a zeroed __le32.
		 *
		 * NOTE(review): iso_qos interval is populated from the
		 * ev->c_latency/p_latency fields while iso_qos latency comes
		 * from ev->interval — verify this mapping against the Core
		 * spec definition of the CIS Established event.
		 */
		memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
		conn->iso_qos.in.interval = le32_to_cpu(interval);
		memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
		conn->iso_qos.out.interval = le32_to_cpu(interval);
		conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
		conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
		conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
		conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
		conn->iso_qos.in.phy = ev->c_phy;
		conn->iso_qos.out.phy = ev->p_phy;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	/* Establishment failed: notify upper layers and drop the conn */
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
7132
7133 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
7134 {
7135         struct hci_cp_le_reject_cis cp;
7136
7137         memset(&cp, 0, sizeof(cp));
7138         cp.handle = handle;
7139         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
7140         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
7141 }
7142
7143 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
7144 {
7145         struct hci_cp_le_accept_cis cp;
7146
7147         memset(&cp, 0, sizeof(cp));
7148         cp.handle = handle;
7149         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
7150 }
7151
/* Handle an LE CIS Request event: consult the ISO layer whether to
 * accept, create (or reuse) the CIS connection object, and either accept
 * the request immediately or defer the decision to userspace.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	/* A CIS is always bound to an existing ACL connection */
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	/* Ask the ISO layer whether anyone listens for this peer */
	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	/* Reuse an existing CIS conn for this handle, else create one */
	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
		if (!cis) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
		cis->handle = cis_handle;
	}

	cis->iso_qos.cig = ev->cig_id;
	cis->iso_qos.cis = ev->cis_id;

	/* Without HCI_PROTO_DEFER accept right away; otherwise park the
	 * connection in BT_CONNECT2 and let the socket layer decide.
	 */
	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
7202
7203 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
7204                                            struct sk_buff *skb)
7205 {
7206         struct hci_evt_le_create_big_complete *ev = data;
7207         struct hci_conn *conn;
7208
7209         BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
7210
7211         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
7212                                 flex_array_size(ev, bis_handle, ev->num_bis)))
7213                 return;
7214
7215         hci_dev_lock(hdev);
7216
7217         conn = hci_conn_hash_lookup_big(hdev, ev->handle);
7218         if (!conn)
7219                 goto unlock;
7220
7221         if (conn->type != ISO_LINK) {
7222                 bt_dev_err(hdev,
7223                            "Invalid connection link type handle 0x%2.2x",
7224                            ev->handle);
7225                 goto unlock;
7226         }
7227
7228         if (ev->num_bis)
7229                 conn->handle = __le16_to_cpu(ev->bis_handle[0]);
7230
7231         if (!ev->status) {
7232                 conn->state = BT_CONNECTED;
7233                 hci_debugfs_create_conn(conn);
7234                 hci_conn_add_sysfs(conn);
7235                 hci_iso_setup_path(conn);
7236                 goto unlock;
7237         }
7238
7239         hci_connect_cfm(conn, ev->status);
7240         hci_conn_del(conn);
7241
7242 unlock:
7243         hci_dev_unlock(hdev);
7244 }
7245
/* Handle an LE BIG Sync Established event: for every synced BIS create
 * (or reuse) a peripheral-role ISO connection, fill in its QoS
 * parameters and notify the upper layers.
 */
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_estabilished *ev = data;
	struct hci_conn *bis;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The event carries one handle per BIS; validate the
	 * variable-length tail before dereferencing it.
	 */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		/* Reuse an existing conn for this handle, else create one.
		 * The peer address is unknown for a broadcast sync, hence
		 * BDADDR_ANY.
		 */
		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE);
			if (!bis)
				continue;
			bis->handle = handle;
		}

		bis->iso_qos.big = ev->handle;
		/* ev->latency is a 3-byte little-endian field; widen it to
		 * 32 bits via a zeroed __le32.
		 */
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);

		hci_connect_cfm(bis, ev->status);
	}

	hci_dev_unlock(hdev);
}
7290
7291 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7292                                            struct sk_buff *skb)
7293 {
7294         struct hci_evt_le_big_info_adv_report *ev = data;
7295         int mask = hdev->link_mode;
7296         __u8 flags = 0;
7297
7298         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7299
7300         hci_dev_lock(hdev);
7301
7302         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7303         if (!(mask & HCI_LM_ACCEPT))
7304                 hci_le_pa_term_sync(hdev, ev->sync_handle);
7305
7306         hci_dev_unlock(hdev);
7307 }
7308
/* Declare a handler entry for a variable-length LE subevent */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare a handler entry for a fixed-length LE subevent */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Declare a handler entry whose payload is just an hci_ev_status */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entries in this table shall have their position according to the subevent
 * opcode they handle so the use of the macros above is recommended since it
 * does attempt to initialize at its proper index using Designated Initializers
 * that way events without a callback function can be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
#ifdef TIZEN_BT
	/* [0x07 = HCI_EV_LE_DATA_LEN_CHANGE] */
	HCI_LE_EV(HCI_EV_LE_DATA_LEN_CHANGE,
		  hci_le_data_length_changed_complete_evt,
		  sizeof(struct hci_ev_le_data_len_change)),
#endif
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};
7404
/* Dispatch an LE Meta event to its subevent handler via hci_le_ev_table,
 * after completing any pending LE command that was waiting for this
 * subevent and validating the payload length bounds.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	/* Table is indexed by subevent opcode; unhandled subevents have a
	 * NULL func and are silently ignored.
	 */
	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len size it still be
	 * possible to partially parse the event so leave to callback to
	 * decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	/* Advance the skb past the fixed part; handlers pull any
	 * variable-length tail themselves.
	 */
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}
7447
/* Check whether @skb is the completion event for @opcode.  If @event is
 * non-zero the caller waits for that specific event type; otherwise a
 * Command Complete for @opcode is expected.  Returns true when the skb
 * matches (with its headers pulled), false otherwise.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	/* Caller asked for a specific event: just compare the type */
	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	/* The Command Complete must match the opcode we are waiting for */
	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
7491
7492 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7493                                   struct sk_buff *skb)
7494 {
7495         struct hci_ev_le_advertising_info *adv;
7496         struct hci_ev_le_direct_adv_info *direct_adv;
7497         struct hci_ev_le_ext_adv_info *ext_adv;
7498         const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7499         const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7500
7501         hci_dev_lock(hdev);
7502
7503         /* If we are currently suspended and this is the first BT event seen,
7504          * save the wake reason associated with the event.
7505          */
7506         if (!hdev->suspended || hdev->wake_reason)
7507                 goto unlock;
7508
7509         /* Default to remote wake. Values for wake_reason are documented in the
7510          * Bluez mgmt api docs.
7511          */
7512         hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7513
7514         /* Once configured for remote wakeup, we should only wake up for
7515          * reconnections. It's useful to see which device is waking us up so
7516          * keep track of the bdaddr of the connection event that woke us up.
7517          */
7518         if (event == HCI_EV_CONN_REQUEST) {
7519                 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7520                 hdev->wake_addr_type = BDADDR_BREDR;
7521         } else if (event == HCI_EV_CONN_COMPLETE) {
7522                 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7523                 hdev->wake_addr_type = BDADDR_BREDR;
7524         } else if (event == HCI_EV_LE_META) {
7525                 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7526                 u8 subevent = le_ev->subevent;
7527                 u8 *ptr = &skb->data[sizeof(*le_ev)];
7528                 u8 num_reports = *ptr;
7529
7530                 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7531                      subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7532                      subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7533                     num_reports) {
7534                         adv = (void *)(ptr + 1);
7535                         direct_adv = (void *)(ptr + 1);
7536                         ext_adv = (void *)(ptr + 1);
7537
7538                         switch (subevent) {
7539                         case HCI_EV_LE_ADVERTISING_REPORT:
7540                                 bacpy(&hdev->wake_addr, &adv->bdaddr);
7541                                 hdev->wake_addr_type = adv->bdaddr_type;
7542                                 break;
7543                         case HCI_EV_LE_DIRECT_ADV_REPORT:
7544                                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7545                                 hdev->wake_addr_type = direct_adv->bdaddr_type;
7546                                 break;
7547                         case HCI_EV_LE_EXT_ADV_REPORT:
7548                                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7549                                 hdev->wake_addr_type = ext_adv->bdaddr_type;
7550                                 break;
7551                         }
7552                 }
7553         } else {
7554                 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7555         }
7556
7557 unlock:
7558         hci_dev_unlock(hdev);
7559 }
7560
/* Declare an hci_ev_table entry for a variable-length event: packets
 * shorter than _min_len are rejected by hci_event_func(), packets longer
 * than _max_len only trigger a rate-limited warning before the handler
 * runs.
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare a table entry for an event with a fixed-length payload */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Declare a table entry for an event that carries only a status byte */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Declare a variable-length entry whose handler may also complete a
 * pending HCI request: the func_req variant receives the opcode/status
 * out-parameters and the req_complete callbacks in addition to the event
 * payload.
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length variant of HCI_EV_REQ_VL */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
7585
/* Entries in this table shall have their position according to the event
 * opcode they handle, so the use of the macros above is recommended since
 * they initialize each entry at its proper index using designated
 * initializers; that way events without a callback function simply don't
 * get an entry.
 */
static const struct hci_ev {
	bool req;	/* if true, func_req is used (may complete a request) */
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;	/* shorter packets are rejected */
	u16  max_len;	/* longer packets only trigger a warning */
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
#ifdef TIZEN_BT
	/* [0xFF = HCI_EV_VENDOR_SPECIFIC] */
	HCI_EV(HCI_EV_VENDOR_SPECIFIC, hci_vendor_specific_evt,
	       sizeof(struct hci_ev_vendor_specific)),
#else
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
#endif
};
7749
7750 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7751                            u16 *opcode, u8 *status,
7752                            hci_req_complete_t *req_complete,
7753                            hci_req_complete_skb_t *req_complete_skb)
7754 {
7755         const struct hci_ev *ev = &hci_ev_table[event];
7756         void *data;
7757
7758         if (!ev->func)
7759                 return;
7760
7761         if (skb->len < ev->min_len) {
7762                 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7763                            event, skb->len, ev->min_len);
7764                 return;
7765         }
7766
7767         /* Just warn if the length is over max_len size it still be
7768          * possible to partially parse the event so leave to callback to
7769          * decide if that is acceptable.
7770          */
7771         if (skb->len > ev->max_len)
7772                 bt_dev_warn_ratelimited(hdev,
7773                                         "unexpected event 0x%2.2x length: %u > %u",
7774                                         event, skb->len, ev->max_len);
7775
7776         data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7777         if (!data)
7778                 return;
7779
7780         if (ev->req)
7781                 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7782                              req_complete_skb);
7783         else
7784                 ev->func(hdev, data, skb);
7785 }
7786
/* Entry point for a received HCI event packet.  Validates the header,
 * dispatches the event through hci_ev_table and, if the event completes
 * a pending HCI request, invokes the stored completion callback.
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* Every event carries at least the event header */
	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Keep a clone of the most recently received event, replacing any
	 * previously stored one.  NOTE(review): presumably consumed by
	 * hci_recv_event_data() elsewhere -- confirm before relying on it.
	 */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	/* Event code 0x00 is not a valid event */
	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE (OGF 0x08 is the
	 * LE Controller command group)
	 */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	/* Strip the event header so handlers see only the payload */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* If no command-complete parameters could be extracted,
		 * complete with a NULL skb instead of the clone.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	/* kfree_skb(NULL) is a no-op, so both frees are unconditional */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}