Bluetooth: Read host suggested default le data length
[platform/kernel/linux-starfive.git] net/bluetooth/hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 #include "eir.h"
40
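/* A 16-byte all-zero key value, used elsewhere in this file as a comparison
 * value so that obviously invalid (all-zero) link keys can be detected.
 */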
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42                  "\x00\x00\x00\x00\x00\x00\x00\x00"
43
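/* Convert a timeout given in seconds to jiffies via msecs_to_jiffies(). */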
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45
46 /* Handle HCI Event packets */
47
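/* Helpers that pull a fixed-size structure from an event skb. They return
 * NULL and log the offending event code or opcode when the packet is shorter
 * than expected, so callers can bail out on malformed packets.
 */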
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
49                              u8 ev, size_t len)
50 {
51         void *data;
52
53         data = skb_pull_data(skb, len);
54         if (!data)
55                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
56
57         return data;
58 }
59
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
61                              u16 op, size_t len)
62 {
63         void *data;
64
65         data = skb_pull_data(skb, len);
66         if (!data)
67                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
68
69         return data;
70 }
71
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
73                                 u8 ev, size_t len)
74 {
75         void *data;
76
77         data = skb_pull_data(skb, len);
78         if (!data)
79                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
80
81         return data;
82 }
83
84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
85                                 struct sk_buff *skb)
86 {
87         struct hci_ev_status *rp = data;
88
89         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
90
91         /* It is possible that we receive the Inquiry Complete event right
92          * before the Inquiry Cancel Command Complete event, in which case
93          * the latter event should have a status of Command Disallowed
94          * (0x0c). This should not be treated as an error, since we actually
95          * achieve what Inquiry Cancel wants to achieve, which is to end
96          * the last Inquiry session.
97          */
98         if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
99                 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
100                 rp->status = 0x00;
101         }
102
103         if (rp->status)
104                 return rp->status;
105
106         clear_bit(HCI_INQUIRY, &hdev->flags);
107         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
108         wake_up_bit(&hdev->flags, HCI_INQUIRY);
109
110         hci_dev_lock(hdev);
111         /* Set discovery state to stopped if we're not doing LE active
112          * scanning.
113          */
114         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
115             hdev->le_scan_type != LE_SCAN_ACTIVE)
116                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
117         hci_dev_unlock(hdev);
118
119         hci_conn_check_pending(hdev);
120
121         return rp->status;
122 }
123
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
125                               struct sk_buff *skb)
126 {
127         struct hci_ev_status *rp = data;
128
129         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
130
131         if (rp->status)
132                 return rp->status;
133
134         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
135
136         return rp->status;
137 }
138
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
140                                    struct sk_buff *skb)
141 {
142         struct hci_ev_status *rp = data;
143
144         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
145
146         if (rp->status)
147                 return rp->status;
148
149         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
150
151         hci_conn_check_pending(hdev);
152
153         return rp->status;
154 }
155
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
157                                         struct sk_buff *skb)
158 {
159         struct hci_ev_status *rp = data;
160
161         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
162
163         return rp->status;
164 }
165
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
167                                 struct sk_buff *skb)
168 {
169         struct hci_rp_role_discovery *rp = data;
170         struct hci_conn *conn;
171
172         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
173
174         if (rp->status)
175                 return rp->status;
176
177         hci_dev_lock(hdev);
178
179         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180         if (conn)
181                 conn->role = rp->role;
182
183         hci_dev_unlock(hdev);
184
185         return rp->status;
186 }
187
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
189                                   struct sk_buff *skb)
190 {
191         struct hci_rp_read_link_policy *rp = data;
192         struct hci_conn *conn;
193
194         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
195
196         if (rp->status)
197                 return rp->status;
198
199         hci_dev_lock(hdev);
200
201         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
202         if (conn)
203                 conn->link_policy = __le16_to_cpu(rp->policy);
204
205         hci_dev_unlock(hdev);
206
207         return rp->status;
208 }
209
210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
211                                    struct sk_buff *skb)
212 {
213         struct hci_rp_write_link_policy *rp = data;
214         struct hci_conn *conn;
215         void *sent;
216 #ifdef TIZEN_BT
217         struct hci_cp_write_link_policy cp;
218         struct hci_conn *sco_conn;
219 #endif
220
221         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
222
223         if (rp->status)
224                 return rp->status;
225
226         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
227         if (!sent)
228                 return rp->status;
229
230         hci_dev_lock(hdev);
231
232         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
233         if (conn)
234                 conn->link_policy = get_unaligned_le16(sent + 2);
235
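        /* TIZEN: sniff mode is not allowed while an SCO connection to the
         * same peer is active, so strip HCI_LP_SNIFF from the link policy
         * again if needed.
         */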
236 #ifdef TIZEN_BT
237         sco_conn = hci_conn_hash_lookup_sco(hdev);
238         if (conn && sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
239             conn->link_policy & HCI_LP_SNIFF) {
240                 BT_ERR("SNIFF is not allowed during sco connection");
241                 cp.handle = __cpu_to_le16(conn->handle);
242                 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
243                 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
244         }
245 #endif
246
247         hci_dev_unlock(hdev);
248
249         return rp->status;
250 }
251
252 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
253                                       struct sk_buff *skb)
254 {
255         struct hci_rp_read_def_link_policy *rp = data;
256
257         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
258
259         if (rp->status)
260                 return rp->status;
261
262         hdev->link_policy = __le16_to_cpu(rp->policy);
263
264         return rp->status;
265 }
266
267 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
268                                        struct sk_buff *skb)
269 {
270         struct hci_ev_status *rp = data;
271         void *sent;
272
273         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
274
275         if (rp->status)
276                 return rp->status;
277
278         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
279         if (!sent)
280                 return rp->status;
281
282         hdev->link_policy = get_unaligned_le16(sent);
283
284         return rp->status;
285 }
286
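/* HCI_Reset wipes the controller state, so clear all volatile flags and any
 * cached advertising data, scan response data, accept list and resolving
 * list entries to stay in sync with the controller.
 */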
287 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
288 {
289         struct hci_ev_status *rp = data;
290
291         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
292
293         clear_bit(HCI_RESET, &hdev->flags);
294
295         if (rp->status)
296                 return rp->status;
297
298         /* Reset all non-persistent flags */
299         hci_dev_clear_volatile_flags(hdev);
300
301         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
302
303         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
304         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
305
306         memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
307         hdev->adv_data_len = 0;
308
309         memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
310         hdev->scan_rsp_data_len = 0;
311
312         hdev->le_scan_type = LE_SCAN_PASSIVE;
313
314         hdev->ssp_debug_mode = 0;
315
316         hci_bdaddr_list_clear(&hdev->le_accept_list);
317         hci_bdaddr_list_clear(&hdev->le_resolv_list);
318
319         return rp->status;
320 }
321
322 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
323                                       struct sk_buff *skb)
324 {
325         struct hci_rp_read_stored_link_key *rp = data;
326         struct hci_cp_read_stored_link_key *sent;
327
328         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
329
330         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
331         if (!sent)
332                 return rp->status;
333
334         if (!rp->status && sent->read_all == 0x01) {
335                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
336                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
337         }
338
339         return rp->status;
340 }
341
342 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
343                                         struct sk_buff *skb)
344 {
345         struct hci_rp_delete_stored_link_key *rp = data;
346         u16 num_keys;
347
348         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
349
350         if (rp->status)
351                 return rp->status;
352
353         num_keys = le16_to_cpu(rp->num_keys);
354
355         if (num_keys <= hdev->stored_num_keys)
356                 hdev->stored_num_keys -= num_keys;
357         else
358                 hdev->stored_num_keys = 0;
359
360         return rp->status;
361 }
362
363 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
364                                   struct sk_buff *skb)
365 {
366         struct hci_ev_status *rp = data;
367         void *sent;
368
369         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
370
371         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
372         if (!sent)
373                 return rp->status;
374
375         hci_dev_lock(hdev);
376
377         if (hci_dev_test_flag(hdev, HCI_MGMT))
378                 mgmt_set_local_name_complete(hdev, sent, rp->status);
379         else if (!rp->status)
380                 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
381
382         hci_dev_unlock(hdev);
383
384         return rp->status;
385 }
386
387 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
388                                  struct sk_buff *skb)
389 {
390         struct hci_rp_read_local_name *rp = data;
391
392         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
393
394         if (rp->status)
395                 return rp->status;
396
397         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
398             hci_dev_test_flag(hdev, HCI_CONFIG))
399                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
400
401         return rp->status;
402 }
403
404 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
405                                    struct sk_buff *skb)
406 {
407         struct hci_ev_status *rp = data;
408         void *sent;
409
410         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
411
412         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
413         if (!sent)
414                 return rp->status;
415
416         hci_dev_lock(hdev);
417
418         if (!rp->status) {
419                 __u8 param = *((__u8 *) sent);
420
421                 if (param == AUTH_ENABLED)
422                         set_bit(HCI_AUTH, &hdev->flags);
423                 else
424                         clear_bit(HCI_AUTH, &hdev->flags);
425         }
426
427         if (hci_dev_test_flag(hdev, HCI_MGMT))
428                 mgmt_auth_enable_complete(hdev, rp->status);
429
430         hci_dev_unlock(hdev);
431
432         return rp->status;
433 }
434
435 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
436                                     struct sk_buff *skb)
437 {
438         struct hci_ev_status *rp = data;
439         __u8 param;
440         void *sent;
441
442         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
443
444         if (rp->status)
445                 return rp->status;
446
447         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
448         if (!sent)
449                 return rp->status;
450
451         param = *((__u8 *) sent);
452
453         if (param)
454                 set_bit(HCI_ENCRYPT, &hdev->flags);
455         else
456                 clear_bit(HCI_ENCRYPT, &hdev->flags);
457
458         return rp->status;
459 }
460
461 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
462                                    struct sk_buff *skb)
463 {
464         struct hci_ev_status *rp = data;
465         __u8 param;
466         void *sent;
467
468         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
469
470         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
471         if (!sent)
472                 return rp->status;
473
474         param = *((__u8 *) sent);
475
476         hci_dev_lock(hdev);
477
478         if (rp->status) {
479                 hdev->discov_timeout = 0;
480                 goto done;
481         }
482
483         if (param & SCAN_INQUIRY)
484                 set_bit(HCI_ISCAN, &hdev->flags);
485         else
486                 clear_bit(HCI_ISCAN, &hdev->flags);
487
488         if (param & SCAN_PAGE)
489                 set_bit(HCI_PSCAN, &hdev->flags);
490         else
491                 clear_bit(HCI_PSCAN, &hdev->flags);
492
493 done:
494         hci_dev_unlock(hdev);
495
496         return rp->status;
497 }
498
499 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
500                                   struct sk_buff *skb)
501 {
502         struct hci_ev_status *rp = data;
503         struct hci_cp_set_event_filter *cp;
504         void *sent;
505
506         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
507
508         if (rp->status)
509                 return rp->status;
510
511         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
512         if (!sent)
513                 return rp->status;
514
515         cp = (struct hci_cp_set_event_filter *)sent;
516
517         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
518                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
519         else
520                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
521
522         return rp->status;
523 }
524
525 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
526                                    struct sk_buff *skb)
527 {
528         struct hci_rp_read_class_of_dev *rp = data;
529
530         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
531
532         if (rp->status)
533                 return rp->status;
534
535         memcpy(hdev->dev_class, rp->dev_class, 3);
536
537         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
538                    hdev->dev_class[1], hdev->dev_class[0]);
539
540         return rp->status;
541 }
542
543 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
544                                     struct sk_buff *skb)
545 {
546         struct hci_ev_status *rp = data;
547         void *sent;
548
549         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
550
551         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
552         if (!sent)
553                 return rp->status;
554
555         hci_dev_lock(hdev);
556
557         if (!rp->status)
558                 memcpy(hdev->dev_class, sent, 3);
559
560         if (hci_dev_test_flag(hdev, HCI_MGMT))
561                 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
562
563         hci_dev_unlock(hdev);
564
565         return rp->status;
566 }
567
568 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
569                                     struct sk_buff *skb)
570 {
571         struct hci_rp_read_voice_setting *rp = data;
572         __u16 setting;
573
574         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
575
576         if (rp->status)
577                 return rp->status;
578
579         setting = __le16_to_cpu(rp->voice_setting);
580
581         if (hdev->voice_setting == setting)
582                 return rp->status;
583
584         hdev->voice_setting = setting;
585
586         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
587
588         if (hdev->notify)
589                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
590
591         return rp->status;
592 }
593
594 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
595                                      struct sk_buff *skb)
596 {
597         struct hci_ev_status *rp = data;
598         __u16 setting;
599         void *sent;
600
601         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
602
603         if (rp->status)
604                 return rp->status;
605
606         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
607         if (!sent)
608                 return rp->status;
609
610         setting = get_unaligned_le16(sent);
611
612         if (hdev->voice_setting == setting)
613                 return rp->status;
614
615         hdev->voice_setting = setting;
616
617         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
618
619         if (hdev->notify)
620                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
621
622         return rp->status;
623 }
624
625 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
626                                         struct sk_buff *skb)
627 {
628         struct hci_rp_read_num_supported_iac *rp = data;
629
630         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
631
632         if (rp->status)
633                 return rp->status;
634
635         hdev->num_iac = rp->num_iac;
636
637         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
638
639         return rp->status;
640 }
641
642 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
643                                 struct sk_buff *skb)
644 {
645         struct hci_ev_status *rp = data;
646         struct hci_cp_write_ssp_mode *sent;
647
648         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
649
650         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
651         if (!sent)
652                 return rp->status;
653
654         hci_dev_lock(hdev);
655
656         if (!rp->status) {
657                 if (sent->mode)
658                         hdev->features[1][0] |= LMP_HOST_SSP;
659                 else
660                         hdev->features[1][0] &= ~LMP_HOST_SSP;
661         }
662
663         if (!rp->status) {
664                 if (sent->mode)
665                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
666                 else
667                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
668         }
669
670         hci_dev_unlock(hdev);
671
672         return rp->status;
673 }
674
675 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
676                                   struct sk_buff *skb)
677 {
678         struct hci_ev_status *rp = data;
679         struct hci_cp_write_sc_support *sent;
680
681         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
682
683         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
684         if (!sent)
685                 return rp->status;
686
687         hci_dev_lock(hdev);
688
689         if (!rp->status) {
690                 if (sent->support)
691                         hdev->features[1][0] |= LMP_HOST_SC;
692                 else
693                         hdev->features[1][0] &= ~LMP_HOST_SC;
694         }
695
696         if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
697                 if (sent->support)
698                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
699                 else
700                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
701         }
702
703         hci_dev_unlock(hdev);
704
705         return rp->status;
706 }
707
708 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
709                                     struct sk_buff *skb)
710 {
711         struct hci_rp_read_local_version *rp = data;
712
713         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
714
715         if (rp->status)
716                 return rp->status;
717
718         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
719             hci_dev_test_flag(hdev, HCI_CONFIG)) {
720                 hdev->hci_ver = rp->hci_ver;
721                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
722                 hdev->lmp_ver = rp->lmp_ver;
723                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
724                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
725         }
726
727         return rp->status;
728 }
729
730 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
731                                    struct sk_buff *skb)
732 {
733         struct hci_rp_read_enc_key_size *rp = data;
734         struct hci_conn *conn;
735         u16 handle;
736         u8 status = rp->status;
737
738         bt_dev_dbg(hdev, "status 0x%2.2x", status);
739
740         handle = le16_to_cpu(rp->handle);
741
742         hci_dev_lock(hdev);
743
744         conn = hci_conn_hash_lookup_handle(hdev, handle);
745         if (!conn) {
746                 status = 0xFF;
747                 goto done;
748         }
749
750         /* While unexpected, the read_enc_key_size command may fail. The most
751          * secure approach is to then assume the key size is 0 to force a
752          * disconnection.
753          */
754         if (status) {
755                 bt_dev_err(hdev, "failed to read key size for handle %u",
756                            handle);
757                 conn->enc_key_size = 0;
758         } else {
759                 conn->enc_key_size = rp->key_size;
760                 status = 0;
761         }
762
763         hci_encrypt_cfm(conn, 0);
764
765 done:
766         hci_dev_unlock(hdev);
767
768         return status;
769 }
770
771 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
772                                      struct sk_buff *skb)
773 {
774         struct hci_rp_read_local_commands *rp = data;
775
776         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
777
778         if (rp->status)
779                 return rp->status;
780
781         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
782             hci_dev_test_flag(hdev, HCI_CONFIG))
783                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
784
785         return rp->status;
786 }
787
788 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
789                                            struct sk_buff *skb)
790 {
791         struct hci_rp_read_auth_payload_to *rp = data;
792         struct hci_conn *conn;
793
794         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
795
796         if (rp->status)
797                 return rp->status;
798
799         hci_dev_lock(hdev);
800
801         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
802         if (conn)
803                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
804
805         hci_dev_unlock(hdev);
806
807         return rp->status;
808 }
809
810 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
811                                             struct sk_buff *skb)
812 {
813         struct hci_rp_write_auth_payload_to *rp = data;
814         struct hci_conn *conn;
815         void *sent;
816
817         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
818
819         if (rp->status)
820                 return rp->status;
821
822         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
823         if (!sent)
824                 return rp->status;
825
826         hci_dev_lock(hdev);
827
828         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
829         if (conn)
830                 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
831
832         hci_dev_unlock(hdev);
833
834         return rp->status;
835 }
836
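/* Cache the controller's LMP feature bits and derive the supported ACL
 * packet types and (e)SCO link types from them.
 */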
837 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
838                                      struct sk_buff *skb)
839 {
840         struct hci_rp_read_local_features *rp = data;
841
842         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
843
844         if (rp->status)
845                 return rp->status;
846
847         memcpy(hdev->features, rp->features, 8);
848
849         /* Adjust default settings according to the features
850          * supported by the device. */
851
852         if (hdev->features[0][0] & LMP_3SLOT)
853                 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
854
855         if (hdev->features[0][0] & LMP_5SLOT)
856                 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
857
858         if (hdev->features[0][1] & LMP_HV2) {
859                 hdev->pkt_type  |= (HCI_HV2);
860                 hdev->esco_type |= (ESCO_HV2);
861         }
862
863         if (hdev->features[0][1] & LMP_HV3) {
864                 hdev->pkt_type  |= (HCI_HV3);
865                 hdev->esco_type |= (ESCO_HV3);
866         }
867
868         if (lmp_esco_capable(hdev))
869                 hdev->esco_type |= (ESCO_EV3);
870
871         if (hdev->features[0][4] & LMP_EV4)
872                 hdev->esco_type |= (ESCO_EV4);
873
874         if (hdev->features[0][4] & LMP_EV5)
875                 hdev->esco_type |= (ESCO_EV5);
876
877         if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
878                 hdev->esco_type |= (ESCO_2EV3);
879
880         if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
881                 hdev->esco_type |= (ESCO_3EV3);
882
883         if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
884                 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
885
886         return rp->status;
887 }
888
889 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
890                                          struct sk_buff *skb)
891 {
892         struct hci_rp_read_local_ext_features *rp = data;
893
894         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
895
896         if (rp->status)
897                 return rp->status;
898
899         if (hdev->max_page < rp->max_page) {
900                 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
901                              &hdev->quirks))
902                         bt_dev_warn(hdev, "broken local ext features page 2");
903                 else
904                         hdev->max_page = rp->max_page;
905         }
906
907         if (rp->page < HCI_MAX_PAGES)
908                 memcpy(hdev->features[rp->page], rp->features, 8);
909
910         return rp->status;
911 }
912
913 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
914                                         struct sk_buff *skb)
915 {
916         struct hci_rp_read_flow_control_mode *rp = data;
917
918         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
919
920         if (rp->status)
921                 return rp->status;
922
923         hdev->flow_ctl_mode = rp->mode;
924
925         return rp->status;
926 }
927
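/* Cache the ACL/SCO buffer sizes and packet counts reported by the
 * controller. Controllers with HCI_QUIRK_FIXUP_BUFFER_SIZE report unusable
 * SCO values, so fall back to a 64-byte MTU and 8 packets for those.
 */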
928 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
929                                   struct sk_buff *skb)
930 {
931         struct hci_rp_read_buffer_size *rp = data;
932
933         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
934
935         if (rp->status)
936                 return rp->status;
937
938         hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
939         hdev->sco_mtu  = rp->sco_mtu;
940         hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
941         hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
942
943         if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
944                 hdev->sco_mtu  = 64;
945                 hdev->sco_pkts = 8;
946         }
947
948         hdev->acl_cnt = hdev->acl_pkts;
949         hdev->sco_cnt = hdev->sco_pkts;
950
951         BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
952                hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
953
954         return rp->status;
955 }
956
957 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
958                               struct sk_buff *skb)
959 {
960         struct hci_rp_read_bd_addr *rp = data;
961
962         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
963
964         if (rp->status)
965                 return rp->status;
966
967         if (test_bit(HCI_INIT, &hdev->flags))
968                 bacpy(&hdev->bdaddr, &rp->bdaddr);
969
970         if (hci_dev_test_flag(hdev, HCI_SETUP))
971                 bacpy(&hdev->setup_addr, &rp->bdaddr);
972
973         return rp->status;
974 }
975
976 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
977                                          struct sk_buff *skb)
978 {
979         struct hci_rp_read_local_pairing_opts *rp = data;
980
981         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
982
983         if (rp->status)
984                 return rp->status;
985
986         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
987             hci_dev_test_flag(hdev, HCI_CONFIG)) {
988                 hdev->pairing_opts = rp->pairing_opts;
989                 hdev->max_enc_key_size = rp->max_key_size;
990         }
991
992         return rp->status;
993 }
994
995 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
996                                          struct sk_buff *skb)
997 {
998         struct hci_rp_read_page_scan_activity *rp = data;
999
1000         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1001
1002         if (rp->status)
1003                 return rp->status;
1004
1005         if (test_bit(HCI_INIT, &hdev->flags)) {
1006                 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1007                 hdev->page_scan_window = __le16_to_cpu(rp->window);
1008         }
1009
1010         return rp->status;
1011 }
1012
1013 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1014                                           struct sk_buff *skb)
1015 {
1016         struct hci_ev_status *rp = data;
1017         struct hci_cp_write_page_scan_activity *sent;
1018
1019         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1020
1021         if (rp->status)
1022                 return rp->status;
1023
1024         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1025         if (!sent)
1026                 return rp->status;
1027
1028         hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1029         hdev->page_scan_window = __le16_to_cpu(sent->window);
1030
1031         return rp->status;
1032 }
1033
1034 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1035                                      struct sk_buff *skb)
1036 {
1037         struct hci_rp_read_page_scan_type *rp = data;
1038
1039         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1040
1041         if (rp->status)
1042                 return rp->status;
1043
1044         if (test_bit(HCI_INIT, &hdev->flags))
1045                 hdev->page_scan_type = rp->type;
1046
1047         return rp->status;
1048 }
1049
1050 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1051                                       struct sk_buff *skb)
1052 {
1053         struct hci_ev_status *rp = data;
1054         u8 *type;
1055
1056         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1057
1058         if (rp->status)
1059                 return rp->status;
1060
1061         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1062         if (type)
1063                 hdev->page_scan_type = *type;
1064
1065         return rp->status;
1066 }
1067
1068 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1069                                       struct sk_buff *skb)
1070 {
1071         struct hci_rp_read_data_block_size *rp = data;
1072
1073         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1074
1075         if (rp->status)
1076                 return rp->status;
1077
1078         hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1079         hdev->block_len = __le16_to_cpu(rp->block_len);
1080         hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1081
1082         hdev->block_cnt = hdev->num_blocks;
1083
1084         BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1085                hdev->block_cnt, hdev->block_len);
1086
1087         return rp->status;
1088 }
1089
1090 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1091                             struct sk_buff *skb)
1092 {
1093         struct hci_rp_read_clock *rp = data;
1094         struct hci_cp_read_clock *cp;
1095         struct hci_conn *conn;
1096
1097         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1098
1099         if (rp->status)
1100                 return rp->status;
1101
1102         hci_dev_lock(hdev);
1103
1104         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1105         if (!cp)
1106                 goto unlock;
1107
1108         if (cp->which == 0x00) {
1109                 hdev->clock = le32_to_cpu(rp->clock);
1110                 goto unlock;
1111         }
1112
1113         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1114         if (conn) {
1115                 conn->clock = le32_to_cpu(rp->clock);
1116                 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1117         }
1118
1119 unlock:
1120         hci_dev_unlock(hdev);
1121         return rp->status;
1122 }
1123
1124 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1125                                      struct sk_buff *skb)
1126 {
1127         struct hci_rp_read_local_amp_info *rp = data;
1128
1129         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1130
1131         if (rp->status)
1132                 return rp->status;
1133
1134         hdev->amp_status = rp->amp_status;
1135         hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1136         hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1137         hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1138         hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1139         hdev->amp_type = rp->amp_type;
1140         hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1141         hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1142         hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1143         hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1144
1145         return rp->status;
1146 }
1147
1148 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1149                                        struct sk_buff *skb)
1150 {
1151         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1152
1153         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1154
1155         if (rp->status)
1156                 return rp->status;
1157
1158         hdev->inq_tx_power = rp->tx_power;
1159
1160         return rp->status;
1161 }
1162
1163 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1164                                              struct sk_buff *skb)
1165 {
1166         struct hci_rp_read_def_err_data_reporting *rp = data;
1167
1168         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1169
1170         if (rp->status)
1171                 return rp->status;
1172
1173         hdev->err_data_reporting = rp->err_data_reporting;
1174
1175         return rp->status;
1176 }
1177
1178 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1179                                               struct sk_buff *skb)
1180 {
1181         struct hci_ev_status *rp = data;
1182         struct hci_cp_write_def_err_data_reporting *cp;
1183
1184         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1185
1186         if (rp->status)
1187                 return rp->status;
1188
1189         cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1190         if (!cp)
1191                 return rp->status;
1192
1193         hdev->err_data_reporting = cp->err_data_reporting;
1194
1195         return rp->status;
1196 }
1197
1198 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1199                                 struct sk_buff *skb)
1200 {
1201         struct hci_rp_pin_code_reply *rp = data;
1202         struct hci_cp_pin_code_reply *cp;
1203         struct hci_conn *conn;
1204
1205         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1206
1207         hci_dev_lock(hdev);
1208
1209         if (hci_dev_test_flag(hdev, HCI_MGMT))
1210                 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1211
1212         if (rp->status)
1213                 goto unlock;
1214
1215         cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1216         if (!cp)
1217                 goto unlock;
1218
1219         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1220         if (conn)
1221                 conn->pin_length = cp->pin_len;
1222
1223 unlock:
1224         hci_dev_unlock(hdev);
1225         return rp->status;
1226 }
1227
1228 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1229                                     struct sk_buff *skb)
1230 {
1231         struct hci_rp_pin_code_neg_reply *rp = data;
1232
1233         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1234
1235         hci_dev_lock(hdev);
1236
1237         if (hci_dev_test_flag(hdev, HCI_MGMT))
1238                 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1239                                                  rp->status);
1240
1241         hci_dev_unlock(hdev);
1242
1243         return rp->status;
1244 }
1245
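/* Cache the LE ACL buffer size and packet count reported by the controller
 * and initialize the available-packet counter from them.
 */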
1246 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1247                                      struct sk_buff *skb)
1248 {
1249         struct hci_rp_le_read_buffer_size *rp = data;
1250
1251         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1252
1253         if (rp->status)
1254                 return rp->status;
1255
1256         hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1257         hdev->le_pkts = rp->le_max_pkt;
1258
1259         hdev->le_cnt = hdev->le_pkts;
1260
1261         BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1262
1263         return rp->status;
1264 }
1265
1266 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1267                                         struct sk_buff *skb)
1268 {
1269         struct hci_rp_le_read_local_features *rp = data;
1270
1271         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1272
1273         if (rp->status)
1274                 return rp->status;
1275
1276         memcpy(hdev->le_features, rp->features, 8);
1277
1278         return rp->status;
1279 }
1280
1281 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1282                                       struct sk_buff *skb)
1283 {
1284         struct hci_rp_le_read_adv_tx_power *rp = data;
1285
1286         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1287
1288         if (rp->status)
1289                 return rp->status;
1290
1291         hdev->adv_tx_power = rp->tx_power;
1292
1293         return rp->status;
1294 }
1295
1296 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1297                                     struct sk_buff *skb)
1298 {
1299         struct hci_rp_user_confirm_reply *rp = data;
1300
1301         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1302
1303         hci_dev_lock(hdev);
1304
1305         if (hci_dev_test_flag(hdev, HCI_MGMT))
1306                 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1307                                                  rp->status);
1308
1309         hci_dev_unlock(hdev);
1310
1311         return rp->status;
1312 }
1313
1314 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1315                                         struct sk_buff *skb)
1316 {
1317         struct hci_rp_user_confirm_reply *rp = data;
1318
1319         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1320
1321         hci_dev_lock(hdev);
1322
1323         if (hci_dev_test_flag(hdev, HCI_MGMT))
1324                 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1325                                                      ACL_LINK, 0, rp->status);
1326
1327         hci_dev_unlock(hdev);
1328
1329         return rp->status;
1330 }
1331
1332 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1333                                     struct sk_buff *skb)
1334 {
1335         struct hci_rp_user_confirm_reply *rp = data;
1336
1337         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1338
1339         hci_dev_lock(hdev);
1340
1341         if (hci_dev_test_flag(hdev, HCI_MGMT))
1342                 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1343                                                  0, rp->status);
1344
1345         hci_dev_unlock(hdev);
1346
1347         return rp->status;
1348 }
1349
1350 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1351                                         struct sk_buff *skb)
1352 {
1353         struct hci_rp_user_confirm_reply *rp = data;
1354
1355         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1356
1357         hci_dev_lock(hdev);
1358
1359         if (hci_dev_test_flag(hdev, HCI_MGMT))
1360                 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1361                                                      ACL_LINK, 0, rp->status);
1362
1363         hci_dev_unlock(hdev);
1364
1365         return rp->status;
1366 }
1367
1368 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1369                                      struct sk_buff *skb)
1370 {
1371         struct hci_rp_read_local_oob_data *rp = data;
1372
1373         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1374
1375         return rp->status;
1376 }
1377
1378 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1379                                          struct sk_buff *skb)
1380 {
1381         struct hci_rp_read_local_oob_ext_data *rp = data;
1382
1383         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1384
1385         return rp->status;
1386 }
1387
1388 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1389                                     struct sk_buff *skb)
1390 {
1391         struct hci_ev_status *rp = data;
1392         bdaddr_t *sent;
1393
1394         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1395
1396         if (rp->status)
1397                 return rp->status;
1398
1399         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1400         if (!sent)
1401                 return rp->status;
1402
1403         hci_dev_lock(hdev);
1404
1405         bacpy(&hdev->random_addr, sent);
1406
1407         if (!bacmp(&hdev->rpa, sent)) {
1408                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1409                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1410                                    secs_to_jiffies(hdev->rpa_timeout));
1411         }
1412
1413         hci_dev_unlock(hdev);
1414
1415         return rp->status;
1416 }
1417
1418 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1419                                     struct sk_buff *skb)
1420 {
1421         struct hci_ev_status *rp = data;
1422         struct hci_cp_le_set_default_phy *cp;
1423
1424         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1425
1426         if (rp->status)
1427                 return rp->status;
1428
1429         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1430         if (!cp)
1431                 return rp->status;
1432
1433         hci_dev_lock(hdev);
1434
1435         hdev->le_tx_def_phys = cp->tx_phys;
1436         hdev->le_rx_def_phys = cp->rx_phys;
1437
1438         hci_dev_unlock(hdev);
1439
1440         return rp->status;
1441 }
1442
1443 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1444                                             struct sk_buff *skb)
1445 {
1446         struct hci_ev_status *rp = data;
1447         struct hci_cp_le_set_adv_set_rand_addr *cp;
1448         struct adv_info *adv;
1449
1450         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1451
1452         if (rp->status)
1453                 return rp->status;
1454
1455         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1456         /* Only update the adv instance, since handle 0x00 shall be using
1457          * HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
1458          * non-extended advertising.
1459          */
1460         if (!cp || !cp->handle)
1461                 return rp->status;
1462
1463         hci_dev_lock(hdev);
1464
1465         adv = hci_find_adv_instance(hdev, cp->handle);
1466         if (adv) {
1467                 bacpy(&adv->random_addr, &cp->bdaddr);
1468                 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1469                         adv->rpa_expired = false;
1470                         queue_delayed_work(hdev->workqueue,
1471                                            &adv->rpa_expired_cb,
1472                                            secs_to_jiffies(hdev->rpa_timeout));
1473                 }
1474         }
1475
1476         hci_dev_unlock(hdev);
1477
1478         return rp->status;
1479 }
1480
1481 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1482                                    struct sk_buff *skb)
1483 {
1484         struct hci_ev_status *rp = data;
1485         u8 *instance;
1486         int err;
1487
1488         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1489
1490         if (rp->status)
1491                 return rp->status;
1492
1493         instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1494         if (!instance)
1495                 return rp->status;
1496
1497         hci_dev_lock(hdev);
1498
1499         err = hci_remove_adv_instance(hdev, *instance);
1500         if (!err)
1501                 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1502                                          *instance);
1503
1504         hci_dev_unlock(hdev);
1505
1506         return rp->status;
1507 }
1508
1509 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1510                                    struct sk_buff *skb)
1511 {
1512         struct hci_ev_status *rp = data;
1513         struct adv_info *adv, *n;
1514         int err;
1515
1516         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1517
1518         if (rp->status)
1519                 return rp->status;
1520
1521         if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1522                 return rp->status;
1523
1524         hci_dev_lock(hdev);
1525
1526         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1527                 u8 instance = adv->instance;
1528
1529                 err = hci_remove_adv_instance(hdev, instance);
1530                 if (!err)
1531                         mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1532                                                  hdev, instance);
1533         }
1534
1535         hci_dev_unlock(hdev);
1536
1537         return rp->status;
1538 }
1539
1540 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1541                                         struct sk_buff *skb)
1542 {
1543         struct hci_rp_le_read_transmit_power *rp = data;
1544
1545         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1546
1547         if (rp->status)
1548                 return rp->status;
1549
1550         hdev->min_le_tx_power = rp->min_le_tx_power;
1551         hdev->max_le_tx_power = rp->max_le_tx_power;
1552
1553         return rp->status;
1554 }
1555
1556 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1557                                      struct sk_buff *skb)
1558 {
1559         struct hci_ev_status *rp = data;
1560         struct hci_cp_le_set_privacy_mode *cp;
1561         struct hci_conn_params *params;
1562
1563         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1564
1565         if (rp->status)
1566                 return rp->status;
1567
1568         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1569         if (!cp)
1570                 return rp->status;
1571
1572         hci_dev_lock(hdev);
1573
1574         params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1575         if (params)
1576                 params->privacy_mode = cp->mode;
1577
1578         hci_dev_unlock(hdev);
1579
1580         return rp->status;
1581 }
1582
1583 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1584                                    struct sk_buff *skb)
1585 {
1586         struct hci_ev_status *rp = data;
1587         __u8 *sent;
1588
1589         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1590
1591         if (rp->status)
1592                 return rp->status;
1593
1594         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1595         if (!sent)
1596                 return rp->status;
1597
1598         hci_dev_lock(hdev);
1599
1600         /* If we're doing connection initiation as a peripheral, set a
1601          * timeout in case something goes wrong.
1602          */
1603         if (*sent) {
1604                 struct hci_conn *conn;
1605
1606                 hci_dev_set_flag(hdev, HCI_LE_ADV);
1607
1608                 conn = hci_lookup_le_connect(hdev);
1609                 if (conn)
1610                         queue_delayed_work(hdev->workqueue,
1611                                            &conn->le_conn_timeout,
1612                                            conn->conn_timeout);
1613         } else {
1614                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1615         }
1616
1617         hci_dev_unlock(hdev);
1618
1619         return rp->status;
1620 }
1621
1622 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1623                                        struct sk_buff *skb)
1624 {
1625         struct hci_cp_le_set_ext_adv_enable *cp;
1626         struct hci_cp_ext_adv_set *set;
1627         struct adv_info *adv = NULL, *n;
1628         struct hci_ev_status *rp = data;
1629
1630         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1631
1632         if (rp->status)
1633                 return rp->status;
1634
1635         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1636         if (!cp)
1637                 return rp->status;
1638
1639         set = (void *)cp->data;
1640
1641         hci_dev_lock(hdev);
1642
1643         if (cp->num_of_sets)
1644                 adv = hci_find_adv_instance(hdev, set->handle);
1645
1646         if (cp->enable) {
1647                 struct hci_conn *conn;
1648
1649                 hci_dev_set_flag(hdev, HCI_LE_ADV);
1650
1651                 if (adv)
1652                         adv->enabled = true;
1653
1654                 conn = hci_lookup_le_connect(hdev);
1655                 if (conn)
1656                         queue_delayed_work(hdev->workqueue,
1657                                            &conn->le_conn_timeout,
1658                                            conn->conn_timeout);
1659         } else {
1660                 if (cp->num_of_sets) {
1661                         if (adv)
1662                                 adv->enabled = false;
1663
1664                         /* If just one instance was disabled, check if any other
1665                          * instance is still enabled before clearing HCI_LE_ADV.
1666                          */
1667                         list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1668                                                  list) {
1669                                 if (adv->enabled)
1670                                         goto unlock;
1671                         }
1672                 } else {
1673                         /* All instances shall be considered disabled */
1674                         list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1675                                                  list)
1676                                 adv->enabled = false;
1677                 }
1678
1679                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1680         }
1681
1682 unlock:
1683         hci_dev_unlock(hdev);
1684         return rp->status;
1685 }
1686
1687 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1688                                    struct sk_buff *skb)
1689 {
1690         struct hci_cp_le_set_scan_param *cp;
1691         struct hci_ev_status *rp = data;
1692
1693         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1694
1695         if (rp->status)
1696                 return rp->status;
1697
1698         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1699         if (!cp)
1700                 return rp->status;
1701
1702         hci_dev_lock(hdev);
1703
1704         hdev->le_scan_type = cp->type;
1705
1706         hci_dev_unlock(hdev);
1707
1708         return rp->status;
1709 }
1710
1711 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1712                                        struct sk_buff *skb)
1713 {
1714         struct hci_cp_le_set_ext_scan_params *cp;
1715         struct hci_ev_status *rp = data;
1716         struct hci_cp_le_scan_phy_params *phy_param;
1717
1718         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1719
1720         if (rp->status)
1721                 return rp->status;
1722
1723         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1724         if (!cp)
1725                 return rp->status;
1726
1727         phy_param = (void *)cp->data;
1728
1729         hci_dev_lock(hdev);
1730
1731         hdev->le_scan_type = phy_param->type;
1732
1733         hci_dev_unlock(hdev);
1734
1735         return rp->status;
1736 }
1737
1738 static bool has_pending_adv_report(struct hci_dev *hdev)
1739 {
1740         struct discovery_state *d = &hdev->discovery;
1741
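        /* bacmp() returns non-zero when last_adv_addr differs from BDADDR_ANY,
         * i.e. an advertising report has been stored and is still pending.
         */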
1742         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1743 }
1744
1745 static void clear_pending_adv_report(struct hci_dev *hdev)
1746 {
1747         struct discovery_state *d = &hdev->discovery;
1748
1749         bacpy(&d->last_adv_addr, BDADDR_ANY);
1750         d->last_adv_data_len = 0;
1751 }
1752
1753 #ifndef TIZEN_BT
1754 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1755                                      u8 bdaddr_type, s8 rssi, u32 flags,
1756                                      u8 *data, u8 len)
1757 {
1758         struct discovery_state *d = &hdev->discovery;
1759
1760         if (len > HCI_MAX_AD_LENGTH)
1761                 return;
1762
1763         bacpy(&d->last_adv_addr, bdaddr);
1764         d->last_adv_addr_type = bdaddr_type;
1765         d->last_adv_rssi = rssi;
1766         d->last_adv_flags = flags;
1767         memcpy(d->last_adv_data, data, len);
1768         d->last_adv_data_len = len;
1769 }
1770 #endif
1771
1772 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1773 {
1774         hci_dev_lock(hdev);
1775
1776         switch (enable) {
1777         case LE_SCAN_ENABLE:
1778                 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1779                 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1780                         clear_pending_adv_report(hdev);
1781                 if (hci_dev_test_flag(hdev, HCI_MESH))
1782                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1783                 break;
1784
1785         case LE_SCAN_DISABLE:
1786                 /* We do this here instead of when setting DISCOVERY_STOPPED
1787                  * since the latter would potentially require waiting for
1788                  * inquiry to stop too.
1789                  */
1790                 if (has_pending_adv_report(hdev)) {
1791                         struct discovery_state *d = &hdev->discovery;
1792
1793                         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1794                                           d->last_adv_addr_type, NULL,
1795                                           d->last_adv_rssi, d->last_adv_flags,
1796                                           d->last_adv_data,
1797                                           d->last_adv_data_len, NULL, 0, 0);
1798                 }
1799
1800                 /* Cancel this timer so that we don't try to disable scanning
1801                  * when it's already disabled.
1802                  */
1803                 cancel_delayed_work(&hdev->le_scan_disable);
1804
1805                 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1806
1807                 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1808                  * interrupted scanning due to a connect request. Therefore,
1809                  * mark discovery as stopped.
1810                  */
1811                 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1812 #ifndef TIZEN_BT /* The line below is a kernel bug. */
1813                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1814 #else
1815                         hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1816 #endif
1817                 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1818                          hdev->discovery.state == DISCOVERY_FINDING)
1819                         queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1820
1821                 break;
1822
1823         default:
1824                 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1825                            enable);
1826                 break;
1827         }
1828
1829         hci_dev_unlock(hdev);
1830 }
1831
1832 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1833                                     struct sk_buff *skb)
1834 {
1835         struct hci_cp_le_set_scan_enable *cp;
1836         struct hci_ev_status *rp = data;
1837
1838         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1839
1840         if (rp->status)
1841                 return rp->status;
1842
1843         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1844         if (!cp)
1845                 return rp->status;
1846
1847         le_set_scan_enable_complete(hdev, cp->enable);
1848
1849         return rp->status;
1850 }
1851
1852 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1853                                         struct sk_buff *skb)
1854 {
1855         struct hci_cp_le_set_ext_scan_enable *cp;
1856         struct hci_ev_status *rp = data;
1857
1858         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1859
1860         if (rp->status)
1861                 return rp->status;
1862
1863         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1864         if (!cp)
1865                 return rp->status;
1866
1867         le_set_scan_enable_complete(hdev, cp->enable);
1868
1869         return rp->status;
1870 }
1871
1872 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1873                                       struct sk_buff *skb)
1874 {
1875         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1876
1877         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1878                    rp->num_of_sets);
1879
1880         if (rp->status)
1881                 return rp->status;
1882
1883         hdev->le_num_of_adv_sets = rp->num_of_sets;
1884
1885         return rp->status;
1886 }
1887
1888 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1889                                           struct sk_buff *skb)
1890 {
1891         struct hci_rp_le_read_accept_list_size *rp = data;
1892
1893         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1894
1895         if (rp->status)
1896                 return rp->status;
1897
1898         hdev->le_accept_list_size = rp->size;
1899
1900         return rp->status;
1901 }
1902
1903 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1904                                       struct sk_buff *skb)
1905 {
1906         struct hci_ev_status *rp = data;
1907
1908         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1909
1910         if (rp->status)
1911                 return rp->status;
1912
1913         hci_dev_lock(hdev);
1914         hci_bdaddr_list_clear(&hdev->le_accept_list);
1915         hci_dev_unlock(hdev);
1916
1917         return rp->status;
1918 }
1919
1920 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1921                                        struct sk_buff *skb)
1922 {
1923         struct hci_cp_le_add_to_accept_list *sent;
1924         struct hci_ev_status *rp = data;
1925
1926         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1927
1928         if (rp->status)
1929                 return rp->status;
1930
1931         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1932         if (!sent)
1933                 return rp->status;
1934
1935         hci_dev_lock(hdev);
1936         hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1937                             sent->bdaddr_type);
1938         hci_dev_unlock(hdev);
1939
1940         return rp->status;
1941 }
1942
1943 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1944                                          struct sk_buff *skb)
1945 {
1946         struct hci_cp_le_del_from_accept_list *sent;
1947         struct hci_ev_status *rp = data;
1948
1949         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1950
1951         if (rp->status)
1952                 return rp->status;
1953
1954         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1955         if (!sent)
1956                 return rp->status;
1957
1958         hci_dev_lock(hdev);
1959         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1960                             sent->bdaddr_type);
1961         hci_dev_unlock(hdev);
1962
1963         return rp->status;
1964 }
1965
1966 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1967                                           struct sk_buff *skb)
1968 {
1969         struct hci_rp_le_read_supported_states *rp = data;
1970
1971         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1972
1973         if (rp->status)
1974                 return rp->status;
1975
1976         memcpy(hdev->le_states, rp->le_states, 8);
1977
1978         return rp->status;
1979 }
1980
1981 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1982                                       struct sk_buff *skb)
1983 {
1984         struct hci_rp_le_read_def_data_len *rp = data;
1985
1986         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1987
1988 #ifdef TIZEN_BT
1989         hci_dev_lock(hdev);
1990 #else
1991         if (rp->status)
1992                 return rp->status;
1993 #endif
1994
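        /* Cache the suggested default data length (TX octets and time)
         * reported by the controller.
         */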
1995         hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1996         hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1997
1998 #ifdef TIZEN_BT
1999         mgmt_le_read_host_suggested_data_length_complete(hdev, rp->status);
2000
2001         hci_dev_unlock(hdev);
2002 #endif
2003
2004         return rp->status;
2005 }
2006
2007 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
2008                                        struct sk_buff *skb)
2009 {
2010         struct hci_cp_le_write_def_data_len *sent;
2011         struct hci_ev_status *rp = data;
2012
2013         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2014
2015         if (rp->status)
2016 #ifndef TIZEN_BT
2017                 return rp->status;
2018 #else
2019                 goto unblock;
2020 #endif
2021
2022         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2023         if (!sent)
2024 #ifndef TIZEN_BT
2025                 return rp->status;
2026 #else
2027                 goto unblock;
2028 #endif
2029
2030         hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2031         hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2032
2033         return rp->status;
2034 #ifdef TIZEN_BT
2035 unblock:
2036         mgmt_le_write_host_suggested_data_length_complete(hdev, rp->status);
2037         return rp->status;
2038 #endif
2039 }
2040
2041 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2042                                        struct sk_buff *skb)
2043 {
2044         struct hci_cp_le_add_to_resolv_list *sent;
2045         struct hci_ev_status *rp = data;
2046
2047         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2048
2049         if (rp->status)
2050                 return rp->status;
2051
2052         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2053         if (!sent)
2054                 return rp->status;
2055
2056         hci_dev_lock(hdev);
2057         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2058                                 sent->bdaddr_type, sent->peer_irk,
2059                                 sent->local_irk);
2060         hci_dev_unlock(hdev);
2061
2062         return rp->status;
2063 }
2064
2065 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2066                                          struct sk_buff *skb)
2067 {
2068         struct hci_cp_le_del_from_resolv_list *sent;
2069         struct hci_ev_status *rp = data;
2070
2071         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2072
2073         if (rp->status)
2074                 return rp->status;
2075
2076         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2077         if (!sent)
2078                 return rp->status;
2079
2080         hci_dev_lock(hdev);
2081         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2082                             sent->bdaddr_type);
2083         hci_dev_unlock(hdev);
2084
2085         return rp->status;
2086 }
2087
2088 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2089                                       struct sk_buff *skb)
2090 {
2091         struct hci_ev_status *rp = data;
2092
2093         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2094
2095         if (rp->status)
2096                 return rp->status;
2097
2098         hci_dev_lock(hdev);
2099         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2100         hci_dev_unlock(hdev);
2101
2102         return rp->status;
2103 }
2104
2105 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2106                                           struct sk_buff *skb)
2107 {
2108         struct hci_rp_le_read_resolv_list_size *rp = data;
2109
2110         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2111
2112         if (rp->status)
2113                 return rp->status;
2114
2115         hdev->le_resolv_list_size = rp->size;
2116
2117         return rp->status;
2118 }
2119
2120 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2121                                                struct sk_buff *skb)
2122 {
2123         struct hci_ev_status *rp = data;
2124         __u8 *sent;
2125
2126         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2127
2128         if (rp->status)
2129                 return rp->status;
2130
2131         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2132         if (!sent)
2133                 return rp->status;
2134
2135         hci_dev_lock(hdev);
2136
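        /* The single command parameter is Address_Resolution_Enable:
         * 0x01 enables and 0x00 disables address resolution in the controller.
         */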
2137         if (*sent)
2138                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2139         else
2140                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2141
2142         hci_dev_unlock(hdev);
2143
2144         return rp->status;
2145 }
2146
2147 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2148                                       struct sk_buff *skb)
2149 {
2150         struct hci_rp_le_read_max_data_len *rp = data;
2151
2152         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2153
2154 #ifndef TIZEN_BT
2155         if (rp->status)
2156                 return rp->status;
2157 #else
2158         hci_dev_lock(hdev);
2159 #endif
2160
2161         hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2162         hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2163         hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2164         hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2165
2166 #ifdef TIZEN_BT
2167         mgmt_le_read_maximum_data_length_complete(hdev, rp->status);
2168         hci_dev_unlock(hdev);
2169 #endif
2170
2171         return rp->status;
2172 }
2173
2174 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2175                                          struct sk_buff *skb)
2176 {
2177         struct hci_cp_write_le_host_supported *sent;
2178         struct hci_ev_status *rp = data;
2179
2180         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2181
2182         if (rp->status)
2183                 return rp->status;
2184
2185         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2186         if (!sent)
2187                 return rp->status;
2188
2189         hci_dev_lock(hdev);
2190
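        /* Mirror the LE Supported (Host) and Simultaneous LE and BR/EDR (Host)
         * bits into the cached extended features page 1.
         */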
2191         if (sent->le) {
2192                 hdev->features[1][0] |= LMP_HOST_LE;
2193                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2194         } else {
2195                 hdev->features[1][0] &= ~LMP_HOST_LE;
2196                 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2197                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2198         }
2199
2200         if (sent->simul)
2201                 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2202         else
2203                 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2204
2205         hci_dev_unlock(hdev);
2206
2207         return rp->status;
2208 }
2209
2210 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2211                                struct sk_buff *skb)
2212 {
2213         struct hci_cp_le_set_adv_param *cp;
2214         struct hci_ev_status *rp = data;
2215
2216         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2217
2218         if (rp->status)
2219                 return rp->status;
2220
2221         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2222         if (!cp)
2223                 return rp->status;
2224
2225         hci_dev_lock(hdev);
2226         hdev->adv_addr_type = cp->own_address_type;
2227         hci_dev_unlock(hdev);
2228
2229         return rp->status;
2230 }
2231
2232 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2233                                    struct sk_buff *skb)
2234 {
2235         struct hci_rp_le_set_ext_adv_params *rp = data;
2236         struct hci_cp_le_set_ext_adv_params *cp;
2237         struct adv_info *adv_instance;
2238
2239         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2240
2241         if (rp->status)
2242                 return rp->status;
2243
2244         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2245         if (!cp)
2246                 return rp->status;
2247
2248         hci_dev_lock(hdev);
2249         hdev->adv_addr_type = cp->own_addr_type;
2250         if (!cp->handle) {
2251                 /* Store in hdev for instance 0 */
2252                 hdev->adv_tx_power = rp->tx_power;
2253         } else {
2254                 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2255                 if (adv_instance)
2256                         adv_instance->tx_power = rp->tx_power;
2257         }
2258         /* Update adv data now that the tx power is known */
2259         hci_update_adv_data(hdev, cp->handle);
2260
2261         hci_dev_unlock(hdev);
2262
2263         return rp->status;
2264 }
2265
2266 #ifdef TIZEN_BT
2267 static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
2268                              struct sk_buff *skb)
2269 {
2270         struct hci_cc_rsp_enable_rssi *rp = data;
2271
2272         BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
2273                hdev->name, rp->status, rp->le_ext_opcode);
2274
2275         mgmt_enable_rssi_cc(hdev, rp, rp->status);
2276
2277         return rp->status;
2278 }
2279
2280 static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
2281                               struct sk_buff *skb)
2282 {
2283         struct hci_cc_rp_get_raw_rssi *rp = data;
2284
2285         BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
2286                hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
2287
2288         mgmt_raw_rssi_response(hdev, rp, rp->status);
2289
2290         return rp->status;
2291 }
2292
2293 static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
2294                                                struct sk_buff *skb)
2295 {
2296         struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
2297
2298         BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);
2299
2300         mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
2301                             ev->rssi_dbm);
2302 }
2303
2304 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
2305                                               struct sk_buff *skb)
2306 {
2307         struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
2308         __u8 event_le_ext_sub_code;
2309
2310         BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
2311                LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
2312
2313         skb_pull(skb, sizeof(*ev));
2314         event_le_ext_sub_code = ev->event_le_ext_sub_code;
2315
2316         switch (event_le_ext_sub_code) {
2317         case LE_RSSI_LINK_ALERT:
2318                 hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
2319                 break;
2320
2321         default:
2322                 break;
2323         }
2324 }
2325
2326 static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
2327                                                   struct sk_buff *skb)
2328 {
2329         struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;
2330
2331         BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");
2332
2333         mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
2334                                         ev->state_change_reason,
2335                                         ev->connection_handle);
2336 }
2337
2338 static void hci_vendor_specific_evt(struct hci_dev *hdev, void *data,
2339                                     struct sk_buff *skb)
2340 {
2341         struct hci_ev_vendor_specific *ev = (void *)skb->data;
2342         __u8 event_sub_code;
2343
2344         BT_DBG("hci_vendor_specific_evt");
2345
2346         skb_pull(skb, sizeof(*ev));
2347         event_sub_code = ev->event_sub_code;
2348
2349         switch (event_sub_code) {
2350         case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
2351                 hci_vendor_specific_group_ext_evt(hdev, skb);
2352                 break;
2353
2354         case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
2355                 hci_vendor_multi_adv_state_change_evt(hdev, skb);
2356                 break;
2357
2358         default:
2359                 break;
2360         }
2361 }
2362 #endif
2363
2364 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2365                            struct sk_buff *skb)
2366 {
2367         struct hci_rp_read_rssi *rp = data;
2368         struct hci_conn *conn;
2369
2370         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2371
2372         if (rp->status)
2373                 return rp->status;
2374
2375         hci_dev_lock(hdev);
2376
2377         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2378         if (conn)
2379                 conn->rssi = rp->rssi;
2380
2381         hci_dev_unlock(hdev);
2382
2383         return rp->status;
2384 }
2385
2386 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2387                                struct sk_buff *skb)
2388 {
2389         struct hci_cp_read_tx_power *sent;
2390         struct hci_rp_read_tx_power *rp = data;
2391         struct hci_conn *conn;
2392
2393         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2394
2395         if (rp->status)
2396                 return rp->status;
2397
2398         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2399         if (!sent)
2400                 return rp->status;
2401
2402         hci_dev_lock(hdev);
2403
2404         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2405         if (!conn)
2406                 goto unlock;
2407
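        /* The Type parameter of Read_Transmitted_Power_Level selects either
         * the current (0x00) or the maximum (0x01) transmit power level.
         */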
2408         switch (sent->type) {
2409         case 0x00:
2410                 conn->tx_power = rp->tx_power;
2411                 break;
2412         case 0x01:
2413                 conn->max_tx_power = rp->tx_power;
2414                 break;
2415         }
2416
2417 unlock:
2418         hci_dev_unlock(hdev);
2419         return rp->status;
2420 }
2421
2422 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2423                                       struct sk_buff *skb)
2424 {
2425         struct hci_ev_status *rp = data;
2426         u8 *mode;
2427
2428         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2429
2430         if (rp->status)
2431                 return rp->status;
2432
2433         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2434         if (mode)
2435                 hdev->ssp_debug_mode = *mode;
2436
2437         return rp->status;
2438 }
2439
2440 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2441 {
2442         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2443
2444         if (status) {
2445                 hci_conn_check_pending(hdev);
2446                 return;
2447         }
2448
2449         set_bit(HCI_INQUIRY, &hdev->flags);
2450 }
2451
2452 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2453 {
2454         struct hci_cp_create_conn *cp;
2455         struct hci_conn *conn;
2456
2457         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2458
2459         cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2460         if (!cp)
2461                 return;
2462
2463         hci_dev_lock(hdev);
2464
2465         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2466
2467         bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2468
2469         if (status) {
2470                 if (conn && conn->state == BT_CONNECT) {
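                        /* A status of 0x0c (Command Disallowed) with at most two
                         * attempts so far leaves the connection in BT_CONNECT2 so
                         * that the attempt can be retried; otherwise give up.
                         */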
2471                         if (status != 0x0c || conn->attempt > 2) {
2472                                 conn->state = BT_CLOSED;
2473                                 hci_connect_cfm(conn, status);
2474                                 hci_conn_del(conn);
2475                         } else
2476                                 conn->state = BT_CONNECT2;
2477                 }
2478         } else {
2479                 if (!conn) {
2480                         conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2481                                             HCI_ROLE_MASTER);
2482                         if (!conn)
2483                                 bt_dev_err(hdev, "no memory for new connection");
2484                 }
2485         }
2486
2487         hci_dev_unlock(hdev);
2488 }
2489
2490 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2491 {
2492         struct hci_cp_add_sco *cp;
2493         struct hci_conn *acl, *sco;
2494         __u16 handle;
2495
2496         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2497
2498         if (!status)
2499                 return;
2500
2501         cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2502         if (!cp)
2503                 return;
2504
2505         handle = __le16_to_cpu(cp->handle);
2506
2507         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2508
2509         hci_dev_lock(hdev);
2510
2511         acl = hci_conn_hash_lookup_handle(hdev, handle);
2512         if (acl) {
2513                 sco = acl->link;
2514                 if (sco) {
2515                         sco->state = BT_CLOSED;
2516
2517                         hci_connect_cfm(sco, status);
2518                         hci_conn_del(sco);
2519                 }
2520         }
2521
2522         hci_dev_unlock(hdev);
2523 }
2524
2525 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2526 {
2527         struct hci_cp_auth_requested *cp;
2528         struct hci_conn *conn;
2529
2530         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2531
2532         if (!status)
2533                 return;
2534
2535         cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2536         if (!cp)
2537                 return;
2538
2539         hci_dev_lock(hdev);
2540
2541         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2542         if (conn) {
2543                 if (conn->state == BT_CONFIG) {
2544                         hci_connect_cfm(conn, status);
2545                         hci_conn_drop(conn);
2546                 }
2547         }
2548
2549         hci_dev_unlock(hdev);
2550 }
2551
2552 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2553 {
2554         struct hci_cp_set_conn_encrypt *cp;
2555         struct hci_conn *conn;
2556
2557         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2558
2559         if (!status)
2560                 return;
2561
2562         cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2563         if (!cp)
2564                 return;
2565
2566         hci_dev_lock(hdev);
2567
2568         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2569         if (conn) {
2570                 if (conn->state == BT_CONFIG) {
2571                         hci_connect_cfm(conn, status);
2572                         hci_conn_drop(conn);
2573                 }
2574         }
2575
2576         hci_dev_unlock(hdev);
2577 }
2578
2579 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2580                                     struct hci_conn *conn)
2581 {
2582         if (conn->state != BT_CONFIG || !conn->out)
2583                 return 0;
2584
2585         if (conn->pending_sec_level == BT_SECURITY_SDP)
2586                 return 0;
2587
2588         /* Only request authentication for SSP connections or non-SSP
2589          * devices with sec_level MEDIUM or HIGH or if MITM protection
2590          * is requested.
2591          */
2592         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2593             conn->pending_sec_level != BT_SECURITY_FIPS &&
2594             conn->pending_sec_level != BT_SECURITY_HIGH &&
2595             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2596                 return 0;
2597
2598         return 1;
2599 }
2600
2601 static int hci_resolve_name(struct hci_dev *hdev,
2602                                    struct inquiry_entry *e)
2603 {
2604         struct hci_cp_remote_name_req cp;
2605
2606         memset(&cp, 0, sizeof(cp));
2607
2608         bacpy(&cp.bdaddr, &e->data.bdaddr);
2609         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2610         cp.pscan_mode = e->data.pscan_mode;
2611         cp.clock_offset = e->data.clock_offset;
2612
2613         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2614 }
2615
2616 static bool hci_resolve_next_name(struct hci_dev *hdev)
2617 {
2618         struct discovery_state *discov = &hdev->discovery;
2619         struct inquiry_entry *e;
2620
2621         if (list_empty(&discov->resolve))
2622                 return false;
2623
2624         /* We should stop if we already spent too much time resolving names. */
2625         if (time_after(jiffies, discov->name_resolve_timeout)) {
2626                 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2627                 return false;
2628         }
2629
2630         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2631         if (!e)
2632                 return false;
2633
2634         if (hci_resolve_name(hdev, e) == 0) {
2635                 e->name_state = NAME_PENDING;
2636                 return true;
2637         }
2638
2639         return false;
2640 }
2641
2642 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2643                                    bdaddr_t *bdaddr, u8 *name, u8 name_len)
2644 {
2645         struct discovery_state *discov = &hdev->discovery;
2646         struct inquiry_entry *e;
2647
2648 #ifdef TIZEN_BT
2649         /* Update the mgmt connected state if necessary. Be careful,
2650          * however, with conn objects that exist but are not (yet)
2651          * connected. Only those in the BT_CONFIG or BT_CONNECTED states
2652          * can be considered connected.
2653          */
2654         if (conn &&
2655             (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2656                 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2657                         mgmt_device_connected(hdev, conn, 0, name, name_len);
2658                 else
2659                         mgmt_device_name_update(hdev, bdaddr, name, name_len);
2660         }
2661 #else
2662         if (conn &&
2663             (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2664             !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2665                 mgmt_device_connected(hdev, conn, name, name_len);
2666 #endif
2667
2668         if (discov->state == DISCOVERY_STOPPED)
2669                 return;
2670
2671         if (discov->state == DISCOVERY_STOPPING)
2672                 goto discov_complete;
2673
2674         if (discov->state != DISCOVERY_RESOLVING)
2675                 return;
2676
2677         e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2678         /* If the device was not found in the list of found devices whose
2679          * names are pending, there is no need to continue resolving the next
2680          * name, as it will be done upon receiving another Remote Name
2681          * Request Complete Event */
2682         if (!e)
2683                 return;
2684
2685         list_del(&e->list);
2686
2687         e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2688         mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2689                          name, name_len);
2690
2691         if (hci_resolve_next_name(hdev))
2692                 return;
2693
2694 discov_complete:
2695         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2696 }
2697
2698 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2699 {
2700         struct hci_cp_remote_name_req *cp;
2701         struct hci_conn *conn;
2702
2703         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2704
2705         /* If successful, wait for the name req complete event before
2706          * checking whether authentication is needed */
2707         if (!status)
2708                 return;
2709
2710         cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2711         if (!cp)
2712                 return;
2713
2714         hci_dev_lock(hdev);
2715
2716         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2717
2718         if (hci_dev_test_flag(hdev, HCI_MGMT))
2719                 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2720
2721         if (!conn)
2722                 goto unlock;
2723
2724         if (!hci_outgoing_auth_needed(hdev, conn))
2725                 goto unlock;
2726
2727         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2728                 struct hci_cp_auth_requested auth_cp;
2729
2730                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2731
2732                 auth_cp.handle = __cpu_to_le16(conn->handle);
2733                 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2734                              sizeof(auth_cp), &auth_cp);
2735         }
2736
2737 unlock:
2738         hci_dev_unlock(hdev);
2739 }
2740
2741 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2742 {
2743         struct hci_cp_read_remote_features *cp;
2744         struct hci_conn *conn;
2745
2746         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2747
2748         if (!status)
2749                 return;
2750
2751         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2752         if (!cp)
2753                 return;
2754
2755         hci_dev_lock(hdev);
2756
2757         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2758         if (conn) {
2759                 if (conn->state == BT_CONFIG) {
2760                         hci_connect_cfm(conn, status);
2761                         hci_conn_drop(conn);
2762                 }
2763         }
2764
2765         hci_dev_unlock(hdev);
2766 }
2767
2768 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2769 {
2770         struct hci_cp_read_remote_ext_features *cp;
2771         struct hci_conn *conn;
2772
2773         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2774
2775         if (!status)
2776                 return;
2777
2778         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2779         if (!cp)
2780                 return;
2781
2782         hci_dev_lock(hdev);
2783
2784         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2785         if (conn) {
2786                 if (conn->state == BT_CONFIG) {
2787                         hci_connect_cfm(conn, status);
2788                         hci_conn_drop(conn);
2789                 }
2790         }
2791
2792         hci_dev_unlock(hdev);
2793 }
2794
2795 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2796 {
2797         struct hci_cp_setup_sync_conn *cp;
2798         struct hci_conn *acl, *sco;
2799         __u16 handle;
2800
2801         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2802
2803         if (!status)
2804                 return;
2805
2806         cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2807         if (!cp)
2808                 return;
2809
2810         handle = __le16_to_cpu(cp->handle);
2811
2812         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2813
2814         hci_dev_lock(hdev);
2815
2816         acl = hci_conn_hash_lookup_handle(hdev, handle);
2817         if (acl) {
2818                 sco = acl->link;
2819                 if (sco) {
2820                         sco->state = BT_CLOSED;
2821
2822                         hci_connect_cfm(sco, status);
2823                         hci_conn_del(sco);
2824                 }
2825         }
2826
2827         hci_dev_unlock(hdev);
2828 }
2829
2830 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2831 {
2832         struct hci_cp_enhanced_setup_sync_conn *cp;
2833         struct hci_conn *acl, *sco;
2834         __u16 handle;
2835
2836         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2837
2838         if (!status)
2839                 return;
2840
2841         cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2842         if (!cp)
2843                 return;
2844
2845         handle = __le16_to_cpu(cp->handle);
2846
2847         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2848
2849         hci_dev_lock(hdev);
2850
2851         acl = hci_conn_hash_lookup_handle(hdev, handle);
2852         if (acl) {
2853                 sco = acl->link;
2854                 if (sco) {
2855                         sco->state = BT_CLOSED;
2856
2857                         hci_connect_cfm(sco, status);
2858                         hci_conn_del(sco);
2859                 }
2860         }
2861
2862         hci_dev_unlock(hdev);
2863 }
2864
2865 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2866 {
2867         struct hci_cp_sniff_mode *cp;
2868         struct hci_conn *conn;
2869
2870         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2871
2872         if (!status)
2873                 return;
2874
2875         cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2876         if (!cp)
2877                 return;
2878
2879         hci_dev_lock(hdev);
2880
2881         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2882         if (conn) {
2883                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2884
2885                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2886                         hci_sco_setup(conn, status);
2887         }
2888
2889         hci_dev_unlock(hdev);
2890 }
2891
2892 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2893 {
2894         struct hci_cp_exit_sniff_mode *cp;
2895         struct hci_conn *conn;
2896
2897         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2898
2899         if (!status)
2900                 return;
2901
2902         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2903         if (!cp)
2904                 return;
2905
2906         hci_dev_lock(hdev);
2907
2908         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2909         if (conn) {
2910                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2911
2912                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2913                         hci_sco_setup(conn, status);
2914         }
2915
2916         hci_dev_unlock(hdev);
2917 }
2918
2919 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2920 {
2921         struct hci_cp_disconnect *cp;
2922         struct hci_conn_params *params;
2923         struct hci_conn *conn;
2924         bool mgmt_conn;
2925
2926         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2927
2928         /* Wait for HCI_EV_DISCONN_COMPLETE if the status is 0x00 and we are
2929          * not suspended; otherwise clean up the connection immediately.
2930          */
2931         if (!status && !hdev->suspended)
2932                 return;
2933
2934         cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2935         if (!cp)
2936                 return;
2937
2938         hci_dev_lock(hdev);
2939
2940         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2941         if (!conn)
2942                 goto unlock;
2943
2944         if (status) {
2945                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2946                                        conn->dst_type, status);
2947
2948                 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2949                         hdev->cur_adv_instance = conn->adv_instance;
2950                         hci_enable_advertising(hdev);
2951                 }
2952
2953                 goto done;
2954         }
2955
2956         mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2957
2958         if (conn->type == ACL_LINK) {
2959                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2960                         hci_remove_link_key(hdev, &conn->dst);
2961         }
2962
2963         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2964         if (params) {
2965                 switch (params->auto_connect) {
2966                 case HCI_AUTO_CONN_LINK_LOSS:
2967                         if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2968                                 break;
2969                         fallthrough;
2970
2971                 case HCI_AUTO_CONN_DIRECT:
2972                 case HCI_AUTO_CONN_ALWAYS:
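                        /* Move the parameters back onto the pending LE
                         * connection list so that the device is auto-connected
                         * again once this link is gone.
                         */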
2973                         list_del_init(&params->action);
2974                         list_add(&params->action, &hdev->pend_le_conns);
2975                         break;
2976
2977                 default:
2978                         break;
2979                 }
2980         }
2981
2982         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2983                                  cp->reason, mgmt_conn);
2984
2985         hci_disconn_cfm(conn, cp->reason);
2986
2987 done:
2988         /* If the disconnection failed for any reason, the upper layer
2989          * does not retry the disconnect in the current implementation.
2990          * Hence, we need to do some basic cleanup here and re-enable
2991          * advertising if necessary.
2992          */
2993         hci_conn_del(conn);
2994 unlock:
2995         hci_dev_unlock(hdev);
2996 }
2997
2998 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2999 {
3000         /* When using controller-based address resolution, the new address
3001          * types 0x02 and 0x03 are used. These types need to be converted
3002          * back into either the public or the random address type.
3003          */
3004         switch (type) {
3005         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3006                 if (resolved)
3007                         *resolved = true;
3008                 return ADDR_LE_DEV_PUBLIC;
3009         case ADDR_LE_DEV_RANDOM_RESOLVED:
3010                 if (resolved)
3011                         *resolved = true;
3012                 return ADDR_LE_DEV_RANDOM;
3013         }
3014
3015         if (resolved)
3016                 *resolved = false;
3017         return type;
3018 }
3019
3020 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
3021                               u8 peer_addr_type, u8 own_address_type,
3022                               u8 filter_policy)
3023 {
3024         struct hci_conn *conn;
3025
3026         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
3027                                        peer_addr_type);
3028         if (!conn)
3029                 return;
3030
3031         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
3032
3033         /* Store the initiator and responder address information which
3034          * is needed for SMP. These values will not change during the
3035          * lifetime of the connection.
3036          */
3037         conn->init_addr_type = own_address_type;
3038         if (own_address_type == ADDR_LE_DEV_RANDOM)
3039                 bacpy(&conn->init_addr, &hdev->random_addr);
3040         else
3041                 bacpy(&conn->init_addr, &hdev->bdaddr);
3042
3043         conn->resp_addr_type = peer_addr_type;
3044         bacpy(&conn->resp_addr, peer_addr);
3045 }
3046
3047 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
3048 {
3049         struct hci_cp_le_create_conn *cp;
3050
3051         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3052
3053         /* All connection failure handling is taken care of by the
3054          * hci_conn_failed function which is triggered by the HCI
3055          * request completion callbacks used for connecting.
3056          */
3057         if (status)
3058                 return;
3059
3060         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
3061         if (!cp)
3062                 return;
3063
3064         hci_dev_lock(hdev);
3065
3066         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3067                           cp->own_address_type, cp->filter_policy);
3068
3069         hci_dev_unlock(hdev);
3070 }
3071
3072 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
3073 {
3074         struct hci_cp_le_ext_create_conn *cp;
3075
3076         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3077
3078         /* All connection failure handling is taken care of by the
3079          * hci_conn_failed function which is triggered by the HCI
3080          * request completion callbacks used for connecting.
3081          */
3082         if (status)
3083                 return;
3084
3085         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
3086         if (!cp)
3087                 return;
3088
3089         hci_dev_lock(hdev);
3090
3091         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3092                           cp->own_addr_type, cp->filter_policy);
3093
3094         hci_dev_unlock(hdev);
3095 }
3096
3097 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
3098 {
3099         struct hci_cp_le_read_remote_features *cp;
3100         struct hci_conn *conn;
3101
3102         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3103
3104         if (!status)
3105                 return;
3106
3107         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
3108         if (!cp)
3109                 return;
3110
3111         hci_dev_lock(hdev);
3112
3113         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3114         if (conn) {
3115                 if (conn->state == BT_CONFIG) {
3116                         hci_connect_cfm(conn, status);
3117                         hci_conn_drop(conn);
3118                 }
3119         }
3120
3121         hci_dev_unlock(hdev);
3122 }
3123
3124 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3125 {
3126         struct hci_cp_le_start_enc *cp;
3127         struct hci_conn *conn;
3128
3129         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3130
3131         if (!status)
3132                 return;
3133
3134         hci_dev_lock(hdev);
3135
3136         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3137         if (!cp)
3138                 goto unlock;
3139
3140         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3141         if (!conn)
3142                 goto unlock;
3143
3144         if (conn->state != BT_CONNECTED)
3145                 goto unlock;
3146
3147         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3148         hci_conn_drop(conn);
3149
3150 unlock:
3151         hci_dev_unlock(hdev);
3152 }
3153
3154 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3155 {
3156         struct hci_cp_switch_role *cp;
3157         struct hci_conn *conn;
3158
3159         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3160
3161         if (!status)
3162                 return;
3163
3164         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3165         if (!cp)
3166                 return;
3167
3168         hci_dev_lock(hdev);
3169
3170         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3171         if (conn)
3172                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3173
3174         hci_dev_unlock(hdev);
3175 }
3176
3177 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3178                                      struct sk_buff *skb)
3179 {
3180         struct hci_ev_status *ev = data;
3181         struct discovery_state *discov = &hdev->discovery;
3182         struct inquiry_entry *e;
3183
3184         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3185
3186         hci_conn_check_pending(hdev);
3187
3188         if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3189                 return;
3190
3191         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3192         wake_up_bit(&hdev->flags, HCI_INQUIRY);
3193
3194         if (!hci_dev_test_flag(hdev, HCI_MGMT))
3195                 return;
3196
3197         hci_dev_lock(hdev);
3198
3199         if (discov->state != DISCOVERY_FINDING)
3200                 goto unlock;
3201
3202         if (list_empty(&discov->resolve)) {
3203                 /* When BR/EDR inquiry is active and no LE scanning is in
3204                  * progress, then change discovery state to indicate completion.
3205                  *
3206                  * When running LE scanning and BR/EDR inquiry simultaneously
3207                  * and the LE scan already finished, then change the discovery
3208                  * state to indicate completion.
3209                  */
3210                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3211                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3212                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3213                 goto unlock;
3214         }
3215
3216         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3217         if (e && hci_resolve_name(hdev, e) == 0) {
3218                 e->name_state = NAME_PENDING;
3219                 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3220                 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3221         } else {
3222                 /* When BR/EDR inquiry is active and no LE scanning is in
3223                  * progress, then change discovery state to indicate completion.
3224                  *
3225                  * When running LE scanning and BR/EDR inquiry simultaneously
3226                  * and the LE scan already finished, then change the discovery
3227                  * state to indicate completion.
3228                  */
3229                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3230                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3231                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3232         }
3233
3234 unlock:
3235         hci_dev_unlock(hdev);
3236 }
3237
3238 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3239                                    struct sk_buff *skb)
3240 {
3241         struct hci_ev_inquiry_result *ev = edata;
3242         struct inquiry_data data;
3243         int i;
3244
3245         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3246                              flex_array_size(ev, info, ev->num)))
3247                 return;
3248
3249         bt_dev_dbg(hdev, "num %d", ev->num);
3250
3251         if (!ev->num)
3252                 return;
3253
3254         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3255                 return;
3256
3257         hci_dev_lock(hdev);
3258
3259         for (i = 0; i < ev->num; i++) {
3260                 struct inquiry_info *info = &ev->info[i];
3261                 u32 flags;
3262
3263                 bacpy(&data.bdaddr, &info->bdaddr);
3264                 data.pscan_rep_mode     = info->pscan_rep_mode;
3265                 data.pscan_period_mode  = info->pscan_period_mode;
3266                 data.pscan_mode         = info->pscan_mode;
3267                 memcpy(data.dev_class, info->dev_class, 3);
3268                 data.clock_offset       = info->clock_offset;
3269                 data.rssi               = HCI_RSSI_INVALID;
3270                 data.ssp_mode           = 0x00;
3271
3272                 flags = hci_inquiry_cache_update(hdev, &data, false);
3273
3274                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3275                                   info->dev_class, HCI_RSSI_INVALID,
3276                                   flags, NULL, 0, NULL, 0, 0);
3277         }
3278
3279         hci_dev_unlock(hdev);
3280 }
3281
3282 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3283                                   struct sk_buff *skb)
3284 {
3285         struct hci_ev_conn_complete *ev = data;
3286         struct hci_conn *conn;
3287         u8 status = ev->status;
3288
3289         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3290
3291         hci_dev_lock(hdev);
3292
3293         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3294         if (!conn) {
3295                 /* In case of an error status with no pending connection,
3296                  * just unlock, as there is nothing to clean up.
3297                  */
3298                 if (ev->status)
3299                         goto unlock;
3300
3301                 /* The connection may not exist if it was auto-connected.
3302                  * Check the BR/EDR allowlist to see if this device is
3303                  * allowed to auto-connect. If the link is of ACL type,
3304                  * create a connection object automatically.
3305                  *
3306                  * Auto-connect will only occur if the event filter is
3307                  * programmed with a given address. Right now, the event
3308                  * filter is only used during suspend.
3309                  */
3310                 if (ev->link_type == ACL_LINK &&
3311                     hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3312                                                       &ev->bdaddr,
3313                                                       BDADDR_BREDR)) {
3314                         conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3315                                             HCI_ROLE_SLAVE);
3316                         if (!conn) {
3317                                 bt_dev_err(hdev, "no memory for new conn");
3318                                 goto unlock;
3319                         }
3320                 } else {
3321                         if (ev->link_type != SCO_LINK)
3322                                 goto unlock;
3323
3324                         conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3325                                                        &ev->bdaddr);
3326                         if (!conn)
3327                                 goto unlock;
3328
3329                         conn->type = SCO_LINK;
3330                 }
3331         }
3332
3333         /* The HCI_Connection_Complete event is only sent once per connection.
3334          * Processing it more than once per connection can corrupt kernel memory.
3335          *
3336          * As the connection handle is set here for the first time, it indicates
3337          * whether the connection is already set up.
3338          */
3339         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
3340                 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3341                 goto unlock;
3342         }
3343
3344         if (!status) {
3345                 conn->handle = __le16_to_cpu(ev->handle);
3346                 if (conn->handle > HCI_CONN_HANDLE_MAX) {
3347                         bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
3348                                    conn->handle, HCI_CONN_HANDLE_MAX);
3349                         status = HCI_ERROR_INVALID_PARAMETERS;
3350                         goto done;
3351                 }
3352
3353                 if (conn->type == ACL_LINK) {
3354                         conn->state = BT_CONFIG;
3355                         hci_conn_hold(conn);
3356
3357                         if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3358                             !hci_find_link_key(hdev, &ev->bdaddr))
3359                                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3360                         else
3361                                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3362                 } else
3363                         conn->state = BT_CONNECTED;
3364
3365                 hci_debugfs_create_conn(conn);
3366                 hci_conn_add_sysfs(conn);
3367
3368                 if (test_bit(HCI_AUTH, &hdev->flags))
3369                         set_bit(HCI_CONN_AUTH, &conn->flags);
3370
3371                 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3372                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3373
3374                 /* Get remote features */
3375                 if (conn->type == ACL_LINK) {
3376                         struct hci_cp_read_remote_features cp;
3377                         cp.handle = ev->handle;
3378                         hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3379                                      sizeof(cp), &cp);
3380
3381                         hci_update_scan(hdev);
3382                 }
3383
3384                 /* Set packet type for incoming connection */
3385                 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3386                         struct hci_cp_change_conn_ptype cp;
3387                         cp.handle = ev->handle;
3388                         cp.pkt_type = cpu_to_le16(conn->pkt_type);
3389                         hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3390                                      &cp);
3391                 }
3392
3393 #ifdef TIZEN_BT
3394                 if (get_link_mode(conn) & HCI_LM_MASTER)
3395                         hci_conn_change_supervision_timeout(conn,
3396                                         LINK_SUPERVISION_TIMEOUT);
3397 #endif
3398         }
3399
3400         if (conn->type == ACL_LINK)
3401                 hci_sco_setup(conn, ev->status);
3402
3403 done:
3404         if (status) {
3405                 hci_conn_failed(conn, status);
3406         } else if (ev->link_type == SCO_LINK) {
3407                 switch (conn->setting & SCO_AIRMODE_MASK) {
3408                 case SCO_AIRMODE_CVSD:
3409                         if (hdev->notify)
3410                                 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3411                         break;
3412                 }
3413
3414                 hci_connect_cfm(conn, status);
3415         }
3416
3417 unlock:
3418         hci_dev_unlock(hdev);
3419
3420         hci_conn_check_pending(hdev);
3421 }
3422
3423 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3424 {
3425         struct hci_cp_reject_conn_req cp;
3426
3427         bacpy(&cp.bdaddr, bdaddr);
3428         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3429         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3430 }
3431
3432 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3433                                  struct sk_buff *skb)
3434 {
3435         struct hci_ev_conn_request *ev = data;
3436         int mask = hdev->link_mode;
3437         struct inquiry_entry *ie;
3438         struct hci_conn *conn;
3439         __u8 flags = 0;
3440
3441         bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3442
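        /* Ask the registered protocol (L2CAP for ACL, SCO for (e)SCO links)
         * whether it is willing to accept this connection and whether the
         * setup should be deferred (HCI_PROTO_DEFER).
         */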
3443         mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3444                                       &flags);
3445
3446         if (!(mask & HCI_LM_ACCEPT)) {
3447                 hci_reject_conn(hdev, &ev->bdaddr);
3448                 return;
3449         }
3450
3451         hci_dev_lock(hdev);
3452
3453         if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3454                                    BDADDR_BREDR)) {
3455                 hci_reject_conn(hdev, &ev->bdaddr);
3456                 goto unlock;
3457         }
3458
3459         /* Require HCI_CONNECTABLE or an accept list entry to accept the
3460          * connection. These features are only touched through mgmt so
3461          * only do the checks if HCI_MGMT is set.
3462          */
3463         if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3464             !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3465             !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3466                                                BDADDR_BREDR)) {
3467                 hci_reject_conn(hdev, &ev->bdaddr);
3468                 goto unlock;
3469         }
3470
3471         /* Connection accepted */
3472
3473         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3474         if (ie)
3475                 memcpy(ie->data.dev_class, ev->dev_class, 3);
3476
3477 #ifdef TIZEN_BT
3478         if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
3479             hci_conn_hash_lookup_sco(hdev)) {
3480                 struct hci_cp_reject_conn_req cp;
3481
3482                 bacpy(&cp.bdaddr, &ev->bdaddr);
3483                 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
3484                 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
3485                              sizeof(cp), &cp);
3486                 hci_dev_unlock(hdev);
3487                 return;
3488         }
3489 #endif
3490
3491         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3492                         &ev->bdaddr);
3493         if (!conn) {
3494                 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3495                                     HCI_ROLE_SLAVE);
3496                 if (!conn) {
3497                         bt_dev_err(hdev, "no memory for new connection");
3498                         goto unlock;
3499                 }
3500         }
3501
3502         memcpy(conn->dev_class, ev->dev_class, 3);
3503
3504         hci_dev_unlock(hdev);
3505
3506         if (ev->link_type == ACL_LINK ||
3507             (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3508                 struct hci_cp_accept_conn_req cp;
3509                 conn->state = BT_CONNECT;
3510
3511                 bacpy(&cp.bdaddr, &ev->bdaddr);
3512
3513                 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3514                         cp.role = 0x00; /* Become central */
3515                 else
3516                         cp.role = 0x01; /* Remain peripheral */
3517
3518                 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3519         } else if (!(flags & HCI_PROTO_DEFER)) {
3520                 struct hci_cp_accept_sync_conn_req cp;
3521                 conn->state = BT_CONNECT;
3522
3523                 bacpy(&cp.bdaddr, &ev->bdaddr);
3524                 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3525
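                /* Default synchronous connection parameters: 0x1f40 octets/s
                 * is 8000 B/s, i.e. 64 kbit/s in each direction, while a max
                 * latency of 0xffff and a retransmission effort of 0xff both
                 * mean "don't care" per the HCI specification.
                 */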
3526                 cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3527                 cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3528                 cp.max_latency    = cpu_to_le16(0xffff);
3529                 cp.content_format = cpu_to_le16(hdev->voice_setting);
3530                 cp.retrans_effort = 0xff;
3531
3532                 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3533                              &cp);
3534         } else {
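                /* Setup was deferred (HCI_PROTO_DEFER): leave the connection
                 * in BT_CONNECT2 and let the protocol layer accept or reject
                 * it later, e.g. after userspace authorization.
                 */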
3535                 conn->state = BT_CONNECT2;
3536                 hci_connect_cfm(conn, 0);
3537         }
3538
3539         return;
3540 unlock:
3541         hci_dev_unlock(hdev);
3542 }
3543
3544 static u8 hci_to_mgmt_reason(u8 err)
3545 {
3546         switch (err) {
3547         case HCI_ERROR_CONNECTION_TIMEOUT:
3548                 return MGMT_DEV_DISCONN_TIMEOUT;
3549         case HCI_ERROR_REMOTE_USER_TERM:
3550         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3551         case HCI_ERROR_REMOTE_POWER_OFF:
3552                 return MGMT_DEV_DISCONN_REMOTE;
3553         case HCI_ERROR_LOCAL_HOST_TERM:
3554                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3555         default:
3556                 return MGMT_DEV_DISCONN_UNKNOWN;
3557         }
3558 }
3559
3560 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3561                                      struct sk_buff *skb)
3562 {
3563         struct hci_ev_disconn_complete *ev = data;
3564         u8 reason;
#ifdef TIZEN_BT
        u8 type;
#endif
3565         struct hci_conn_params *params;
3566         struct hci_conn *conn;
3567         bool mgmt_connected;
3568
3569         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3570
3571         hci_dev_lock(hdev);
3572
3573         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3574         if (!conn)
3575                 goto unlock;
3576
3577         if (ev->status) {
3578                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3579                                        conn->dst_type, ev->status);
3580                 goto unlock;
3581         }
3582
3583         conn->state = BT_CLOSED;
3584
3585         mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3586
3587         if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3588                 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3589         else
3590                 reason = hci_to_mgmt_reason(ev->reason);
3591
3592         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3593                                 reason, mgmt_connected);
3594
3595         if (conn->type == ACL_LINK) {
3596                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3597                         hci_remove_link_key(hdev, &conn->dst);
3598
3599                 hci_update_scan(hdev);
3600         }
3601
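        /* If this device has LE connection parameters with an auto-connect
         * policy, re-queue them so passive scanning can re-establish the
         * link after the disconnection.
         */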
3602         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3603         if (params) {
3604                 switch (params->auto_connect) {
3605                 case HCI_AUTO_CONN_LINK_LOSS:
3606                         if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3607                                 break;
3608                         fallthrough;
3609
3610                 case HCI_AUTO_CONN_DIRECT:
3611                 case HCI_AUTO_CONN_ALWAYS:
3612                         list_del_init(&params->action);
3613                         list_add(&params->action, &hdev->pend_le_conns);
3614                         hci_update_passive_scan(hdev);
3615                         break;
3616
3617                 default:
3618                         break;
3619                 }
3620         }
3621
3622         hci_disconn_cfm(conn, ev->reason);
3623
3624         /* Re-enable advertising if necessary, since it might
3625          * have been disabled by the connection. From the
3626          * HCI_LE_Set_Advertise_Enable command description in
3627          * the core specification (v4.0):
3628          * "The Controller shall continue advertising until the Host
3629          * issues an LE_Set_Advertise_Enable command with
3630          * Advertising_Enable set to 0x00 (Advertising is disabled)
3631          * or until a connection is created or until the Advertising
3632          * is timed out due to Directed Advertising."
3633          */
3634         if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3635                 hdev->cur_adv_instance = conn->adv_instance;
3636                 hci_enable_advertising(hdev);
3637         }
3638
#ifdef TIZEN_BT
        /* conn may be freed by hci_conn_del() below, so cache its type */
        type = conn->type;
#endif

3639         hci_conn_del(conn);
3640
3641 #ifdef TIZEN_BT
3642         if (type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
3643                 int iscan;
3644                 int pscan;
3645
3646                 iscan = test_bit(HCI_ISCAN, &hdev->flags);
3647                 pscan = test_bit(HCI_PSCAN, &hdev->flags);
3648                 if (!iscan && !pscan) {
3649                         u8 scan_enable = SCAN_PAGE;
3650
3651                         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3652                                      sizeof(scan_enable), &scan_enable);
3653                 }
3654         }
3655 #endif
3656
3657 unlock:
3658         hci_dev_unlock(hdev);
3659 }
3660
3661 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3662                                   struct sk_buff *skb)
3663 {
3664         struct hci_ev_auth_complete *ev = data;
3665         struct hci_conn *conn;
3666
3667         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3668
3669         hci_dev_lock(hdev);
3670
3671         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3672         if (!conn)
3673                 goto unlock;
3674
3675 #ifdef TIZEN_BT
3676         /*  PIN or Key Missing patch */
3677         BT_DBG("remote_auth %x, remote_cap %x, auth_type %x, io_capability %x",
3678                conn->remote_auth, conn->remote_cap,
3679                conn->auth_type, conn->io_capability);
3680
3681         if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING &&
            hci_conn_ssp_enabled(conn)) {
3682                 struct hci_cp_auth_requested cp;
3683
3684                 BT_DBG("Pin or key missing");
3685                 hci_remove_link_key(hdev, &conn->dst);
3686                 cp.handle = cpu_to_le16(conn->handle);
3687                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
3688                              sizeof(cp), &cp);
3689                 goto unlock;
3690         }
3691 #endif
3692
3693         if (!ev->status) {
3694                 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3695
3696                 if (!hci_conn_ssp_enabled(conn) &&
3697                     test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3698                         bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3699                 } else {
3700                         set_bit(HCI_CONN_AUTH, &conn->flags);
3701                         conn->sec_level = conn->pending_sec_level;
3702                 }
3703         } else {
3704                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3705                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3706
3707                 mgmt_auth_failed(conn, ev->status);
3708         }
3709
3710         clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3711         clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3712
3713         if (conn->state == BT_CONFIG) {
3714                 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3715                         struct hci_cp_set_conn_encrypt cp;
3716                         cp.handle  = ev->handle;
3717                         cp.encrypt = 0x01;
3718                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3719                                      &cp);
3720                 } else {
3721                         conn->state = BT_CONNECTED;
3722                         hci_connect_cfm(conn, ev->status);
3723                         hci_conn_drop(conn);
3724                 }
3725         } else {
3726                 hci_auth_cfm(conn, ev->status);
3727
3728                 hci_conn_hold(conn);
3729                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3730                 hci_conn_drop(conn);
3731         }
3732
3733         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3734                 if (!ev->status) {
3735                         struct hci_cp_set_conn_encrypt cp;
3736                         cp.handle  = ev->handle;
3737                         cp.encrypt = 0x01;
3738                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3739                                      &cp);
3740                 } else {
3741                         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3742                         hci_encrypt_cfm(conn, ev->status);
3743                 }
3744         }
3745
3746 unlock:
3747         hci_dev_unlock(hdev);
3748 }
3749
3750 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3751                                 struct sk_buff *skb)
3752 {
3753         struct hci_ev_remote_name *ev = data;
3754         struct hci_conn *conn;
3755
3756         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3757
3758         hci_conn_check_pending(hdev);
3759
3760         hci_dev_lock(hdev);
3761
3762         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3763
3764         if (!hci_dev_test_flag(hdev, HCI_MGMT))
3765                 goto check_auth;
3766
3767         if (ev->status == 0)
3768                 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3769                                        strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3770         else
3771                 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3772
3773 check_auth:
3774         if (!conn)
3775                 goto unlock;
3776
3777         if (!hci_outgoing_auth_needed(hdev, conn))
3778                 goto unlock;
3779
3780         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3781                 struct hci_cp_auth_requested cp;
3782
3783                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3784
3785                 cp.handle = __cpu_to_le16(conn->handle);
3786                 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3787         }
3788
3789 unlock:
3790         hci_dev_unlock(hdev);
3791 }
3792
3793 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3794                                    struct sk_buff *skb)
3795 {
3796         struct hci_ev_encrypt_change *ev = data;
3797         struct hci_conn *conn;
3798
3799         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3800
3801         hci_dev_lock(hdev);
3802
3803         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3804         if (!conn)
3805                 goto unlock;
3806
3807         if (!ev->status) {
3808                 if (ev->encrypt) {
3809                         /* Encryption implies authentication */
3810                         set_bit(HCI_CONN_AUTH, &conn->flags);
3811                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3812                         conn->sec_level = conn->pending_sec_level;
3813
3814                         /* P-256 authentication key implies FIPS */
3815                         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3816                                 set_bit(HCI_CONN_FIPS, &conn->flags);
3817
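                        /* An encrypt value of 0x02 means BR/EDR encryption
                         * with AES-CCM (Secure Connections); LE links always
                         * encrypt with AES-CCM.
                         */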
3818                         if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3819                             conn->type == LE_LINK)
3820                                 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3821                 } else {
3822                         clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3823                         clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3824                 }
3825         }
3826
3827         /* We should disregard the current RPA and generate a new one
3828          * whenever the encryption procedure fails.
3829          */
3830         if (ev->status && conn->type == LE_LINK) {
3831                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3832                 hci_adv_instances_set_rpa_expired(hdev, true);
3833         }
3834
3835         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3836
3837         /* Check link security requirements are met */
3838         if (!hci_conn_check_link_mode(conn))
3839                 ev->status = HCI_ERROR_AUTH_FAILURE;
3840
3841         if (ev->status && conn->state == BT_CONNECTED) {
3842                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3843                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3844
3845                 /* Notify upper layers so they can clean up before
3846                  * disconnecting.
3847                  */
3848                 hci_encrypt_cfm(conn, ev->status);
3849                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3850                 hci_conn_drop(conn);
3851                 goto unlock;
3852         }
3853
3854         /* Try reading the encryption key size for encrypted ACL links */
3855         if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3856                 struct hci_cp_read_enc_key_size cp;
3857
3858                 /* Only send HCI_Read_Encryption_Key_Size if the
3859                  * controller really supports it. If it doesn't, assume
3860                  * the default size (16).
3861                  */
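                /* HCI_Read_Encryption_Key_Size is bit 4 of Supported
                 * Commands octet 20.
                 */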
3862                 if (!(hdev->commands[20] & 0x10)) {
3863                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3864                         goto notify;
3865                 }
3866
3867                 cp.handle = cpu_to_le16(conn->handle);
3868                 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3869                                  sizeof(cp), &cp)) {
3870                         bt_dev_err(hdev, "sending read key size failed");
3871                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3872                         goto notify;
3873                 }
3874
3875                 goto unlock;
3876         }
3877
3878         /* Set the default Authenticated Payload Timeout once the link is
3879          * encrypted. As per Core Spec v5.0, Vol 2, Part B, Section 3.3,
3880          * the HCI_Write_Authenticated_Payload_Timeout command should only
3881          * be sent when the link is active and encryption is enabled. The
3882          * connection can be either LE or ACL, the controller must support
3883          * LMP Ping (LE Ping for LE links), and AES-CCM encryption is used.
3884          */
3885         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3886             test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3887             ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3888              (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3889                 struct hci_cp_write_auth_payload_to cp;
3890
3891                 cp.handle = cpu_to_le16(conn->handle);
3892                 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3893                 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3894                              sizeof(cp), &cp);
3895         }
3896
3897 notify:
3898         hci_encrypt_cfm(conn, ev->status);
3899
3900 unlock:
3901         hci_dev_unlock(hdev);
3902 }
3903
3904 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3905                                              struct sk_buff *skb)
3906 {
3907         struct hci_ev_change_link_key_complete *ev = data;
3908         struct hci_conn *conn;
3909
3910         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3911
3912         hci_dev_lock(hdev);
3913
3914         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3915         if (conn) {
3916                 if (!ev->status)
3917                         set_bit(HCI_CONN_SECURE, &conn->flags);
3918
3919                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3920
3921                 hci_key_change_cfm(conn, ev->status);
3922         }
3923
3924         hci_dev_unlock(hdev);
3925 }
3926
3927 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3928                                     struct sk_buff *skb)
3929 {
3930         struct hci_ev_remote_features *ev = data;
3931         struct hci_conn *conn;
3932
3933         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3934
3935         hci_dev_lock(hdev);
3936
3937         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3938         if (!conn)
3939                 goto unlock;
3940
3941         if (!ev->status)
3942                 memcpy(conn->features[0], ev->features, 8);
3943
3944         if (conn->state != BT_CONFIG)
3945                 goto unlock;
3946
3947         if (!ev->status && lmp_ext_feat_capable(hdev) &&
3948             lmp_ext_feat_capable(conn)) {
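                /* Extended features page 1 carries the host feature bits
                 * (e.g. Secure Simple Pairing and LE host support).
                 */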
3949                 struct hci_cp_read_remote_ext_features cp;
3950                 cp.handle = ev->handle;
3951                 cp.page = 0x01;
3952                 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3953                              sizeof(cp), &cp);
3954                 goto unlock;
3955         }
3956
3957         if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3958                 struct hci_cp_remote_name_req cp;
3959                 memset(&cp, 0, sizeof(cp));
3960                 bacpy(&cp.bdaddr, &conn->dst);
3961                 cp.pscan_rep_mode = 0x02;
3962                 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3963         } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3964                 mgmt_device_connected(hdev, conn, NULL, 0);
3965
3966         if (!hci_outgoing_auth_needed(hdev, conn)) {
3967                 conn->state = BT_CONNECTED;
3968                 hci_connect_cfm(conn, ev->status);
3969                 hci_conn_drop(conn);
3970         }
3971
3972 unlock:
3973         hci_dev_unlock(hdev);
3974 }
3975
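/* ncmd is the Num_HCI_Command_Packets value from the event: the number of
 * command packets the controller can currently accept. A non-zero value
 * re-opens the command queue (and cancels the ncmd watchdog), while zero
 * arms the watchdog so a controller that never reports free command slots
 * again is eventually detected.
 */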
3976 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3977 {
3978         cancel_delayed_work(&hdev->cmd_timer);
3979
3980         rcu_read_lock();
3981         if (!test_bit(HCI_RESET, &hdev->flags)) {
3982                 if (ncmd) {
3983                         cancel_delayed_work(&hdev->ncmd_timer);
3984                         atomic_set(&hdev->cmd_cnt, 1);
3985                 } else {
3986                         if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3987                                 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3988                                                    HCI_NCMD_TIMEOUT);
3989                 }
3990         }
3991         rcu_read_unlock();
3992 }
3993
3994 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3995                                         struct sk_buff *skb)
3996 {
3997         struct hci_rp_le_read_buffer_size_v2 *rp = data;
3998
3999         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4000
4001         if (rp->status)
4002                 return rp->status;
4003
4004         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
4005         hdev->le_pkts  = rp->acl_max_pkt;
4006         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
4007         hdev->iso_pkts = rp->iso_max_pkt;
4008
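        /* Initialize the available-credit counters to the full pool sizes
         * reported by the controller.
         */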
4009         hdev->le_cnt  = hdev->le_pkts;
4010         hdev->iso_cnt = hdev->iso_pkts;
4011
4012         BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
4013                hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
4014
4015         return rp->status;
4016 }
4017
4018 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
4019                                    struct sk_buff *skb)
4020 {
4021         struct hci_rp_le_set_cig_params *rp = data;
4022         struct hci_conn *conn;
4023         int i = 0;
4024
4025         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4026
4027         hci_dev_lock(hdev);
4028
4029         if (rp->status) {
4030                 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
4031                         conn->state = BT_CLOSED;
4032                         hci_connect_cfm(conn, rp->status);
4033                         hci_conn_del(conn);
4034                 }
4035                 goto unlock;
4036         }
4037
4038         rcu_read_lock();
4039
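        /* The controller returns one connection handle per CIS, in the same
         * order the CISes were configured; hand them out to the ISO links of
         * this CIG that are not connected yet.
         */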
4040         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
4041                 if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
4042                     conn->state == BT_CONNECTED)
4043                         continue;
4044
4045                 conn->handle = __le16_to_cpu(rp->handle[i++]);
4046
4047                 bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
4048                            conn->handle, conn->link);
4049
4050                 /* Create CIS if LE is already connected */
4051                 if (conn->link && conn->link->state == BT_CONNECTED) {
4052                         rcu_read_unlock();
4053                         hci_le_create_cis(conn->link);
4054                         rcu_read_lock();
4055                 }
4056
4057                 if (i == rp->num_handles)
4058                         break;
4059         }
4060
4061         rcu_read_unlock();
4062
4063 unlock:
4064         hci_dev_unlock(hdev);
4065
4066         return rp->status;
4067 }
4068
4069 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
4070                                    struct sk_buff *skb)
4071 {
4072         struct hci_rp_le_setup_iso_path *rp = data;
4073         struct hci_cp_le_setup_iso_path *cp;
4074         struct hci_conn *conn;
4075
4076         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4077
4078         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
4079         if (!cp)
4080                 return rp->status;
4081
4082         hci_dev_lock(hdev);
4083
4084         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
4085         if (!conn)
4086                 goto unlock;
4087
4088         if (rp->status) {
4089                 hci_connect_cfm(conn, rp->status);
4090                 hci_conn_del(conn);
4091                 goto unlock;
4092         }
4093
4094         switch (cp->direction) {
4095         /* Input (Host to Controller) */
4096         case 0x00:
4097                 /* Only confirm connection if output only */
4098                 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
4099                         hci_connect_cfm(conn, rp->status);
4100                 break;
4101         /* Output (Controller to Host) */
4102         case 0x01:
4103                 /* Confirm connection since conn->iso_qos is always configured
4104                  * last.
4105                  */
4106                 hci_connect_cfm(conn, rp->status);
4107                 break;
4108         }
4109
4110 unlock:
4111         hci_dev_unlock(hdev);
4112         return rp->status;
4113 }
4114
4115 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
4116 {
4117         bt_dev_dbg(hdev, "status 0x%2.2x", status);
4118 }
4119
4120 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
4121                                    struct sk_buff *skb)
4122 {
4123         struct hci_ev_status *rp = data;
4124         struct hci_cp_le_set_per_adv_params *cp;
4125
4126         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4127
4128         if (rp->status)
4129                 return rp->status;
4130
4131         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
4132         if (!cp)
4133                 return rp->status;
4134
4135         /* TODO: set the conn state */
4136         return rp->status;
4137 }
4138
4139 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
4140                                        struct sk_buff *skb)
4141 {
4142         struct hci_ev_status *rp = data;
4143         __u8 *sent;
4144
4145         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4146
4147         if (rp->status)
4148                 return rp->status;
4149
4150         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
4151         if (!sent)
4152                 return rp->status;
4153
4154         hci_dev_lock(hdev);
4155
4156         if (*sent)
4157                 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
4158         else
4159                 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4160
4161         hci_dev_unlock(hdev);
4162
4163         return rp->status;
4164 }
4165
4166 #define HCI_CC_VL(_op, _func, _min, _max) \
4167 { \
4168         .op = _op, \
4169         .func = _func, \
4170         .min_len = _min, \
4171         .max_len = _max, \
4172 }
4173
4174 #define HCI_CC(_op, _func, _len) \
4175         HCI_CC_VL(_op, _func, _len, _len)
4176
4177 #define HCI_CC_STATUS(_op, _func) \
4178         HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4179
4180 static const struct hci_cc {
4181         u16  op;
4182         u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4183         u16  min_len;
4184         u16  max_len;
4185 } hci_cc_table[] = {
4186         HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4187         HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4188         HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4189         HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4190                       hci_cc_remote_name_req_cancel),
4191         HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4192                sizeof(struct hci_rp_role_discovery)),
4193         HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4194                sizeof(struct hci_rp_read_link_policy)),
4195         HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4196                sizeof(struct hci_rp_write_link_policy)),
4197         HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4198                sizeof(struct hci_rp_read_def_link_policy)),
4199         HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4200                       hci_cc_write_def_link_policy),
4201         HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4202         HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4203                sizeof(struct hci_rp_read_stored_link_key)),
4204         HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4205                sizeof(struct hci_rp_delete_stored_link_key)),
4206         HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4207         HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4208                sizeof(struct hci_rp_read_local_name)),
4209         HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4210         HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4211         HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4212         HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4213         HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4214                sizeof(struct hci_rp_read_class_of_dev)),
4215         HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4216         HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4217                sizeof(struct hci_rp_read_voice_setting)),
4218         HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4219         HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4220                sizeof(struct hci_rp_read_num_supported_iac)),
4221         HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4222         HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4223         HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4224                sizeof(struct hci_rp_read_auth_payload_to)),
4225         HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4226                sizeof(struct hci_rp_write_auth_payload_to)),
4227         HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4228                sizeof(struct hci_rp_read_local_version)),
4229         HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4230                sizeof(struct hci_rp_read_local_commands)),
4231         HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4232                sizeof(struct hci_rp_read_local_features)),
4233         HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4234                sizeof(struct hci_rp_read_local_ext_features)),
4235         HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4236                sizeof(struct hci_rp_read_buffer_size)),
4237         HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4238                sizeof(struct hci_rp_read_bd_addr)),
4239         HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4240                sizeof(struct hci_rp_read_local_pairing_opts)),
4241         HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4242                sizeof(struct hci_rp_read_page_scan_activity)),
4243         HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4244                       hci_cc_write_page_scan_activity),
4245         HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4246                sizeof(struct hci_rp_read_page_scan_type)),
4247         HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4248         HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4249                sizeof(struct hci_rp_read_data_block_size)),
4250         HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4251                sizeof(struct hci_rp_read_flow_control_mode)),
4252         HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4253                sizeof(struct hci_rp_read_local_amp_info)),
4254         HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4255                sizeof(struct hci_rp_read_clock)),
4256         HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4257                sizeof(struct hci_rp_read_enc_key_size)),
4258         HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4259                sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4260         HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4261                hci_cc_read_def_err_data_reporting,
4262                sizeof(struct hci_rp_read_def_err_data_reporting)),
4263         HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4264                       hci_cc_write_def_err_data_reporting),
4265         HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4266                sizeof(struct hci_rp_pin_code_reply)),
4267         HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4268                sizeof(struct hci_rp_pin_code_neg_reply)),
4269         HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4270                sizeof(struct hci_rp_read_local_oob_data)),
4271         HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4272                sizeof(struct hci_rp_read_local_oob_ext_data)),
4273         HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4274                sizeof(struct hci_rp_le_read_buffer_size)),
4275         HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4276                sizeof(struct hci_rp_le_read_local_features)),
4277         HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4278                sizeof(struct hci_rp_le_read_adv_tx_power)),
4279         HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4280                sizeof(struct hci_rp_user_confirm_reply)),
4281         HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4282                sizeof(struct hci_rp_user_confirm_reply)),
4283         HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4284                sizeof(struct hci_rp_user_confirm_reply)),
4285         HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4286                sizeof(struct hci_rp_user_confirm_reply)),
4287         HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4288         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4289         HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4290         HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4291         HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4292                hci_cc_le_read_accept_list_size,
4293                sizeof(struct hci_rp_le_read_accept_list_size)),
4294         HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4295         HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4296                       hci_cc_le_add_to_accept_list),
4297         HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4298                       hci_cc_le_del_from_accept_list),
4299         HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4300                sizeof(struct hci_rp_le_read_supported_states)),
4301         HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4302                sizeof(struct hci_rp_le_read_def_data_len)),
4303         HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4304                       hci_cc_le_write_def_data_len),
4305         HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4306                       hci_cc_le_add_to_resolv_list),
4307         HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4308                       hci_cc_le_del_from_resolv_list),
4309         HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4310                       hci_cc_le_clear_resolv_list),
4311         HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4312                sizeof(struct hci_rp_le_read_resolv_list_size)),
4313         HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4314                       hci_cc_le_set_addr_resolution_enable),
4315         HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4316                sizeof(struct hci_rp_le_read_max_data_len)),
4317         HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4318                       hci_cc_write_le_host_supported),
4319         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4320         HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4321                sizeof(struct hci_rp_read_rssi)),
4322         HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4323                sizeof(struct hci_rp_read_tx_power)),
4324         HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4325         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4326                       hci_cc_le_set_ext_scan_param),
4327         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4328                       hci_cc_le_set_ext_scan_enable),
4329         HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4330         HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4331                hci_cc_le_read_num_adv_sets,
4332                sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4333         HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4334                sizeof(struct hci_rp_le_set_ext_adv_params)),
4335         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4336                       hci_cc_le_set_ext_adv_enable),
4337         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4338                       hci_cc_le_set_adv_set_random_addr),
4339         HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4340         HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4341         HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4342         HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4343                       hci_cc_le_set_per_adv_enable),
4344         HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4345                sizeof(struct hci_rp_le_read_transmit_power)),
4346 #ifdef TIZEN_BT
4347         HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
4348                sizeof(struct hci_cc_rsp_enable_rssi)),
4349         HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
4350                sizeof(struct hci_cc_rp_get_raw_rssi)),
4351 #endif
4352         HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4353         HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4354                sizeof(struct hci_rp_le_read_buffer_size_v2)),
4355         HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4356                   sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4357         HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4358                sizeof(struct hci_rp_le_setup_iso_path)),
4359 };
4360
4361 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4362                       struct sk_buff *skb)
4363 {
4364         void *data;
4365
4366         if (skb->len < cc->min_len) {
4367                 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4368                            cc->op, skb->len, cc->min_len);
4369                 return HCI_ERROR_UNSPECIFIED;
4370         }
4371
4372         /* Just warn if the length is over max_len, since it may still be
4373          * possible to partially parse the cc, so leave it to the callback
4374          * to decide whether that is acceptable.
4375          */
4376         if (skb->len > cc->max_len)
4377                 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4378                             cc->op, skb->len, cc->max_len);
4379
4380         data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4381         if (!data)
4382                 return HCI_ERROR_UNSPECIFIED;
4383
4384         return cc->func(hdev, data, skb);
4385 }
4386
4387 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4388                                  struct sk_buff *skb, u16 *opcode, u8 *status,
4389                                  hci_req_complete_t *req_complete,
4390                                  hci_req_complete_skb_t *req_complete_skb)
4391 {
4392         struct hci_ev_cmd_complete *ev = data;
4393         int i;
4394
4395         *opcode = __le16_to_cpu(ev->opcode);
4396
4397         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4398
4399         for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4400                 if (hci_cc_table[i].op == *opcode) {
4401                         *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4402                         break;
4403                 }
4404         }
4405
4406         if (i == ARRAY_SIZE(hci_cc_table)) {
4407                 /* Unknown opcode, assume byte 0 contains the status, so
4408                  * that e.g. __hci_cmd_sync() properly returns errors
4409                  * for vendor specific commands sent by HCI drivers.
4410                  * If a vendor doesn't actually follow this convention we may
4411                  * need to introduce a vendor CC table in order to properly set
4412                  * the status.
4413                  */
4414                 *status = skb->data[0];
4415         }
4416
4417         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4418
4419         hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4420                              req_complete_skb);
4421
4422         if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4423                 bt_dev_err(hdev,
4424                            "unexpected event for opcode 0x%4.4x", *opcode);
4425                 return;
4426         }
4427
4428         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4429                 queue_work(hdev->workqueue, &hdev->cmd_work);
4430 }
4431
4432 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4433 {
4434         struct hci_cp_le_create_cis *cp;
4435         int i;
4436
4437         bt_dev_dbg(hdev, "status 0x%2.2x", status);
4438
4439         if (!status)
4440                 return;
4441
4442         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4443         if (!cp)
4444                 return;
4445
4446         hci_dev_lock(hdev);
4447
4448         /* Remove connection if command failed */
4449         for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4450                 struct hci_conn *conn;
4451                 u16 handle;
4452
4453                 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4454
4455                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4456                 if (conn) {
4457                         conn->state = BT_CLOSED;
4458                         hci_connect_cfm(conn, status);
4459                         hci_conn_del(conn);
4460                 }
4461         }
4462
4463         hci_dev_unlock(hdev);
4464 }
4465
4466 #define HCI_CS(_op, _func) \
4467 { \
4468         .op = _op, \
4469         .func = _func, \
4470 }
4471
4472 static const struct hci_cs {
4473         u16  op;
4474         void (*func)(struct hci_dev *hdev, __u8 status);
4475 } hci_cs_table[] = {
4476         HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4477         HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4478         HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4479         HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4480         HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4481         HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4482         HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4483         HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4484         HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4485                hci_cs_read_remote_ext_features),
4486         HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4487         HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4488                hci_cs_enhanced_setup_sync_conn),
4489         HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4490         HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4491         HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4492         HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4493         HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4494         HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4495         HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4496         HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4497         HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4498 };
4499
4500 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4501                                struct sk_buff *skb, u16 *opcode, u8 *status,
4502                                hci_req_complete_t *req_complete,
4503                                hci_req_complete_skb_t *req_complete_skb)
4504 {
4505         struct hci_ev_cmd_status *ev = data;
4506         int i;
4507
4508         *opcode = __le16_to_cpu(ev->opcode);
4509         *status = ev->status;
4510
4511         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4512
4513         for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4514                 if (hci_cs_table[i].op == *opcode) {
4515                         hci_cs_table[i].func(hdev, ev->status);
4516                         break;
4517                 }
4518         }
4519
4520         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4521
4522         /* Indicate request completion if the command failed. Also, if
4523          * we're not waiting for a special event and we get a success
4524          * command status we should try to flag the request as completed
4525                  * (since for this kind of command there will not be a command
4526          * complete event).
4527          */
4528         if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
4529                 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4530                                      req_complete_skb);
4531                 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4532                         bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4533                                    *opcode);
4534                         return;
4535                 }
4536         }
4537
4538         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4539                 queue_work(hdev->workqueue, &hdev->cmd_work);
4540 }
4541
4542 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4543                                    struct sk_buff *skb)
4544 {
4545         struct hci_ev_hardware_error *ev = data;
4546
4547         bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4548
4549 #ifdef TIZEN_BT
4550         hci_dev_lock(hdev);
4551         mgmt_hardware_error(hdev, ev->code);
4552         hci_dev_unlock(hdev);
4553 #endif
4554         hdev->hw_error_code = ev->code;
4555
4556         queue_work(hdev->req_workqueue, &hdev->error_reset);
4557 }
4558
4559 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4560                                 struct sk_buff *skb)
4561 {
4562         struct hci_ev_role_change *ev = data;
4563         struct hci_conn *conn;
4564
4565         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4566
4567         hci_dev_lock(hdev);
4568
4569         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4570         if (conn) {
4571                 if (!ev->status)
4572                         conn->role = ev->role;
4573
4574                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4575
4576                 hci_role_switch_cfm(conn, ev->status, ev->role);
4577 #ifdef TIZEN_BT
4578                 if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
4579                         hci_conn_change_supervision_timeout(conn,
4580                                         LINK_SUPERVISION_TIMEOUT);
4581 #endif
4582         }
4583
4584         hci_dev_unlock(hdev);
4585 }
4586
4587 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4588                                   struct sk_buff *skb)
4589 {
4590         struct hci_ev_num_comp_pkts *ev = data;
4591         int i;
4592
4593         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4594                              flex_array_size(ev, handles, ev->num)))
4595                 return;
4596
4597         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4598                 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4599                 return;
4600         }
4601
4602         bt_dev_dbg(hdev, "num %d", ev->num);
4603
4604         for (i = 0; i < ev->num; i++) {
4605                 struct hci_comp_pkts_info *info = &ev->handles[i];
4606                 struct hci_conn *conn;
4607                 __u16  handle, count;
4608
4609                 handle = __le16_to_cpu(info->handle);
4610                 count  = __le16_to_cpu(info->count);
4611
4612                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4613                 if (!conn)
4614                         continue;
4615
4616                 conn->sent -= count;
4617
4618                 switch (conn->type) {
4619                 case ACL_LINK:
4620                         hdev->acl_cnt += count;
4621                         if (hdev->acl_cnt > hdev->acl_pkts)
4622                                 hdev->acl_cnt = hdev->acl_pkts;
4623                         break;
4624
4625                 case LE_LINK:
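                        /* Controllers without a dedicated LE buffer pool
                         * share the BR/EDR ACL buffers, so credit the ACL
                         * pool in that case.
                         */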
4626                         if (hdev->le_pkts) {
4627                                 hdev->le_cnt += count;
4628                                 if (hdev->le_cnt > hdev->le_pkts)
4629                                         hdev->le_cnt = hdev->le_pkts;
4630                         } else {
4631                                 hdev->acl_cnt += count;
4632                                 if (hdev->acl_cnt > hdev->acl_pkts)
4633                                         hdev->acl_cnt = hdev->acl_pkts;
4634                         }
4635                         break;
4636
4637                 case SCO_LINK:
4638                         hdev->sco_cnt += count;
4639                         if (hdev->sco_cnt > hdev->sco_pkts)
4640                                 hdev->sco_cnt = hdev->sco_pkts;
4641                         break;
4642
4643                 case ISO_LINK:
4644                         if (hdev->iso_pkts) {
4645                                 hdev->iso_cnt += count;
4646                                 if (hdev->iso_cnt > hdev->iso_pkts)
4647                                         hdev->iso_cnt = hdev->iso_pkts;
4648                         } else if (hdev->le_pkts) {
4649                                 hdev->le_cnt += count;
4650                                 if (hdev->le_cnt > hdev->le_pkts)
4651                                         hdev->le_cnt = hdev->le_pkts;
4652                         } else {
4653                                 hdev->acl_cnt += count;
4654                                 if (hdev->acl_cnt > hdev->acl_pkts)
4655                                         hdev->acl_cnt = hdev->acl_pkts;
4656                         }
4657                         break;
4658
4659                 default:
4660                         bt_dev_err(hdev, "unknown type %d conn %p",
4661                                    conn->type, conn);
4662                         break;
4663                 }
4664         }
4665
4666         queue_work(hdev->workqueue, &hdev->tx_work);
4667 }
4668
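/* On AMP controllers the handle identifies a logical link (hci_chan), so map
 * it back to its owning connection; on a primary controller it is a regular
 * connection handle.
 */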
4669 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4670                                                  __u16 handle)
4671 {
4672         struct hci_chan *chan;
4673
4674         switch (hdev->dev_type) {
4675         case HCI_PRIMARY:
4676                 return hci_conn_hash_lookup_handle(hdev, handle);
4677         case HCI_AMP:
4678                 chan = hci_chan_lookup_handle(hdev, handle);
4679                 if (chan)
4680                         return chan->conn;
4681                 break;
4682         default:
4683                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4684                 break;
4685         }
4686
4687         return NULL;
4688 }
4689
4690 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4691                                     struct sk_buff *skb)
4692 {
4693         struct hci_ev_num_comp_blocks *ev = data;
4694         int i;
4695
4696         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4697                              flex_array_size(ev, handles, ev->num_hndl)))
4698                 return;
4699
4700         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4701                 bt_dev_err(hdev, "wrong event for mode %d",
4702                            hdev->flow_ctl_mode);
4703                 return;
4704         }
4705
4706         bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4707                    ev->num_hndl);
4708
4709         for (i = 0; i < ev->num_hndl; i++) {
4710                 struct hci_comp_blocks_info *info = &ev->handles[i];
4711                 struct hci_conn *conn = NULL;
4712                 __u16  handle, block_count;
4713
4714                 handle = __le16_to_cpu(info->handle);
4715                 block_count = __le16_to_cpu(info->blocks);
4716
4717                 conn = __hci_conn_lookup_handle(hdev, handle);
4718                 if (!conn)
4719                         continue;
4720
4721                 conn->sent -= block_count;
4722
4723                 switch (conn->type) {
4724                 case ACL_LINK:
4725                 case AMP_LINK:
4726                         hdev->block_cnt += block_count;
4727                         if (hdev->block_cnt > hdev->num_blocks)
4728                                 hdev->block_cnt = hdev->num_blocks;
4729                         break;
4730
4731                 default:
4732                         bt_dev_err(hdev, "unknown type %d conn %p",
4733                                    conn->type, conn);
4734                         break;
4735                 }
4736         }
4737
4738         queue_work(hdev->workqueue, &hdev->tx_work);
4739 }
4740
4741 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4742                                 struct sk_buff *skb)
4743 {
4744         struct hci_ev_mode_change *ev = data;
4745         struct hci_conn *conn;
4746
4747         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4748
4749         hci_dev_lock(hdev);
4750
4751         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4752         if (conn) {
4753                 conn->mode = ev->mode;
4754
4755                 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4756                                         &conn->flags)) {
4757                         if (conn->mode == HCI_CM_ACTIVE)
4758                                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4759                         else
4760                                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4761                 }
4762
4763                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4764                         hci_sco_setup(conn, ev->status);
4765         }
4766
4767         hci_dev_unlock(hdev);
4768 }
4769
4770 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4771                                      struct sk_buff *skb)
4772 {
4773         struct hci_ev_pin_code_req *ev = data;
4774         struct hci_conn *conn;
4775
4776         bt_dev_dbg(hdev, "");
4777
4778         hci_dev_lock(hdev);
4779
4780         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4781         if (!conn)
4782                 goto unlock;
4783
4784         if (conn->state == BT_CONNECTED) {
4785                 hci_conn_hold(conn);
4786                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4787                 hci_conn_drop(conn);
4788         }
4789
4790         if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4791             !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4792                 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4793                              sizeof(ev->bdaddr), &ev->bdaddr);
4794         } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4795                 u8 secure;
4796
4797                 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4798                         secure = 1;
4799                 else
4800                         secure = 0;
4801
4802                 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4803         }
4804
4805 unlock:
4806         hci_dev_unlock(hdev);
4807 }
4808
4809 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4810 {
4811         if (key_type == HCI_LK_CHANGED_COMBINATION)
4812                 return;
4813
4814         conn->pin_length = pin_len;
4815         conn->key_type = key_type;
4816
4817         switch (key_type) {
4818         case HCI_LK_LOCAL_UNIT:
4819         case HCI_LK_REMOTE_UNIT:
4820         case HCI_LK_DEBUG_COMBINATION:
4821                 return;
4822         case HCI_LK_COMBINATION:
4823                 if (pin_len == 16)
4824                         conn->pending_sec_level = BT_SECURITY_HIGH;
4825                 else
4826                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4827                 break;
4828         case HCI_LK_UNAUTH_COMBINATION_P192:
4829         case HCI_LK_UNAUTH_COMBINATION_P256:
4830                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4831                 break;
4832         case HCI_LK_AUTH_COMBINATION_P192:
4833                 conn->pending_sec_level = BT_SECURITY_HIGH;
4834                 break;
4835         case HCI_LK_AUTH_COMBINATION_P256:
4836                 conn->pending_sec_level = BT_SECURITY_FIPS;
4837                 break;
4838         }
4839 }
4840
4841 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4842                                      struct sk_buff *skb)
4843 {
4844         struct hci_ev_link_key_req *ev = data;
4845         struct hci_cp_link_key_reply cp;
4846         struct hci_conn *conn;
4847         struct link_key *key;
4848
4849         bt_dev_dbg(hdev, "");
4850
4851         if (!hci_dev_test_flag(hdev, HCI_MGMT))
4852                 return;
4853
4854         hci_dev_lock(hdev);
4855
4856         key = hci_find_link_key(hdev, &ev->bdaddr);
4857         if (!key) {
4858                 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4859                 goto not_found;
4860         }
4861
4862         bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4863
4864         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4865         if (conn) {
4866                 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4867
4868                 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4869                      key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4870                     conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4871                         bt_dev_dbg(hdev, "ignoring unauthenticated key");
4872                         goto not_found;
4873                 }
4874
4875                 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4876                     (conn->pending_sec_level == BT_SECURITY_HIGH ||
4877                      conn->pending_sec_level == BT_SECURITY_FIPS)) {
4878                         bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4879                         goto not_found;
4880                 }
4881
4882                 conn_set_key(conn, key->type, key->pin_len);
4883         }
4884
4885         bacpy(&cp.bdaddr, &ev->bdaddr);
4886         memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4887
4888         hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4889
4890         hci_dev_unlock(hdev);
4891
4892         return;
4893
4894 not_found:
4895         hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4896         hci_dev_unlock(hdev);
4897 }
4898
4899 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4900                                     struct sk_buff *skb)
4901 {
4902         struct hci_ev_link_key_notify *ev = data;
4903         struct hci_conn *conn;
4904         struct link_key *key;
4905         bool persistent;
4906         u8 pin_len = 0;
4907
4908         bt_dev_dbg(hdev, "");
4909
4910         hci_dev_lock(hdev);
4911
4912         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4913         if (!conn)
4914                 goto unlock;
4915
4916         hci_conn_hold(conn);
4917         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4918         hci_conn_drop(conn);
4919
4920         set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4921         conn_set_key(conn, ev->key_type, conn->pin_length);
4922
4923         if (!hci_dev_test_flag(hdev, HCI_MGMT))
4924                 goto unlock;
4925
4926         key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4927                                 ev->key_type, pin_len, &persistent);
4928         if (!key)
4929                 goto unlock;
4930
4931         /* Update connection information since adding the key will have
4932          * fixed up the type in the case of changed combination keys.
4933          */
4934         if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4935                 conn_set_key(conn, key->type, key->pin_len);
4936
4937         mgmt_new_link_key(hdev, key, persistent);
4938
4939         /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4940          * is set. If it's not set simply remove the key from the kernel
4941          * list (we've still notified user space about it but with
4942          * store_hint being 0).
4943          */
4944         if (key->type == HCI_LK_DEBUG_COMBINATION &&
4945             !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4946                 list_del_rcu(&key->list);
4947                 kfree_rcu(key, rcu);
4948                 goto unlock;
4949         }
4950
4951         if (persistent)
4952                 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4953         else
4954                 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4955
4956 unlock:
4957         hci_dev_unlock(hdev);
4958 }
4959
4960 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4961                                  struct sk_buff *skb)
4962 {
4963         struct hci_ev_clock_offset *ev = data;
4964         struct hci_conn *conn;
4965
4966         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4967
4968         hci_dev_lock(hdev);
4969
4970         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4971         if (conn && !ev->status) {
4972                 struct inquiry_entry *ie;
4973
4974                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4975                 if (ie) {
4976                         ie->data.clock_offset = ev->clock_offset;
4977                         ie->timestamp = jiffies;
4978                 }
4979         }
4980
4981         hci_dev_unlock(hdev);
4982 }
4983
4984 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4985                                     struct sk_buff *skb)
4986 {
4987         struct hci_ev_pkt_type_change *ev = data;
4988         struct hci_conn *conn;
4989
4990         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4991
4992         hci_dev_lock(hdev);
4993
4994         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4995         if (conn && !ev->status)
4996                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4997
4998         hci_dev_unlock(hdev);
4999 }
5000
5001 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
5002                                    struct sk_buff *skb)
5003 {
5004         struct hci_ev_pscan_rep_mode *ev = data;
5005         struct inquiry_entry *ie;
5006
5007         bt_dev_dbg(hdev, "");
5008
5009         hci_dev_lock(hdev);
5010
5011         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5012         if (ie) {
5013                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
5014                 ie->timestamp = jiffies;
5015         }
5016
5017         hci_dev_unlock(hdev);
5018 }
5019
5020 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
5021                                              struct sk_buff *skb)
5022 {
5023         struct hci_ev_inquiry_result_rssi *ev = edata;
5024         struct inquiry_data data;
5025         int i;
5026
5027         bt_dev_dbg(hdev, "num_rsp %d", ev->num);
5028
5029         if (!ev->num)
5030                 return;
5031
5032         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5033                 return;
5034
5035         hci_dev_lock(hdev);
5036
5037         if (skb->len == array_size(ev->num,
5038                                    sizeof(struct inquiry_info_rssi_pscan))) {
5039                 struct inquiry_info_rssi_pscan *info;
5040
5041                 for (i = 0; i < ev->num; i++) {
5042                         u32 flags;
5043
5044                         info = hci_ev_skb_pull(hdev, skb,
5045                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
5046                                                sizeof(*info));
5047                         if (!info) {
5048                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
5049                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
5050                                 goto unlock;
5051                         }
5052
5053                         bacpy(&data.bdaddr, &info->bdaddr);
5054                         data.pscan_rep_mode     = info->pscan_rep_mode;
5055                         data.pscan_period_mode  = info->pscan_period_mode;
5056                         data.pscan_mode         = info->pscan_mode;
5057                         memcpy(data.dev_class, info->dev_class, 3);
5058                         data.clock_offset       = info->clock_offset;
5059                         data.rssi               = info->rssi;
5060                         data.ssp_mode           = 0x00;
5061
5062                         flags = hci_inquiry_cache_update(hdev, &data, false);
5063
5064                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5065                                           info->dev_class, info->rssi,
5066                                           flags, NULL, 0, NULL, 0, 0);
5067                 }
5068         } else if (skb->len == array_size(ev->num,
5069                                           sizeof(struct inquiry_info_rssi))) {
5070                 struct inquiry_info_rssi *info;
5071
5072                 for (i = 0; i < ev->num; i++) {
5073                         u32 flags;
5074
5075                         info = hci_ev_skb_pull(hdev, skb,
5076                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
5077                                                sizeof(*info));
5078                         if (!info) {
5079                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
5080                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
5081                                 goto unlock;
5082                         }
5083
5084                         bacpy(&data.bdaddr, &info->bdaddr);
5085                         data.pscan_rep_mode     = info->pscan_rep_mode;
5086                         data.pscan_period_mode  = info->pscan_period_mode;
5087                         data.pscan_mode         = 0x00;
5088                         memcpy(data.dev_class, info->dev_class, 3);
5089                         data.clock_offset       = info->clock_offset;
5090                         data.rssi               = info->rssi;
5091                         data.ssp_mode           = 0x00;
5092
5093                         flags = hci_inquiry_cache_update(hdev, &data, false);
5094
5095                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5096                                           info->dev_class, info->rssi,
5097                                           flags, NULL, 0, NULL, 0, 0);
5098                 }
5099         } else {
5100                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
5101                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
5102         }
5103 unlock:
5104         hci_dev_unlock(hdev);
5105 }
5106
5107 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
5108                                         struct sk_buff *skb)
5109 {
5110         struct hci_ev_remote_ext_features *ev = data;
5111         struct hci_conn *conn;
5112
5113         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5114
5115         hci_dev_lock(hdev);
5116
5117         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5118         if (!conn)
5119                 goto unlock;
5120
5121         if (ev->page < HCI_MAX_PAGES)
5122                 memcpy(conn->features[ev->page], ev->features, 8);
5123
5124         if (!ev->status && ev->page == 0x01) {
5125                 struct inquiry_entry *ie;
5126
5127                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
5128                 if (ie)
5129                         ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5130
5131                 if (ev->features[0] & LMP_HOST_SSP) {
5132                         set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5133                 } else {
5134                         /* It is mandatory by the Bluetooth specification that
5135                          * Extended Inquiry Results are only used when Secure
5136                          * Simple Pairing is enabled, but some devices violate
5137                          * this.
5138                          *
5139                          * To make these devices work, the internal SSP
5140                          * enabled flag needs to be cleared if the remote host
5141                          * features do not indicate SSP support */
5142                         clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5143                 }
5144
5145                 if (ev->features[0] & LMP_HOST_SC)
5146                         set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
5147         }
5148
5149         if (conn->state != BT_CONFIG)
5150                 goto unlock;
5151
5152         if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
5153                 struct hci_cp_remote_name_req cp;
5154                 memset(&cp, 0, sizeof(cp));
5155                 bacpy(&cp.bdaddr, &conn->dst);
5156                 cp.pscan_rep_mode = 0x02;
5157                 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
5158         } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5159                 mgmt_device_connected(hdev, conn, NULL, 0);
5160
5161         if (!hci_outgoing_auth_needed(hdev, conn)) {
5162                 conn->state = BT_CONNECTED;
5163                 hci_connect_cfm(conn, ev->status);
5164                 hci_conn_drop(conn);
5165         }
5166
5167 unlock:
5168         hci_dev_unlock(hdev);
5169 }
5170
5171 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5172                                        struct sk_buff *skb)
5173 {
5174         struct hci_ev_sync_conn_complete *ev = data;
5175         struct hci_conn *conn;
5176         u8 status = ev->status;
5177
5178         switch (ev->link_type) {
5179         case SCO_LINK:
5180         case ESCO_LINK:
5181                 break;
5182         default:
5183                 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5184                  * for HCI_Synchronous_Connection_Complete is limited to
5185                  * either SCO or eSCO
5186                  */
5187                 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5188                 return;
5189         }
5190
5191         bt_dev_dbg(hdev, "status 0x%2.2x", status);
5192
5193         hci_dev_lock(hdev);
5194
5195         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5196         if (!conn) {
5197                 if (ev->link_type == ESCO_LINK)
5198                         goto unlock;
5199
5200                 /* When the link type in the event indicates SCO connection
5201                  * and lookup of the connection object fails, then check
5202                  * if an eSCO connection object exists.
5203                  *
5204                  * The core limits the synchronous connections to either
5205                  * SCO or eSCO. The eSCO connection is preferred and tried
5206                  * to be setup first and until successfully established,
5207                  * the link type will be hinted as eSCO.
5208                  */
5209                 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5210                 if (!conn)
5211                         goto unlock;
5212         }
5213
5214         /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5215          * Processing it more than once per connection can corrupt kernel memory.
5216          *
5217          * As the connection handle is set here for the first time, it indicates
5218          * whether the connection is already set up.
5219          */
5220         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5221                 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5222                 goto unlock;
5223         }
5224
5225         switch (status) {
5226         case 0x00:
5227                 conn->handle = __le16_to_cpu(ev->handle);
5228                 if (conn->handle > HCI_CONN_HANDLE_MAX) {
5229                         bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
5230                                    conn->handle, HCI_CONN_HANDLE_MAX);
5231                         status = HCI_ERROR_INVALID_PARAMETERS;
5232                         conn->state = BT_CLOSED;
5233                         break;
5234                 }
5235
5236                 conn->state  = BT_CONNECTED;
5237                 conn->type   = ev->link_type;
5238
5239                 hci_debugfs_create_conn(conn);
5240                 hci_conn_add_sysfs(conn);
5241                 break;
5242
5243         case 0x10:      /* Connection Accept Timeout */
5244         case 0x0d:      /* Connection Rejected due to Limited Resources */
5245         case 0x11:      /* Unsupported Feature or Parameter Value */
5246         case 0x1c:      /* SCO interval rejected */
5247         case 0x1a:      /* Unsupported Remote Feature */
5248         case 0x1e:      /* Invalid LMP Parameters */
5249         case 0x1f:      /* Unspecified error */
5250         case 0x20:      /* Unsupported LMP Parameter value */
5251                 if (conn->out) {
5252                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5253                                         (hdev->esco_type & EDR_ESCO_MASK);
5254                         if (hci_setup_sync(conn, conn->link->handle))
5255                                 goto unlock;
5256                 }
5257                 fallthrough;
5258
5259         default:
5260                 conn->state = BT_CLOSED;
5261                 break;
5262         }
5263
5264         bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5265         /* Notify only in case of SCO over HCI transport data path which
5266          * is zero and non-zero value shall be non-HCI transport data path
5267          */
5268         if (conn->codec.data_path == 0 && hdev->notify) {
5269                 switch (ev->air_mode) {
5270                 case 0x02:
5271                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5272                         break;
5273                 case 0x03:
5274                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5275                         break;
5276                 }
5277         }
5278
5279         hci_connect_cfm(conn, status);
5280         if (status)
5281                 hci_conn_del(conn);
5282
5283 unlock:
5284         hci_dev_unlock(hdev);
5285 }
5286
5287 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5288 {
5289         size_t parsed = 0;
5290
5291         while (parsed < eir_len) {
5292                 u8 field_len = eir[0];
5293
5294                 if (field_len == 0)
5295                         return parsed;
5296
5297                 parsed += field_len + 1;
5298                 eir += field_len + 1;
5299         }
5300
5301         return eir_len;
5302 }
5303
5304 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5305                                             struct sk_buff *skb)
5306 {
5307         struct hci_ev_ext_inquiry_result *ev = edata;
5308         struct inquiry_data data;
5309         size_t eir_len;
5310         int i;
5311
5312         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5313                              flex_array_size(ev, info, ev->num)))
5314                 return;
5315
5316         bt_dev_dbg(hdev, "num %d", ev->num);
5317
5318         if (!ev->num)
5319                 return;
5320
5321         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5322                 return;
5323
5324         hci_dev_lock(hdev);
5325
5326         for (i = 0; i < ev->num; i++) {
5327                 struct extended_inquiry_info *info = &ev->info[i];
5328                 u32 flags;
5329                 bool name_known;
5330
5331                 bacpy(&data.bdaddr, &info->bdaddr);
5332                 data.pscan_rep_mode     = info->pscan_rep_mode;
5333                 data.pscan_period_mode  = info->pscan_period_mode;
5334                 data.pscan_mode         = 0x00;
5335                 memcpy(data.dev_class, info->dev_class, 3);
5336                 data.clock_offset       = info->clock_offset;
5337                 data.rssi               = info->rssi;
5338                 data.ssp_mode           = 0x01;
5339
5340                 if (hci_dev_test_flag(hdev, HCI_MGMT))
5341                         name_known = eir_get_data(info->data,
5342                                                   sizeof(info->data),
5343                                                   EIR_NAME_COMPLETE, NULL);
5344                 else
5345                         name_known = true;
5346
5347                 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5348
5349                 eir_len = eir_get_length(info->data, sizeof(info->data));
5350
5351                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5352                                   info->dev_class, info->rssi,
5353                                   flags, info->data, eir_len, NULL, 0, 0);
5354         }
5355
5356         hci_dev_unlock(hdev);
5357 }
5358
5359 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5360                                          struct sk_buff *skb)
5361 {
5362         struct hci_ev_key_refresh_complete *ev = data;
5363         struct hci_conn *conn;
5364
5365         bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5366                    __le16_to_cpu(ev->handle));
5367
5368         hci_dev_lock(hdev);
5369
5370         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5371         if (!conn)
5372                 goto unlock;
5373
5374         /* For BR/EDR the necessary steps are taken through the
5375          * auth_complete event.
5376          */
5377         if (conn->type != LE_LINK)
5378                 goto unlock;
5379
5380         if (!ev->status)
5381                 conn->sec_level = conn->pending_sec_level;
5382
5383         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5384
5385         if (ev->status && conn->state == BT_CONNECTED) {
5386                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5387                 hci_conn_drop(conn);
5388                 goto unlock;
5389         }
5390
5391         if (conn->state == BT_CONFIG) {
5392                 if (!ev->status)
5393                         conn->state = BT_CONNECTED;
5394
5395                 hci_connect_cfm(conn, ev->status);
5396                 hci_conn_drop(conn);
5397         } else {
5398                 hci_auth_cfm(conn, ev->status);
5399
5400                 hci_conn_hold(conn);
5401                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5402                 hci_conn_drop(conn);
5403         }
5404
5405 unlock:
5406         hci_dev_unlock(hdev);
5407 }
5408
5409 static u8 hci_get_auth_req(struct hci_conn *conn)
5410 {
5411 #ifdef TIZEN_BT
5412         if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
5413                 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5414                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5415                         return HCI_AT_GENERAL_BONDING_MITM;
5416         }
5417 #endif
5418
5419         /* If remote requests no-bonding follow that lead */
5420         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5421             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5422                 return conn->remote_auth | (conn->auth_type & 0x01);
5423
5424         /* If both remote and local have enough IO capabilities, require
5425          * MITM protection
5426          */
5427         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5428             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5429                 return conn->remote_auth | 0x01;
5430
5431         /* No MITM protection possible so ignore remote requirement */
5432         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5433 }
5434
5435 static u8 bredr_oob_data_present(struct hci_conn *conn)
5436 {
5437         struct hci_dev *hdev = conn->hdev;
5438         struct oob_data *data;
5439
5440         data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5441         if (!data)
5442                 return 0x00;
5443
5444         if (bredr_sc_enabled(hdev)) {
5445                 /* When Secure Connections is enabled, then just
5446                  * return the present value stored with the OOB
5447                  * data. The stored value contains the right present
5448                  * information. However it can only be trusted when
5449                  * not in Secure Connection Only mode.
5450                  */
5451                 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5452                         return data->present;
5453
5454                 /* When Secure Connections Only mode is enabled, then
5455                  * the P-256 values are required. If they are not
5456                  * available, then do not declare that OOB data is
5457                  * present.
5458                  */
5459                 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5460                     !memcmp(data->hash256, ZERO_KEY, 16))
5461                         return 0x00;
5462
5463                 return 0x02;
5464         }
5465
5466         /* When Secure Connections is not enabled or actually
5467          * not supported by the hardware, then check that if
5468          * P-192 data values are present.
5469          */
5470         if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5471             !memcmp(data->hash192, ZERO_KEY, 16))
5472                 return 0x00;
5473
5474         return 0x01;
5475 }
5476
5477 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5478                                     struct sk_buff *skb)
5479 {
5480         struct hci_ev_io_capa_request *ev = data;
5481         struct hci_conn *conn;
5482
5483         bt_dev_dbg(hdev, "");
5484
5485         hci_dev_lock(hdev);
5486
5487         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5488         if (!conn)
5489                 goto unlock;
5490
5491         hci_conn_hold(conn);
5492
5493         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5494                 goto unlock;
5495
5496         /* Allow pairing if we're pairable, the initiators of the
5497          * pairing or if the remote is not requesting bonding.
5498          */
5499         if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5500             test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5501             (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5502                 struct hci_cp_io_capability_reply cp;
5503
5504                 bacpy(&cp.bdaddr, &ev->bdaddr);
5505                 /* Change the IO capability from KeyboardDisplay
5506                  * to DisplayYesNo as it is not supported by BT spec. */
5507                 cp.capability = (conn->io_capability == 0x04) ?
5508                                 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5509
5510                 /* If we are initiators, there is no remote information yet */
5511                 if (conn->remote_auth == 0xff) {
5512                         /* Request MITM protection if our IO caps allow it
5513                          * except for the no-bonding case.
5514                          */
5515                         if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5516                             conn->auth_type != HCI_AT_NO_BONDING)
5517                                 conn->auth_type |= 0x01;
5518                 } else {
5519                         conn->auth_type = hci_get_auth_req(conn);
5520                 }
5521
5522                 /* If we're not bondable, force one of the non-bondable
5523                  * authentication requirement values.
5524                  */
5525                 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5526                         conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5527
5528                 cp.authentication = conn->auth_type;
5529                 cp.oob_data = bredr_oob_data_present(conn);
5530
5531                 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5532                              sizeof(cp), &cp);
5533         } else {
5534                 struct hci_cp_io_capability_neg_reply cp;
5535
5536                 bacpy(&cp.bdaddr, &ev->bdaddr);
5537                 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5538
5539                 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5540                              sizeof(cp), &cp);
5541         }
5542
5543 unlock:
5544         hci_dev_unlock(hdev);
5545 }
5546
5547 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5548                                   struct sk_buff *skb)
5549 {
5550         struct hci_ev_io_capa_reply *ev = data;
5551         struct hci_conn *conn;
5552
5553         bt_dev_dbg(hdev, "");
5554
5555         hci_dev_lock(hdev);
5556
5557         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5558         if (!conn)
5559                 goto unlock;
5560
5561         conn->remote_cap = ev->capability;
5562         conn->remote_auth = ev->authentication;
5563
5564 unlock:
5565         hci_dev_unlock(hdev);
5566 }
5567
5568 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5569                                          struct sk_buff *skb)
5570 {
5571         struct hci_ev_user_confirm_req *ev = data;
5572         int loc_mitm, rem_mitm, confirm_hint = 0;
5573         struct hci_conn *conn;
5574
5575         bt_dev_dbg(hdev, "");
5576
5577         hci_dev_lock(hdev);
5578
5579         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5580                 goto unlock;
5581
5582         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5583         if (!conn)
5584                 goto unlock;
5585
5586         loc_mitm = (conn->auth_type & 0x01);
5587         rem_mitm = (conn->remote_auth & 0x01);
5588
5589         /* If we require MITM but the remote device can't provide that
5590          * (it has NoInputNoOutput) then reject the confirmation
5591          * request. We check the security level here since it doesn't
5592          * necessarily match conn->auth_type.
5593          */
5594         if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5595             conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5596                 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5597                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5598                              sizeof(ev->bdaddr), &ev->bdaddr);
5599                 goto unlock;
5600         }
5601
5602         /* If no side requires MITM protection; auto-accept */
5603         if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5604             (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5605
5606                 /* If we're not the initiators request authorization to
5607                  * proceed from user space (mgmt_user_confirm with
5608                  * confirm_hint set to 1). The exception is if neither
5609                  * side had MITM or if the local IO capability is
5610                  * NoInputNoOutput, in which case we do auto-accept
5611                  */
5612                 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5613                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5614                     (loc_mitm || rem_mitm)) {
5615                         bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5616                         confirm_hint = 1;
5617                         goto confirm;
5618                 }
5619
5620                 /* If there already exists link key in local host, leave the
5621                  * decision to user space since the remote device could be
5622                  * legitimate or malicious.
5623                  */
5624                 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5625                         bt_dev_dbg(hdev, "Local host already has link key");
5626                         confirm_hint = 1;
5627                         goto confirm;
5628                 }
5629
5630                 BT_DBG("Auto-accept of user confirmation with %ums delay",
5631                        hdev->auto_accept_delay);
5632
5633                 if (hdev->auto_accept_delay > 0) {
5634                         int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5635                         queue_delayed_work(conn->hdev->workqueue,
5636                                            &conn->auto_accept_work, delay);
5637                         goto unlock;
5638                 }
5639
5640                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5641                              sizeof(ev->bdaddr), &ev->bdaddr);
5642                 goto unlock;
5643         }
5644
5645 confirm:
5646         mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5647                                   le32_to_cpu(ev->passkey), confirm_hint);
5648
5649 unlock:
5650         hci_dev_unlock(hdev);
5651 }
5652
5653 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5654                                          struct sk_buff *skb)
5655 {
5656         struct hci_ev_user_passkey_req *ev = data;
5657
5658         bt_dev_dbg(hdev, "");
5659
5660         if (hci_dev_test_flag(hdev, HCI_MGMT))
5661                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5662 }
5663
5664 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5665                                         struct sk_buff *skb)
5666 {
5667         struct hci_ev_user_passkey_notify *ev = data;
5668         struct hci_conn *conn;
5669
5670         bt_dev_dbg(hdev, "");
5671
5672         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5673         if (!conn)
5674                 return;
5675
5676         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5677         conn->passkey_entered = 0;
5678
5679         if (hci_dev_test_flag(hdev, HCI_MGMT))
5680                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5681                                          conn->dst_type, conn->passkey_notify,
5682                                          conn->passkey_entered);
5683 }
5684
5685 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5686                                     struct sk_buff *skb)
5687 {
5688         struct hci_ev_keypress_notify *ev = data;
5689         struct hci_conn *conn;
5690
5691         bt_dev_dbg(hdev, "");
5692
5693         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5694         if (!conn)
5695                 return;
5696
5697         switch (ev->type) {
5698         case HCI_KEYPRESS_STARTED:
5699                 conn->passkey_entered = 0;
5700                 return;
5701
5702         case HCI_KEYPRESS_ENTERED:
5703                 conn->passkey_entered++;
5704                 break;
5705
5706         case HCI_KEYPRESS_ERASED:
5707                 conn->passkey_entered--;
5708                 break;
5709
5710         case HCI_KEYPRESS_CLEARED:
5711                 conn->passkey_entered = 0;
5712                 break;
5713
5714         case HCI_KEYPRESS_COMPLETED:
5715                 return;
5716         }
5717
5718         if (hci_dev_test_flag(hdev, HCI_MGMT))
5719                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5720                                          conn->dst_type, conn->passkey_notify,
5721                                          conn->passkey_entered);
5722 }
5723
5724 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5725                                          struct sk_buff *skb)
5726 {
5727         struct hci_ev_simple_pair_complete *ev = data;
5728         struct hci_conn *conn;
5729
5730         bt_dev_dbg(hdev, "");
5731
5732         hci_dev_lock(hdev);
5733
5734         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5735         if (!conn)
5736                 goto unlock;
5737
5738         /* Reset the authentication requirement to unknown */
5739         conn->remote_auth = 0xff;
5740
5741         /* To avoid duplicate auth_failed events to user space we check
5742          * the HCI_CONN_AUTH_PEND flag which will be set if we
5743          * initiated the authentication. A traditional auth_complete
5744          * event gets always produced as initiator and is also mapped to
5745          * the mgmt_auth_failed event */
5746         if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5747                 mgmt_auth_failed(conn, ev->status);
5748
5749         hci_conn_drop(conn);
5750
5751 unlock:
5752         hci_dev_unlock(hdev);
5753 }
5754
5755 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5756                                          struct sk_buff *skb)
5757 {
5758         struct hci_ev_remote_host_features *ev = data;
5759         struct inquiry_entry *ie;
5760         struct hci_conn *conn;
5761
5762         bt_dev_dbg(hdev, "");
5763
5764         hci_dev_lock(hdev);
5765
5766         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5767         if (conn)
5768                 memcpy(conn->features[1], ev->features, 8);
5769
5770         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5771         if (ie)
5772                 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5773
5774         hci_dev_unlock(hdev);
5775 }
5776
5777 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5778                                             struct sk_buff *skb)
5779 {
5780         struct hci_ev_remote_oob_data_request *ev = edata;
5781         struct oob_data *data;
5782
5783         bt_dev_dbg(hdev, "");
5784
5785         hci_dev_lock(hdev);
5786
5787         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5788                 goto unlock;
5789
5790         data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5791         if (!data) {
5792                 struct hci_cp_remote_oob_data_neg_reply cp;
5793
5794                 bacpy(&cp.bdaddr, &ev->bdaddr);
5795                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5796                              sizeof(cp), &cp);
5797                 goto unlock;
5798         }
5799
5800         if (bredr_sc_enabled(hdev)) {
5801                 struct hci_cp_remote_oob_ext_data_reply cp;
5802
5803                 bacpy(&cp.bdaddr, &ev->bdaddr);
5804                 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5805                         memset(cp.hash192, 0, sizeof(cp.hash192));
5806                         memset(cp.rand192, 0, sizeof(cp.rand192));
5807                 } else {
5808                         memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5809                         memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5810                 }
5811                 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5812                 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5813
5814                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5815                              sizeof(cp), &cp);
5816         } else {
5817                 struct hci_cp_remote_oob_data_reply cp;
5818
5819                 bacpy(&cp.bdaddr, &ev->bdaddr);
5820                 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5821                 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5822
5823                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5824                              sizeof(cp), &cp);
5825         }
5826
5827 unlock:
5828         hci_dev_unlock(hdev);
5829 }
5830
5831 #if IS_ENABLED(CONFIG_BT_HS)
5832 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5833                                   struct sk_buff *skb)
5834 {
5835         struct hci_ev_channel_selected *ev = data;
5836         struct hci_conn *hcon;
5837
5838         bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5839
5840         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5841         if (!hcon)
5842                 return;
5843
5844         amp_read_loc_assoc_final_data(hdev, hcon);
5845 }
5846
5847 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5848                                       struct sk_buff *skb)
5849 {
5850         struct hci_ev_phy_link_complete *ev = data;
5851         struct hci_conn *hcon, *bredr_hcon;
5852
5853         bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5854                    ev->status);
5855
5856         hci_dev_lock(hdev);
5857
5858         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5859         if (!hcon)
5860                 goto unlock;
5861
5862         if (!hcon->amp_mgr)
5863                 goto unlock;
5864
5865         if (ev->status) {
5866                 hci_conn_del(hcon);
5867                 goto unlock;
5868         }
5869
5870         bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5871
5872         hcon->state = BT_CONNECTED;
5873         bacpy(&hcon->dst, &bredr_hcon->dst);
5874
5875         hci_conn_hold(hcon);
5876         hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5877         hci_conn_drop(hcon);
5878
5879         hci_debugfs_create_conn(hcon);
5880         hci_conn_add_sysfs(hcon);
5881
5882         amp_physical_cfm(bredr_hcon, hcon);
5883
5884 unlock:
5885         hci_dev_unlock(hdev);
5886 }
5887
5888 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5889                                      struct sk_buff *skb)
5890 {
5891         struct hci_ev_logical_link_complete *ev = data;
5892         struct hci_conn *hcon;
5893         struct hci_chan *hchan;
5894         struct amp_mgr *mgr;
5895
5896         bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5897                    le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5898
5899         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5900         if (!hcon)
5901                 return;
5902
5903         /* Create AMP hchan */
5904         hchan = hci_chan_create(hcon);
5905         if (!hchan)
5906                 return;
5907
5908         hchan->handle = le16_to_cpu(ev->handle);
5909         hchan->amp = true;
5910
5911         BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5912
5913         mgr = hcon->amp_mgr;
5914         if (mgr && mgr->bredr_chan) {
5915                 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5916
5917                 l2cap_chan_lock(bredr_chan);
5918
5919                 bredr_chan->conn->mtu = hdev->block_mtu;
5920                 l2cap_logical_cfm(bredr_chan, hchan, 0);
5921                 hci_conn_hold(hcon);
5922
5923                 l2cap_chan_unlock(bredr_chan);
5924         }
5925 }
5926
5927 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5928                                              struct sk_buff *skb)
5929 {
5930         struct hci_ev_disconn_logical_link_complete *ev = data;
5931         struct hci_chan *hchan;
5932
5933         bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5934                    le16_to_cpu(ev->handle), ev->status);
5935
5936         if (ev->status)
5937                 return;
5938
5939         hci_dev_lock(hdev);
5940
5941         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5942         if (!hchan || !hchan->amp)
5943                 goto unlock;
5944
5945         amp_destroy_logical_link(hchan, ev->reason);
5946
5947 unlock:
5948         hci_dev_unlock(hdev);
5949 }
5950
5951 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5952                                              struct sk_buff *skb)
5953 {
5954         struct hci_ev_disconn_phy_link_complete *ev = data;
5955         struct hci_conn *hcon;
5956
5957         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5958
5959         if (ev->status)
5960                 return;
5961
5962         hci_dev_lock(hdev);
5963
5964         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5965         if (hcon && hcon->type == AMP_LINK) {
5966                 hcon->state = BT_CLOSED;
5967                 hci_disconn_cfm(hcon, ev->reason);
5968                 hci_conn_del(hcon);
5969         }
5970
5971         hci_dev_unlock(hdev);
5972 }
5973 #endif
5974
5975 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5976                                 u8 bdaddr_type, bdaddr_t *local_rpa)
5977 {
5978         if (conn->out) {
5979                 conn->dst_type = bdaddr_type;
5980                 conn->resp_addr_type = bdaddr_type;
5981                 bacpy(&conn->resp_addr, bdaddr);
5982
5983                 /* Check if the controller has set a Local RPA then it must be
5984                  * used instead or hdev->rpa.
5985                  */
5986                 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5987                         conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5988                         bacpy(&conn->init_addr, local_rpa);
5989                 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5990                         conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5991                         bacpy(&conn->init_addr, &conn->hdev->rpa);
5992                 } else {
5993                         hci_copy_identity_address(conn->hdev, &conn->init_addr,
5994                                                   &conn->init_addr_type);
5995                 }
5996         } else {
5997                 conn->resp_addr_type = conn->hdev->adv_addr_type;
5998                 /* Check if the controller has set a Local RPA then it must be
5999                  * used instead or hdev->rpa.
6000                  */
6001                 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
6002                         conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
6003                         bacpy(&conn->resp_addr, local_rpa);
6004                 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
6005                         /* In case of ext adv, resp_addr will be updated in
6006                          * Adv Terminated event.
6007                          */
6008                         if (!ext_adv_capable(conn->hdev))
6009                                 bacpy(&conn->resp_addr,
6010                                       &conn->hdev->random_addr);
6011                 } else {
6012                         bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
6013                 }
6014
6015                 conn->init_addr_type = bdaddr_type;
6016                 bacpy(&conn->init_addr, bdaddr);
6017
6018                 /* For incoming connections, set the default minimum
6019                  * and maximum connection interval. They will be used
6020                  * to check if the parameters are in range and if not
6021                  * trigger the connection update procedure.
6022                  */
6023                 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
6024                 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
6025         }
6026 }
6027
6028 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
6029                                  bdaddr_t *bdaddr, u8 bdaddr_type,
6030                                  bdaddr_t *local_rpa, u8 role, u16 handle,
6031                                  u16 interval, u16 latency,
6032                                  u16 supervision_timeout)
6033 {
6034         struct hci_conn_params *params;
6035         struct hci_conn *conn;
6036         struct smp_irk *irk;
6037         u8 addr_type;
6038
6039         hci_dev_lock(hdev);
6040
6041         /* All controllers implicitly stop advertising in the event of a
6042          * connection, so ensure that the state bit is cleared.
6043          */
6044         hci_dev_clear_flag(hdev, HCI_LE_ADV);
6045
6046         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
6047         if (!conn) {
6048                 /* In case of error status and there is no connection pending
6049                  * just unlock as there is nothing to cleanup.
6050                  */
6051                 if (status)
6052                         goto unlock;
6053
6054                 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
6055                 if (!conn) {
6056                         bt_dev_err(hdev, "no memory for new connection");
6057                         goto unlock;
6058                 }
6059
6060                 conn->dst_type = bdaddr_type;
6061
6062                 /* If we didn't have a hci_conn object previously
6063                  * but we're in central role this must be something
6064                  * initiated using an accept list. Since accept list based
6065                  * connections are not "first class citizens" we don't
6066                  * have full tracking of them. Therefore, we go ahead
6067                  * with a "best effort" approach of determining the
6068                  * initiator address based on the HCI_PRIVACY flag.
6069                  */
6070                 if (conn->out) {
6071                         conn->resp_addr_type = bdaddr_type;
6072                         bacpy(&conn->resp_addr, bdaddr);
6073                         if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6074                                 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
6075                                 bacpy(&conn->init_addr, &hdev->rpa);
6076                         } else {
6077                                 hci_copy_identity_address(hdev,
6078                                                           &conn->init_addr,
6079                                                           &conn->init_addr_type);
6080                         }
6081                 }
6082         } else {
6083 #ifdef TIZEN_BT
6084                 /* LE auto connect */
6085                 bacpy(&conn->dst, bdaddr);
6086 #endif
6087                 cancel_delayed_work(&conn->le_conn_timeout);
6088         }
6089
6090         /* The HCI_LE_Connection_Complete event is only sent once per connection.
6091          * Processing it more than once per connection can corrupt kernel memory.
6092          *
6093          * As the connection handle is set here for the first time, it indicates
6094          * whether the connection is already set up.
6095          */
6096         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
6097                 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
6098                 goto unlock;
6099         }
6100
6101         le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
6102
6103         /* Lookup the identity address from the stored connection
6104          * address and address type.
6105          *
6106          * When establishing connections to an identity address, the
6107          * connection procedure will store the resolvable random
6108          * address first. Now if it can be converted back into the
6109          * identity address, start using the identity address from
6110          * now on.
6111          */
6112         irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
6113         if (irk) {
6114                 bacpy(&conn->dst, &irk->bdaddr);
6115                 conn->dst_type = irk->addr_type;
6116         }
6117
6118         conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
6119
6120         if (handle > HCI_CONN_HANDLE_MAX) {
6121                 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
6122                            HCI_CONN_HANDLE_MAX);
6123                 status = HCI_ERROR_INVALID_PARAMETERS;
6124         }
6125
6126         /* All connection failure handling is taken care of by the
6127          * hci_conn_failed function which is triggered by the HCI
6128          * request completion callbacks used for connecting.
6129          */
6130         if (status)
6131                 goto unlock;
6132
6133         /* Drop the connection if it has been aborted */
6134         if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
6135                 hci_conn_drop(conn);
6136                 goto unlock;
6137         }
6138
6139         if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
6140                 addr_type = BDADDR_LE_PUBLIC;
6141         else
6142                 addr_type = BDADDR_LE_RANDOM;
6143
6144         /* Drop the connection if the device is blocked */
6145         if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
6146                 hci_conn_drop(conn);
6147                 goto unlock;
6148         }
6149
6150         if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
6151                 mgmt_device_connected(hdev, conn, NULL, 0);
6152
6153         conn->sec_level = BT_SECURITY_LOW;
6154         conn->handle = handle;
6155         conn->state = BT_CONFIG;
6156
6157         /* Store current advertising instance as connection advertising instance
6158          * when software rotation is in use so it can be re-enabled when
6159          * disconnected.
6160          */
6161         if (!ext_adv_capable(hdev))
6162                 conn->adv_instance = hdev->cur_adv_instance;
6163
6164         conn->le_conn_interval = interval;
6165         conn->le_conn_latency = latency;
6166         conn->le_supv_timeout = supervision_timeout;
6167
6168         hci_debugfs_create_conn(conn);
6169         hci_conn_add_sysfs(conn);
6170
6171         /* The remote features procedure is defined for the central
6172          * role only, so the remote features are only requested for an
6173          * initiated (outgoing) connection.
6174          *
6175          * If the local controller supports peripheral-initiated features
6176          * exchange, then requesting the remote features in peripheral
6177          * role is possible. Otherwise just transition into the
6178          * connected state without requesting the remote features.
6179          */
6180         if (conn->out ||
6181             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
6182                 struct hci_cp_le_read_remote_features cp;
6183
6184                 cp.handle = __cpu_to_le16(conn->handle);
6185
6186                 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
6187                              sizeof(cp), &cp);
6188
6189                 hci_conn_hold(conn);
6190         } else {
6191                 conn->state = BT_CONNECTED;
6192                 hci_connect_cfm(conn, status);
6193         }
6194
6195         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6196                                            conn->dst_type);
6197         if (params) {
6198                 list_del_init(&params->action);
6199                 if (params->conn) {
6200                         hci_conn_drop(params->conn);
6201                         hci_conn_put(params->conn);
6202                         params->conn = NULL;
6203                 }
6204         }
6205
6206 unlock:
6207         hci_update_passive_scan(hdev);
6208         hci_dev_unlock(hdev);
6209 }
6210
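/* Legacy LE Connection Complete event (subevent 0x01). This event does not
 * carry the local RPA, so NULL is passed for local_rpa and the initiator
 * address is derived from hdev->rpa or the identity address where needed.
 */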
6211 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6212                                      struct sk_buff *skb)
6213 {
6214         struct hci_ev_le_conn_complete *ev = data;
6215
6216         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6217
6218         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6219                              NULL, ev->role, le16_to_cpu(ev->handle),
6220                              le16_to_cpu(ev->interval),
6221                              le16_to_cpu(ev->latency),
6222                              le16_to_cpu(ev->supervision_timeout));
6223 }
6224
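/* Enhanced LE Connection Complete event (subevent 0x0a). In addition to the
 * legacy fields it reports the local resolvable private address, which is
 * forwarded so the initiator/responder addresses can be recorded.
 */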
6225 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6226                                          struct sk_buff *skb)
6227 {
6228         struct hci_ev_le_enh_conn_complete *ev = data;
6229
6230         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6231
6232         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6233                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6234                              le16_to_cpu(ev->interval),
6235                              le16_to_cpu(ev->latency),
6236                              le16_to_cpu(ev->supervision_timeout));
6237 }
6238
6239 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6240                                     struct sk_buff *skb)
6241 {
6242         struct hci_evt_le_ext_adv_set_term *ev = data;
6243         struct hci_conn *conn;
6244         struct adv_info *adv, *n;
6245
6246         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6247
6248         /* The Bluetooth Core 5.3 specification clearly states that this event
6249          * shall not be sent when the Host disables the advertising set. So in
6250          * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6251          *
6252          * When the Host disables an advertising set, all cleanup is done via
6253          * its command callback and does not need to be duplicated here.
6254          */
6255         if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6256                 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6257                 return;
6258         }
6259
6260         hci_dev_lock(hdev);
6261
6262         adv = hci_find_adv_instance(hdev, ev->handle);
6263
6264         if (ev->status) {
6265                 if (!adv)
6266                         goto unlock;
6267
6268                 /* Remove advertising as it has been terminated */
6269                 hci_remove_adv_instance(hdev, ev->handle);
6270                 mgmt_advertising_removed(NULL, hdev, ev->handle);
6271
6272                 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6273                         if (adv->enabled)
6274                                 goto unlock;
6275                 }
6276
6277                 /* We are no longer advertising, clear HCI_LE_ADV */
6278                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
6279                 goto unlock;
6280         }
6281
6282         if (adv)
6283                 adv->enabled = false;
6284
6285         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6286         if (conn) {
6287                 /* Store handle in the connection so the correct advertising
6288                  * instance can be re-enabled when disconnected.
6289                  */
6290                 conn->adv_instance = ev->handle;
6291
6292                 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6293                     bacmp(&conn->resp_addr, BDADDR_ANY))
6294                         goto unlock;
6295
6296                 if (!ev->handle) {
6297                         bacpy(&conn->resp_addr, &hdev->random_addr);
6298                         goto unlock;
6299                 }
6300
6301                 if (adv)
6302                         bacpy(&conn->resp_addr, &adv->random_addr);
6303         }
6304
6305 unlock:
6306         hci_dev_unlock(hdev);
6307 }
6308
6309 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6310                                             struct sk_buff *skb)
6311 {
6312         struct hci_ev_le_conn_update_complete *ev = data;
6313         struct hci_conn *conn;
6314
6315         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6316
6317         if (ev->status)
6318                 return;
6319
6320         hci_dev_lock(hdev);
6321
6322         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6323         if (conn) {
6324 #ifdef TIZEN_BT
6325                 if (ev->status) {
6326                         hci_dev_unlock(hdev);
6327                         mgmt_le_conn_update_failed(hdev, &conn->dst,
6328                                 conn->type, conn->dst_type, ev->status);
6329                         return;
6330                 }
6331 #endif
6332                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6333                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6334                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6335         }
6336
6337         hci_dev_unlock(hdev);
6338
6339 #ifdef TIZEN_BT
6340         mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
6341                                 conn->dst_type, conn->le_conn_interval,
6342                                 conn->le_conn_latency, conn->le_supv_timeout);
6343 #endif
6344 }
6345
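/* Decide whether an advertising report should trigger an LE connection
 * attempt, based on the pending connection parameters stored for the
 * advertiser. Returns the resulting hci_conn or NULL.
 */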
6346 /* This function requires the caller holds hdev->lock */
6347 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6348                                               bdaddr_t *addr,
6349                                               u8 addr_type, bool addr_resolved,
6350                                               u8 adv_type)
6351 {
6352         struct hci_conn *conn;
6353         struct hci_conn_params *params;
6354
6355         /* If the event is not connectable don't proceed further */
6356         if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6357                 return NULL;
6358
6359         /* Ignore if the device is blocked or hdev is suspended */
6360         if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6361             hdev->suspended)
6362                 return NULL;
6363
6364         /* Most controllers will fail if we try to create new connections
6365          * while we have an existing one in peripheral role.
6366          */
6367         if (hdev->conn_hash.le_num_peripheral > 0 &&
6368             (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6369              !(hdev->le_states[3] & 0x10)))
6370                 return NULL;
6371
6372         /* If we're not connectable only connect devices that we have in
6373          * our pend_le_conns list.
6374          */
6375         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6376                                            addr_type);
6377         if (!params)
6378                 return NULL;
6379
6380         if (!params->explicit_connect) {
6381                 switch (params->auto_connect) {
6382                 case HCI_AUTO_CONN_DIRECT:
6383                         /* Only devices advertising with ADV_DIRECT_IND
6384                          * trigger a connection attempt. This allows
6385                          * incoming connections from peripheral devices.
6386                          */
6387                         if (adv_type != LE_ADV_DIRECT_IND)
6388                                 return NULL;
6389                         break;
6390                 case HCI_AUTO_CONN_ALWAYS:
6391                         /* Devices advertising with ADV_IND or ADV_DIRECT_IND
6392                          * trigger a connection attempt. This means that
6393                          * incoming connections from peripheral devices are
6394                          * accepted and outgoing connections to peripheral
6395                          * devices are established when they are found.
6396                          */
6397                         break;
6398                 default:
6399                         return NULL;
6400                 }
6401         }
6402
6403         conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6404                               BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6405                               HCI_ROLE_MASTER);
6406         if (!IS_ERR(conn)) {
6407                 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6408                  * by the higher layer that tried to connect; if not, then
6409                  * store the pointer since we don't really have any
6410                  * other owner of the object besides the params that
6411                  * triggered it. This way we can abort the connection if
6412                  * the parameters get removed and keep the reference
6413                  * count consistent once the connection is established.
6414                  */
6415
6416                 if (!params->explicit_connect)
6417                         params->conn = hci_conn_get(conn);
6418
6419                 return conn;
6420         }
6421
6422         switch (PTR_ERR(conn)) {
6423         case -EBUSY:
6424                 /* If hci_connect() returns -EBUSY it means there is already
6425                  * an LE connection attempt going on. Since controllers don't
6426                  * support more than one connection attempt at a time, we
6427                  * don't consider this an error case.
6428                  */
6429                 break;
6430         default:
6431                 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6432                 return NULL;
6433         }
6434
6435         return NULL;
6436 }
6437
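/* Common handler for legacy, extended and direct advertising reports:
 * validates and trims the advertising data, resolves RPAs through stored
 * IRKs, triggers pending connections via check_pending_le_conn() and
 * forwards the report to mgmt, merging ADV_IND/ADV_SCAN_IND data with a
 * subsequent SCAN_RSP where possible.
 */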
6438 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6439                                u8 bdaddr_type, bdaddr_t *direct_addr,
6440                                u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6441                                bool ext_adv, bool ctl_time, u64 instant)
6442 {
6443         struct discovery_state *d = &hdev->discovery;
6444         struct smp_irk *irk;
6445         struct hci_conn *conn;
6446         bool match, bdaddr_resolved;
6447         u32 flags;
6448         u8 *ptr;
6449
6450         switch (type) {
6451         case LE_ADV_IND:
6452         case LE_ADV_DIRECT_IND:
6453         case LE_ADV_SCAN_IND:
6454         case LE_ADV_NONCONN_IND:
6455         case LE_ADV_SCAN_RSP:
6456                 break;
6457         default:
6458                 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6459                                        "type: 0x%02x", type);
6460                 return;
6461         }
6462
6463         if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
6464                 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
6465                 return;
6466         }
6467
6468         /* Find the end of the data in case the report contains padded zero
6469          * bytes at the end causing an invalid length value.
6470          *
6471          * When data is NULL, len is 0 so there is no need for an extra ptr
6472          * check as 'ptr < data + 0' is already false in such a case.
6473          */
6474         for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6475                 if (ptr + 1 + *ptr > data + len)
6476                         break;
6477         }
6478
6479         /* Adjust for actual length. This handles the case when remote
6480          * device is advertising with incorrect data length.
6481          */
6482         len = ptr - data;
6483
6484         /* If the direct address is present, then this report is from
6485          * an LE Direct Advertising Report event. In that case it is
6486          * important to see if the address matches the local
6487          * controller address.
6488          */
6489         if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6490                 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6491                                                   &bdaddr_resolved);
6492
6493                 /* Only resolvable random addresses are valid for these
6494                  * kinds of reports and others can be ignored.
6495                  */
6496                 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6497                         return;
6498
6499                 /* If the controller is not using resolvable random
6500                  * addresses, then this report can be ignored.
6501                  */
6502                 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6503                         return;
6504
6505                 /* If the local IRK of the controller does not match
6506                  * with the resolvable random address provided, then
6507                  * this report can be ignored.
6508                  */
6509                 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6510                         return;
6511         }
6512
6513         /* Check if we need to convert to identity address */
6514         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6515         if (irk) {
6516                 bdaddr = &irk->bdaddr;
6517                 bdaddr_type = irk->addr_type;
6518         }
6519
6520         bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6521
6522         /* Check if we have been requested to connect to this device.
6523          *
6524          * direct_addr is set only for directed advertising reports (it is NULL
6525          * for advertising reports) and is already verified to be an RPA above.
6526          */
6527         conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6528                                      type);
6529         if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
6530                 /* Store report for later inclusion by
6531                  * mgmt_device_connected
6532                  */
6533                 memcpy(conn->le_adv_data, data, len);
6534                 conn->le_adv_data_len = len;
6535         }
6536
6537         if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6538                 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6539         else
6540                 flags = 0;
6541
6542         /* All scan results should be sent up for Mesh systems */
6543         if (hci_dev_test_flag(hdev, HCI_MESH)) {
6544                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6545                                   rssi, flags, data, len, NULL, 0, instant);
6546                 return;
6547         }
6548
6549         /* Passive scanning shouldn't trigger any device found events,
6550          * except for devices marked as CONN_REPORT for which we do send
6551          * device found events, or when advertisement monitoring has been requested.
6552          */
6553         if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6554                 if (type == LE_ADV_DIRECT_IND)
6555                         return;
6556
6557 #ifndef TIZEN_BT
6558                 /* Handle all adv packets in the platform */
6559                 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6560                                                bdaddr, bdaddr_type) &&
6561                     idr_is_empty(&hdev->adv_monitors_idr))
6562                         return;
6563 #endif
6564
6565 #ifdef TIZEN_BT
6566                 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6567                                   rssi, flags, data, len, NULL, 0, type);
6568 #else
6569                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6570                                   rssi, flags, data, len, NULL, 0, 0);
6571 #endif
6572                 return;
6573         }
6574
6575         /* When receiving a scan response, there is no way to
6576          * know if the remote device is connectable or not. However
6577          * since scan responses are merged with a previously seen
6578          * advertising report, the flags field from that report
6579          * will be used.
6580          *
6581          * In the unlikely case that a controller just sends a scan
6582          * response event that doesn't match the pending report, then
6583          * it is marked as a standalone SCAN_RSP.
6584          */
6585         if (type == LE_ADV_SCAN_RSP)
6586                 flags = MGMT_DEV_FOUND_SCAN_RSP;
6587
6588 #ifdef TIZEN_BT
6589         /* Disable adv ind and scan rsp merging */
6590         mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6591                                   rssi, flags, data, len, NULL, 0, type);
6592 #else
6593         /* If there's nothing pending either store the data from this
6594          * event or send an immediate device found event if the data
6595          * should not be stored for later.
6596          */
6597         if (!ext_adv && !has_pending_adv_report(hdev)) {
6598                 /* If the report will trigger a SCAN_REQ store it for
6599                  * later merging.
6600                  */
6601                 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6602                         store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6603                                                  rssi, flags, data, len);
6604                         return;
6605                 }
6606
6607                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6608                                   rssi, flags, data, len, NULL, 0, 0);
6609                 return;
6610         }
6611
6612         /* Check if the pending report is for the same device as the new one */
6613         match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6614                  bdaddr_type == d->last_adv_addr_type);
6615
6616         /* If the pending data doesn't match this report or this isn't a
6617          * scan response (e.g. we got a duplicate ADV_IND) then force
6618          * sending of the pending data.
6619          */
6620         if (type != LE_ADV_SCAN_RSP || !match) {
6621                 /* Send out whatever is in the cache, but skip duplicates */
6622                 if (!match)
6623                         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6624                                           d->last_adv_addr_type, NULL,
6625                                           d->last_adv_rssi, d->last_adv_flags,
6626                                           d->last_adv_data,
6627                                           d->last_adv_data_len, NULL, 0, 0);
6628
6629                 /* If the new report will trigger a SCAN_REQ store it for
6630                  * later merging.
6631                  */
6632                 if (!ext_adv && (type == LE_ADV_IND ||
6633                                  type == LE_ADV_SCAN_IND)) {
6634                         store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6635                                                  rssi, flags, data, len);
6636                         return;
6637                 }
6638
6639                 /* The advertising reports cannot be merged, so clear
6640                  * the pending report and send out a device found event.
6641                  */
6642                 clear_pending_adv_report(hdev);
6643                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6644                                   rssi, flags, data, len, NULL, 0, 0);
6645                 return;
6646         }
6647
6648         /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6649          * the new event is a SCAN_RSP. We can therefore proceed with
6650          * sending a merged device found event.
6651          */
6652         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6653                           d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6654                           d->last_adv_data, d->last_adv_data_len, data, len, 0);
6655         clear_pending_adv_report(hdev);
6656 #endif
6657 }
6658
6659 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6660                                   struct sk_buff *skb)
6661 {
6662         struct hci_ev_le_advertising_report *ev = data;
6663         u64 instant = jiffies;
6664
6665         if (!ev->num)
6666                 return;
6667
6668         hci_dev_lock(hdev);
6669
6670         while (ev->num--) {
6671                 struct hci_ev_le_advertising_info *info;
6672                 s8 rssi;
6673
6674                 info = hci_le_ev_skb_pull(hdev, skb,
6675                                           HCI_EV_LE_ADVERTISING_REPORT,
6676                                           sizeof(*info));
6677                 if (!info)
6678                         break;
6679
6680                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6681                                         info->length + 1))
6682                         break;
6683
6684                 if (info->length <= HCI_MAX_AD_LENGTH) {
6685                         rssi = info->data[info->length];
6686                         process_adv_report(hdev, info->type, &info->bdaddr,
6687                                            info->bdaddr_type, NULL, 0, rssi,
6688                                            info->data, info->length, false,
6689                                            false, instant);
6690                 } else {
6691                         bt_dev_err(hdev, "Dropping invalid advertising data");
6692                 }
6693         }
6694
6695         hci_dev_unlock(hdev);
6696 }
6697
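/* Translate an extended advertising report event type into the legacy PDU
 * types understood by process_adv_report(). Legacy PDUs are mapped directly;
 * for extended PDUs the connectable, scannable and directed bits are
 * evaluated individually. Returns LE_ADV_INVALID for unknown combinations.
 */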
6698 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6699 {
6700         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6701                 switch (evt_type) {
6702                 case LE_LEGACY_ADV_IND:
6703                         return LE_ADV_IND;
6704                 case LE_LEGACY_ADV_DIRECT_IND:
6705                         return LE_ADV_DIRECT_IND;
6706                 case LE_LEGACY_ADV_SCAN_IND:
6707                         return LE_ADV_SCAN_IND;
6708                 case LE_LEGACY_NONCONN_IND:
6709                         return LE_ADV_NONCONN_IND;
6710                 case LE_LEGACY_SCAN_RSP_ADV:
6711                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6712                         return LE_ADV_SCAN_RSP;
6713                 }
6714
6715                 goto invalid;
6716         }
6717
6718         if (evt_type & LE_EXT_ADV_CONN_IND) {
6719                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6720                         return LE_ADV_DIRECT_IND;
6721
6722                 return LE_ADV_IND;
6723         }
6724
6725         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6726                 return LE_ADV_SCAN_RSP;
6727
6728         if (evt_type & LE_EXT_ADV_SCAN_IND)
6729                 return LE_ADV_SCAN_IND;
6730
6731         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6732             evt_type & LE_EXT_ADV_DIRECT_IND)
6733                 return LE_ADV_NONCONN_IND;
6734
6735 invalid:
6736         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6737                                evt_type);
6738
6739         return LE_ADV_INVALID;
6740 }
6741
6742 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6743                                       struct sk_buff *skb)
6744 {
6745         struct hci_ev_le_ext_adv_report *ev = data;
6746         u64 instant = jiffies;
6747
6748         if (!ev->num)
6749                 return;
6750
6751         hci_dev_lock(hdev);
6752
6753         while (ev->num--) {
6754                 struct hci_ev_le_ext_adv_info *info;
6755                 u8 legacy_evt_type;
6756                 u16 evt_type;
6757
6758                 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6759                                           sizeof(*info));
6760                 if (!info)
6761                         break;
6762
6763                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6764                                         info->length))
6765                         break;
6766
6767                 evt_type = __le16_to_cpu(info->type);
6768                 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6769                 if (legacy_evt_type != LE_ADV_INVALID) {
6770                         process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6771                                            info->bdaddr_type, NULL, 0,
6772                                            info->rssi, info->data, info->length,
6773                                            !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6774                                            false, instant);
6775                 }
6776         }
6777
6778         hci_dev_unlock(hdev);
6779 }
6780
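/* Send LE PA Terminate Sync for a periodic advertising train that was not
 * accepted (used by the PA sync established and BIGInfo advertising report
 * handlers below).
 */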
6781 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6782 {
6783         struct hci_cp_le_pa_term_sync cp;
6784
6785         memset(&cp, 0, sizeof(cp));
6786         cp.handle = handle;
6787
6788         return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6789 }
6790
6791 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6792                                             struct sk_buff *skb)
6793 {
6794         struct hci_ev_le_pa_sync_established *ev = data;
6795         int mask = hdev->link_mode;
6796         __u8 flags = 0;
6797
6798         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6799
6800         if (ev->status)
6801                 return;
6802
6803         hci_dev_lock(hdev);
6804
6805         hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6806
6807         mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6808         if (!(mask & HCI_LM_ACCEPT))
6809                 hci_le_pa_term_sync(hdev, ev->handle);
6810
6811         hci_dev_unlock(hdev);
6812 }
6813
6814 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6815                                             struct sk_buff *skb)
6816 {
6817         struct hci_ev_le_remote_feat_complete *ev = data;
6818         struct hci_conn *conn;
6819
6820         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6821
6822         hci_dev_lock(hdev);
6823
6824         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6825         if (conn) {
6826                 if (!ev->status)
6827                         memcpy(conn->features[0], ev->features, 8);
6828
6829                 if (conn->state == BT_CONFIG) {
6830                         __u8 status;
6831
6832                         /* If the local controller supports peripheral-initiated
6833                          * features exchange, but the remote controller does
6834                          * not, then it is possible that the error code 0x1a
6835                          * for unsupported remote feature gets returned.
6836                          *
6837                          * In this specific case, allow the connection to
6838                          * transition into connected state and mark it as
6839                          * successful.
6840                          */
6841                         if (!conn->out && ev->status == 0x1a &&
6842                             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6843                                 status = 0x00;
6844                         else
6845                                 status = ev->status;
6846
6847                         conn->state = BT_CONNECTED;
6848                         hci_connect_cfm(conn, status);
6849                         hci_conn_drop(conn);
6850                 }
6851         }
6852
6853         hci_dev_unlock(hdev);
6854 }
6855
6856 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6857                                    struct sk_buff *skb)
6858 {
6859         struct hci_ev_le_ltk_req *ev = data;
6860         struct hci_cp_le_ltk_reply cp;
6861         struct hci_cp_le_ltk_neg_reply neg;
6862         struct hci_conn *conn;
6863         struct smp_ltk *ltk;
6864
6865         bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6866
6867         hci_dev_lock(hdev);
6868
6869         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6870         if (conn == NULL)
6871                 goto not_found;
6872
6873         ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6874         if (!ltk)
6875                 goto not_found;
6876
6877         if (smp_ltk_is_sc(ltk)) {
6878                 /* With SC both EDiv and Rand are set to zero */
6879                 if (ev->ediv || ev->rand)
6880                         goto not_found;
6881         } else {
6882                 /* For non-SC keys check that EDiv and Rand match */
6883                 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6884                         goto not_found;
6885         }
6886
6887         memcpy(cp.ltk, ltk->val, ltk->enc_size);
6888         memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6889         cp.handle = cpu_to_le16(conn->handle);
6890
6891         conn->pending_sec_level = smp_ltk_sec_level(ltk);
6892
6893         conn->enc_key_size = ltk->enc_size;
6894
6895         hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6896
6897         /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6898          * temporary key used to encrypt a connection following
6899          * pairing. It is used during the Encrypted Session Setup to
6900          * distribute the keys. Later, security can be re-established
6901          * using a distributed LTK.
6902          */
6903         if (ltk->type == SMP_STK) {
6904                 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6905                 list_del_rcu(&ltk->list);
6906                 kfree_rcu(ltk, rcu);
6907         } else {
6908                 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6909         }
6910
6911         hci_dev_unlock(hdev);
6912
6913         return;
6914
6915 not_found:
6916         neg.handle = ev->handle;
6917         hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6918         hci_dev_unlock(hdev);
6919 }
6920
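/* Negatively reply to an LE Remote Connection Parameter Request with the
 * given reason code.
 */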
6921 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6922                                       u8 reason)
6923 {
6924         struct hci_cp_le_conn_param_req_neg_reply cp;
6925
6926         cp.handle = cpu_to_le16(handle);
6927         cp.reason = reason;
6928
6929         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6930                      &cp);
6931 }
6932
6933 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6934                                              struct sk_buff *skb)
6935 {
6936         struct hci_ev_le_remote_conn_param_req *ev = data;
6937         struct hci_cp_le_conn_param_req_reply cp;
6938         struct hci_conn *hcon;
6939         u16 handle, min, max, latency, timeout;
6940
6941         bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6942
6943         handle = le16_to_cpu(ev->handle);
6944         min = le16_to_cpu(ev->interval_min);
6945         max = le16_to_cpu(ev->interval_max);
6946         latency = le16_to_cpu(ev->latency);
6947         timeout = le16_to_cpu(ev->timeout);
6948
6949         hcon = hci_conn_hash_lookup_handle(hdev, handle);
6950         if (!hcon || hcon->state != BT_CONNECTED)
6951                 return send_conn_param_neg_reply(hdev, handle,
6952                                                  HCI_ERROR_UNKNOWN_CONN_ID);
6953
6954         if (hci_check_conn_params(min, max, latency, timeout))
6955                 return send_conn_param_neg_reply(hdev, handle,
6956                                                  HCI_ERROR_INVALID_LL_PARAMS);
6957
6958         if (hcon->role == HCI_ROLE_MASTER) {
6959                 struct hci_conn_params *params;
6960                 u8 store_hint;
6961
6962                 hci_dev_lock(hdev);
6963
6964                 params = hci_conn_params_lookup(hdev, &hcon->dst,
6965                                                 hcon->dst_type);
6966                 if (params) {
6967                         params->conn_min_interval = min;
6968                         params->conn_max_interval = max;
6969                         params->conn_latency = latency;
6970                         params->supervision_timeout = timeout;
6971                         store_hint = 0x01;
6972                 } else {
6973                         store_hint = 0x00;
6974                 }
6975
6976                 hci_dev_unlock(hdev);
6977
6978                 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6979                                     store_hint, min, max, latency, timeout);
6980         }
6981
6982         cp.handle = ev->handle;
6983         cp.interval_min = ev->interval_min;
6984         cp.interval_max = ev->interval_max;
6985         cp.latency = ev->latency;
6986         cp.timeout = ev->timeout;
6987         cp.min_ce_len = 0;
6988         cp.max_ce_len = 0;
6989
6990         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6991 }
6992
6993 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6994                                          struct sk_buff *skb)
6995 {
6996         struct hci_ev_le_direct_adv_report *ev = data;
6997         u64 instant = jiffies;
6998         int i;
6999
7000         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
7001                                 flex_array_size(ev, info, ev->num)))
7002                 return;
7003
7004         if (!ev->num)
7005                 return;
7006
7007         hci_dev_lock(hdev);
7008
7009         for (i = 0; i < ev->num; i++) {
7010                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
7011
7012                 process_adv_report(hdev, info->type, &info->bdaddr,
7013                                    info->bdaddr_type, &info->direct_addr,
7014                                    info->direct_addr_type, info->rssi, NULL, 0,
7015                                    false, false, instant);
7016         }
7017
7018         hci_dev_unlock(hdev);
7019 }
7020
7021 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
7022                                   struct sk_buff *skb)
7023 {
7024         struct hci_ev_le_phy_update_complete *ev = data;
7025         struct hci_conn *conn;
7026
7027         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7028
7029         if (ev->status)
7030                 return;
7031
7032         hci_dev_lock(hdev);
7033
7034         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
7035         if (!conn)
7036                 goto unlock;
7037
7038         conn->le_tx_phy = ev->tx_phy;
7039         conn->le_rx_phy = ev->rx_phy;
7040
7041 unlock:
7042         hci_dev_unlock(hdev);
7043 }
7044
7045 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
7046                                         struct sk_buff *skb)
7047 {
7048         struct hci_evt_le_cis_established *ev = data;
7049         struct hci_conn *conn;
7050         u16 handle = __le16_to_cpu(ev->handle);
7051
7052         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7053
7054         hci_dev_lock(hdev);
7055
7056         conn = hci_conn_hash_lookup_handle(hdev, handle);
7057         if (!conn) {
7058                 bt_dev_err(hdev,
7059                            "Unable to find connection with handle 0x%4.4x",
7060                            handle);
7061                 goto unlock;
7062         }
7063
7064         if (conn->type != ISO_LINK) {
7065                 bt_dev_err(hdev,
7066                            "Invalid connection link type handle 0x%4.4x",
7067                            handle);
7068                 goto unlock;
7069         }
7070
7071         if (conn->role == HCI_ROLE_SLAVE) {
7072                 __le32 interval;
7073
7074                 memset(&interval, 0, sizeof(interval));
7075
7076                 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
7077                 conn->iso_qos.in.interval = le32_to_cpu(interval);
7078                 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
7079                 conn->iso_qos.out.interval = le32_to_cpu(interval);
7080                 conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
7081                 conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
7082                 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
7083                 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
7084                 conn->iso_qos.in.phy = ev->c_phy;
7085                 conn->iso_qos.out.phy = ev->p_phy;
7086         }
7087
7088         if (!ev->status) {
7089                 conn->state = BT_CONNECTED;
7090                 hci_debugfs_create_conn(conn);
7091                 hci_conn_add_sysfs(conn);
7092                 hci_iso_setup_path(conn);
7093                 goto unlock;
7094         }
7095
7096         hci_connect_cfm(conn, ev->status);
7097         hci_conn_del(conn);
7098
7099 unlock:
7100         hci_dev_unlock(hdev);
7101 }
7102
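/* Helpers for answering an incoming CIS request: reject it (using
 * HCI_ERROR_REJ_BAD_ADDR as the reason) or accept it as is.
 */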
7103 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
7104 {
7105         struct hci_cp_le_reject_cis cp;
7106
7107         memset(&cp, 0, sizeof(cp));
7108         cp.handle = handle;
7109         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
7110         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
7111 }
7112
7113 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
7114 {
7115         struct hci_cp_le_accept_cis cp;
7116
7117         memset(&cp, 0, sizeof(cp));
7118         cp.handle = handle;
7119         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
7120 }
7121
7122 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
7123                                struct sk_buff *skb)
7124 {
7125         struct hci_evt_le_cis_req *ev = data;
7126         u16 acl_handle, cis_handle;
7127         struct hci_conn *acl, *cis;
7128         int mask;
7129         __u8 flags = 0;
7130
7131         acl_handle = __le16_to_cpu(ev->acl_handle);
7132         cis_handle = __le16_to_cpu(ev->cis_handle);
7133
7134         bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
7135                    acl_handle, cis_handle, ev->cig_id, ev->cis_id);
7136
7137         hci_dev_lock(hdev);
7138
7139         acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
7140         if (!acl)
7141                 goto unlock;
7142
7143         mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
7144         if (!(mask & HCI_LM_ACCEPT)) {
7145                 hci_le_reject_cis(hdev, ev->cis_handle);
7146                 goto unlock;
7147         }
7148
7149         cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
7150         if (!cis) {
7151                 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
7152                 if (!cis) {
7153                         hci_le_reject_cis(hdev, ev->cis_handle);
7154                         goto unlock;
7155                 }
7156                 cis->handle = cis_handle;
7157         }
7158
7159         cis->iso_qos.cig = ev->cig_id;
7160         cis->iso_qos.cis = ev->cis_id;
7161
7162         if (!(flags & HCI_PROTO_DEFER)) {
7163                 hci_le_accept_cis(hdev, ev->cis_handle);
7164         } else {
7165                 cis->state = BT_CONNECT2;
7166                 hci_connect_cfm(cis, 0);
7167         }
7168
7169 unlock:
7170         hci_dev_unlock(hdev);
7171 }
7172
7173 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
7174                                            struct sk_buff *skb)
7175 {
7176         struct hci_evt_le_create_big_complete *ev = data;
7177         struct hci_conn *conn;
7178
7179         BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
7180
7181         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
7182                                 flex_array_size(ev, bis_handle, ev->num_bis)))
7183                 return;
7184
7185         hci_dev_lock(hdev);
7186
7187         conn = hci_conn_hash_lookup_big(hdev, ev->handle);
7188         if (!conn)
7189                 goto unlock;
7190
7191         if (conn->type != ISO_LINK) {
7192                 bt_dev_err(hdev,
7193                            "Invalid connection link type handle 0x%2.2x",
7194                            ev->handle);
7195                 goto unlock;
7196         }
7197
7198         if (ev->num_bis)
7199                 conn->handle = __le16_to_cpu(ev->bis_handle[0]);
7200
7201         if (!ev->status) {
7202                 conn->state = BT_CONNECTED;
7203                 hci_debugfs_create_conn(conn);
7204                 hci_conn_add_sysfs(conn);
7205                 hci_iso_setup_path(conn);
7206                 goto unlock;
7207         }
7208
7209         hci_connect_cfm(conn, ev->status);
7210         hci_conn_del(conn);
7211
7212 unlock:
7213         hci_dev_unlock(hdev);
7214 }
7215
7216 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7217                                             struct sk_buff *skb)
7218 {
7219         struct hci_evt_le_big_sync_estabilished *ev = data;
7220         struct hci_conn *bis;
7221         int i;
7222
7223         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7224
7225         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7226                                 flex_array_size(ev, bis, ev->num_bis)))
7227                 return;
7228
7229         if (ev->status)
7230                 return;
7231
7232         hci_dev_lock(hdev);
7233
7234         for (i = 0; i < ev->num_bis; i++) {
7235                 u16 handle = le16_to_cpu(ev->bis[i]);
7236                 __le32 interval;
7237
7238                 bis = hci_conn_hash_lookup_handle(hdev, handle);
7239                 if (!bis) {
7240                         bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7241                                            HCI_ROLE_SLAVE);
7242                         if (!bis)
7243                                 continue;
7244                         bis->handle = handle;
7245                 }
7246
7247                 bis->iso_qos.big = ev->handle;
7248                 memset(&interval, 0, sizeof(interval));
7249                 memcpy(&interval, ev->latency, sizeof(ev->latency));
7250                 bis->iso_qos.in.interval = le32_to_cpu(interval);
7251                 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7252                 bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7253                 bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
7254
7255                 hci_iso_setup_path(bis);
7256         }
7257
7258         hci_dev_unlock(hdev);
7259 }
7260
7261 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7262                                            struct sk_buff *skb)
7263 {
7264         struct hci_evt_le_big_info_adv_report *ev = data;
7265         int mask = hdev->link_mode;
7266         __u8 flags = 0;
7267
7268         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7269
7270         hci_dev_lock(hdev);
7271
7272         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7273         if (!(mask & HCI_LM_ACCEPT))
7274                 hci_le_pa_term_sync(hdev, ev->sync_handle);
7275
7276         hci_dev_unlock(hdev);
7277 }
7278
7279 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7280 [_op] = { \
7281         .func = _func, \
7282         .min_len = _min_len, \
7283         .max_len = _max_len, \
7284 }
7285
7286 #define HCI_LE_EV(_op, _func, _len) \
7287         HCI_LE_EV_VL(_op, _func, _len, _len)
7288
7289 #define HCI_LE_EV_STATUS(_op, _func) \
7290         HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7291
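/* For example,
 *   HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
 *             sizeof(struct hci_ev_le_ltk_req))
 * expands to the designated initializer
 *   [HCI_EV_LE_LTK_REQ] = { .func = hci_le_ltk_request_evt,
 *                           .min_len = sizeof(struct hci_ev_le_ltk_req),
 *                           .max_len = sizeof(struct hci_ev_le_ltk_req) },
 * placing the handler at the index of the subevent it serves.
 */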
7292 /* Entries in this table shall be positioned according to the subevent opcode
7293  * they handle, so use of the macros above is recommended since they initialize
7294  * each entry at its proper index using designated initializers; that way
7295  * events without a callback function can be omitted.
7296  */
7297 static const struct hci_le_ev {
7298         void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7299         u16  min_len;
7300         u16  max_len;
7301 } hci_le_ev_table[U8_MAX + 1] = {
7302         /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7303         HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7304                   sizeof(struct hci_ev_le_conn_complete)),
7305         /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7306         HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7307                      sizeof(struct hci_ev_le_advertising_report),
7308                      HCI_MAX_EVENT_SIZE),
7309         /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7310         HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7311                   hci_le_conn_update_complete_evt,
7312                   sizeof(struct hci_ev_le_conn_update_complete)),
7313         /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7314         HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7315                   hci_le_remote_feat_complete_evt,
7316                   sizeof(struct hci_ev_le_remote_feat_complete)),
7317         /* [0x05 = HCI_EV_LE_LTK_REQ] */
7318         HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7319                   sizeof(struct hci_ev_le_ltk_req)),
7320         /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7321         HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7322                   hci_le_remote_conn_param_req_evt,
7323                   sizeof(struct hci_ev_le_remote_conn_param_req)),
7324         /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7325         HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7326                   hci_le_enh_conn_complete_evt,
7327                   sizeof(struct hci_ev_le_enh_conn_complete)),
7328         /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7329         HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7330                      sizeof(struct hci_ev_le_direct_adv_report),
7331                      HCI_MAX_EVENT_SIZE),
7332         /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7333         HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7334                   sizeof(struct hci_ev_le_phy_update_complete)),
7335         /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7336         HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7337                      sizeof(struct hci_ev_le_ext_adv_report),
7338                      HCI_MAX_EVENT_SIZE),
7339         /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7340         HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7341                   hci_le_pa_sync_estabilished_evt,
7342                   sizeof(struct hci_ev_le_pa_sync_established)),
7343         /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7344         HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7345                   sizeof(struct hci_evt_le_ext_adv_set_term)),
7346         /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7347         HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7348                   sizeof(struct hci_evt_le_cis_established)),
7349         /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7350         HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7351                   sizeof(struct hci_evt_le_cis_req)),
7352         /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7353         HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7354                      hci_le_create_big_complete_evt,
7355                      sizeof(struct hci_evt_le_create_big_complete),
7356                      HCI_MAX_EVENT_SIZE),
7357         /* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7358         HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7359                      hci_le_big_sync_established_evt,
7360                      sizeof(struct hci_evt_le_big_sync_estabilished),
7361                      HCI_MAX_EVENT_SIZE),
7362         /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7363         HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7364                      hci_le_big_info_adv_report_evt,
7365                      sizeof(struct hci_evt_le_big_info_adv_report),
7366                      HCI_MAX_EVENT_SIZE),
7367 };
7368
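/* Dispatch an LE Meta event: first complete any pending LE command that is
 * waiting for this subevent, then validate the subevent length against
 * hci_le_ev_table[] before calling the registered handler.
 */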
7369 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7370                             struct sk_buff *skb, u16 *opcode, u8 *status,
7371                             hci_req_complete_t *req_complete,
7372                             hci_req_complete_skb_t *req_complete_skb)
7373 {
7374         struct hci_ev_le_meta *ev = data;
7375         const struct hci_le_ev *subev;
7376
7377         bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7378
7379         /* Only match event if command OGF is for LE */
7380         if (hdev->sent_cmd &&
7381             hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
7382             hci_skb_event(hdev->sent_cmd) == ev->subevent) {
7383                 *opcode = hci_skb_opcode(hdev->sent_cmd);
7384                 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7385                                      req_complete_skb);
7386         }
7387
7388         subev = &hci_le_ev_table[ev->subevent];
7389         if (!subev->func)
7390                 return;
7391
7392         if (skb->len < subev->min_len) {
7393                 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7394                            ev->subevent, skb->len, subev->min_len);
7395                 return;
7396         }
7397
7398         /* Just warn if the length is over max_len, since it may still be
7399          * possible to partially parse the event, so leave it to the callback
7400          * to decide if that is acceptable.
7401          */
7402         if (skb->len > subev->max_len)
7403                 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7404                             ev->subevent, skb->len, subev->max_len);
7405         data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7406         if (!data)
7407                 return;
7408
7409         subev->func(hdev, data, skb);
7410 }
7411
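/* Check whether the given skb carries the event that completes the pending
 * request for 'opcode': either the explicitly expected 'event', or, when no
 * event is expected, a Command Complete whose opcode matches.
 */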
7412 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7413                                  u8 event, struct sk_buff *skb)
7414 {
7415         struct hci_ev_cmd_complete *ev;
7416         struct hci_event_hdr *hdr;
7417
7418         if (!skb)
7419                 return false;
7420
7421         hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7422         if (!hdr)
7423                 return false;
7424
7425         if (event) {
7426                 if (hdr->evt != event)
7427                         return false;
7428                 return true;
7429         }
7430
7431         /* Check if request ended in Command Status - no way to retrieve
7432          * any extra parameters in this case.
7433          */
7434         if (hdr->evt == HCI_EV_CMD_STATUS)
7435                 return false;
7436
7437         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7438                 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7439                            hdr->evt);
7440                 return false;
7441         }
7442
7443         ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7444         if (!ev)
7445                 return false;
7446
7447         if (opcode != __le16_to_cpu(ev->opcode)) {
7448                 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7449                        __le16_to_cpu(ev->opcode));
7450                 return false;
7451         }
7452
7453         return true;
7454 }
7455
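/* If this is the first event seen while suspended, record the wake reason
 * and, where the event carries one, the remote address that woke us, so
 * they can later be reported over the mgmt interface.
 */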
7456 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7457                                   struct sk_buff *skb)
7458 {
7459         struct hci_ev_le_advertising_info *adv;
7460         struct hci_ev_le_direct_adv_info *direct_adv;
7461         struct hci_ev_le_ext_adv_info *ext_adv;
7462         const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7463         const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7464
7465         hci_dev_lock(hdev);
7466
7467         /* If we are currently suspended and this is the first BT event seen,
7468          * save the wake reason associated with the event.
7469          */
7470         if (!hdev->suspended || hdev->wake_reason)
7471                 goto unlock;
7472
7473         /* Default to remote wake. Values for wake_reason are documented in the
7474          * BlueZ mgmt API docs.
7475          */
7476         hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7477
7478         /* Once configured for remote wakeup, we should only wake up for
7479          * reconnections. It's useful to see which device is waking us up so
7480          * keep track of the bdaddr of the connection event that woke us up.
7481          */
7482         if (event == HCI_EV_CONN_REQUEST) {
7483                 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7484                 hdev->wake_addr_type = BDADDR_BREDR;
7485         } else if (event == HCI_EV_CONN_COMPLETE) {
7486                 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7487                 hdev->wake_addr_type = BDADDR_BREDR;
7488         } else if (event == HCI_EV_LE_META) {
7489                 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7490                 u8 subevent = le_ev->subevent;
7491                 u8 *ptr = &skb->data[sizeof(*le_ev)];
7492                 u8 num_reports = *ptr;
7493
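                /* The three report subevents share the same leading layout:
                 * a one-byte number of reports followed by the first report
                 * structure, so the first report's bdaddr can be read through
                 * the matching struct pointer below.
                 */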
7494                 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7495                      subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7496                      subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7497                     num_reports) {
7498                         adv = (void *)(ptr + 1);
7499                         direct_adv = (void *)(ptr + 1);
7500                         ext_adv = (void *)(ptr + 1);
7501
7502                         switch (subevent) {
7503                         case HCI_EV_LE_ADVERTISING_REPORT:
7504                                 bacpy(&hdev->wake_addr, &adv->bdaddr);
7505                                 hdev->wake_addr_type = adv->bdaddr_type;
7506                                 break;
7507                         case HCI_EV_LE_DIRECT_ADV_REPORT:
7508                                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7509                                 hdev->wake_addr_type = direct_adv->bdaddr_type;
7510                                 break;
7511                         case HCI_EV_LE_EXT_ADV_REPORT:
7512                                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7513                                 hdev->wake_addr_type = ext_adv->bdaddr_type;
7514                                 break;
7515                         }
7516                 }
7517         } else {
7518                 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7519         }
7520
7521 unlock:
7522         hci_dev_unlock(hdev);
7523 }
7524
7525 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7526 [_op] = { \
7527         .req = false, \
7528         .func = _func, \
7529         .min_len = _min_len, \
7530         .max_len = _max_len, \
7531 }
7532
7533 #define HCI_EV(_op, _func, _len) \
7534         HCI_EV_VL(_op, _func, _len, _len)
7535
7536 #define HCI_EV_STATUS(_op, _func) \
7537         HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7538
7539 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7540 [_op] = { \
7541         .req = true, \
7542         .func_req = _func, \
7543         .min_len = _min_len, \
7544         .max_len = _max_len, \
7545 }
7546
7547 #define HCI_EV_REQ(_op, _func, _len) \
7548         HCI_EV_REQ_VL(_op, _func, _len, _len)
7549
7550 /* Entries in this table must sit at the index matching the event opcode
7551  * they handle, so using the macros above is recommended: they place each
7552  * entry at its proper index with designated initializers, and events
7553  * without a callback function simply get no entry.
7554  */
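/* For illustration only: with the macros above,
 *
 *   HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
 *          sizeof(struct hci_ev_conn_complete))
 *
 * expands to the designated initializer
 *
 *   [HCI_EV_CONN_COMPLETE] = {
 *           .req = false,
 *           .func = hci_conn_complete_evt,
 *           .min_len = sizeof(struct hci_ev_conn_complete),
 *           .max_len = sizeof(struct hci_ev_conn_complete),
 *   }
 *
 * so the entry lands at index 0x03 no matter where it appears in the
 * initializer list.
 */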
7555 static const struct hci_ev {
7556         bool req;
7557         union {
7558                 void (*func)(struct hci_dev *hdev, void *data,
7559                              struct sk_buff *skb);
7560                 void (*func_req)(struct hci_dev *hdev, void *data,
7561                                  struct sk_buff *skb, u16 *opcode, u8 *status,
7562                                  hci_req_complete_t *req_complete,
7563                                  hci_req_complete_skb_t *req_complete_skb);
7564         };
7565         u16  min_len;
7566         u16  max_len;
7567 } hci_ev_table[U8_MAX + 1] = {
7568         /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7569         HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7570         /* [0x02 = HCI_EV_INQUIRY_RESULT] */
7571         HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7572                   sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7573         /* [0x03 = HCI_EV_CONN_COMPLETE] */
7574         HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7575                sizeof(struct hci_ev_conn_complete)),
7576         /* [0x04 = HCI_EV_CONN_REQUEST] */
7577         HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7578                sizeof(struct hci_ev_conn_request)),
7579         /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7580         HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7581                sizeof(struct hci_ev_disconn_complete)),
7582         /* [0x06 = HCI_EV_AUTH_COMPLETE] */
7583         HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7584                sizeof(struct hci_ev_auth_complete)),
7585         /* [0x07 = HCI_EV_REMOTE_NAME] */
7586         HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7587                sizeof(struct hci_ev_remote_name)),
7588         /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7589         HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7590                sizeof(struct hci_ev_encrypt_change)),
7591         /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7592         HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7593                hci_change_link_key_complete_evt,
7594                sizeof(struct hci_ev_change_link_key_complete)),
7595         /* [0x0b = HCI_EV_REMOTE_FEATURES] */
7596         HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7597                sizeof(struct hci_ev_remote_features)),
7598         /* [0x0e = HCI_EV_CMD_COMPLETE] */
7599         HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7600                       sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7601         /* [0x0f = HCI_EV_CMD_STATUS] */
7602         HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7603                    sizeof(struct hci_ev_cmd_status)),
7604         /* [0x10 = HCI_EV_HARDWARE_ERROR] */
7605         HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7606                sizeof(struct hci_ev_hardware_error)),
7607         /* [0x12 = HCI_EV_ROLE_CHANGE] */
7608         HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7609                sizeof(struct hci_ev_role_change)),
7610         /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7611         HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7612                   sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7613         /* [0x14 = HCI_EV_MODE_CHANGE] */
7614         HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7615                sizeof(struct hci_ev_mode_change)),
7616         /* [0x16 = HCI_EV_PIN_CODE_REQ] */
7617         HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7618                sizeof(struct hci_ev_pin_code_req)),
7619         /* [0x17 = HCI_EV_LINK_KEY_REQ] */
7620         HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7621                sizeof(struct hci_ev_link_key_req)),
7622         /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7623         HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7624                sizeof(struct hci_ev_link_key_notify)),
7625         /* [0x1c = HCI_EV_CLOCK_OFFSET] */
7626         HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7627                sizeof(struct hci_ev_clock_offset)),
7628         /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7629         HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7630                sizeof(struct hci_ev_pkt_type_change)),
7631         /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7632         HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7633                sizeof(struct hci_ev_pscan_rep_mode)),
7634         /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7635         HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7636                   hci_inquiry_result_with_rssi_evt,
7637                   sizeof(struct hci_ev_inquiry_result_rssi),
7638                   HCI_MAX_EVENT_SIZE),
7639         /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7640         HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7641                sizeof(struct hci_ev_remote_ext_features)),
7642         /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7643         HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7644                sizeof(struct hci_ev_sync_conn_complete)),
7645         /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7646         HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7647                   hci_extended_inquiry_result_evt,
7648                   sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7649         /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7650         HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7651                sizeof(struct hci_ev_key_refresh_complete)),
7652         /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7653         HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7654                sizeof(struct hci_ev_io_capa_request)),
7655         /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7656         HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7657                sizeof(struct hci_ev_io_capa_reply)),
7658         /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7659         HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7660                sizeof(struct hci_ev_user_confirm_req)),
7661         /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7662         HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7663                sizeof(struct hci_ev_user_passkey_req)),
7664         /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7665         HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7666                sizeof(struct hci_ev_remote_oob_data_request)),
7667         /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7668         HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7669                sizeof(struct hci_ev_simple_pair_complete)),
7670         /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7671         HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7672                sizeof(struct hci_ev_user_passkey_notify)),
7673         /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7674         HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7675                sizeof(struct hci_ev_keypress_notify)),
7676         /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7677         HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7678                sizeof(struct hci_ev_remote_host_features)),
7679         /* [0x3e = HCI_EV_LE_META] */
7680         HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7681                       sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7682 #if IS_ENABLED(CONFIG_BT_HS)
7683         /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7684         HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7685                sizeof(struct hci_ev_phy_link_complete)),
7686         /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7687         HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7688                sizeof(struct hci_ev_channel_selected)),
7689         /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7690         HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7691                hci_disconn_phylink_complete_evt,
7692                sizeof(struct hci_ev_disconn_phy_link_complete)),
7693         /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7694         HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7695                sizeof(struct hci_ev_logical_link_complete)),
7696         /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7697         HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7698                hci_disconn_loglink_complete_evt,
7699                sizeof(struct hci_ev_disconn_logical_link_complete)),
7700 #endif
7701         /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7702         HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7703                sizeof(struct hci_ev_num_comp_blocks)),
7704 #ifdef TIZEN_BT
7705         /* [0xFF = HCI_EV_VENDOR_SPECIFIC] */
7706         HCI_EV(HCI_EV_VENDOR_SPECIFIC, hci_vendor_specific_evt,
7707                sizeof(struct hci_ev_vendor_specific)),
7708 #else
7709         /* [0xff = HCI_EV_VENDOR] */
7710         HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7711 #endif
7712 };
7713
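/* Dispatch a single HCI event through hci_ev_table: ignore events without a
 * handler, enforce the entry's minimum length, warn (rate limited) when the
 * event exceeds max_len, then pull min_len bytes as the fixed event structure
 * and pass it, along with any trailing data left in the skb, to either
 * ->func or ->func_req depending on whether the entry completes requests.
 */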
7714 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7715                            u16 *opcode, u8 *status,
7716                            hci_req_complete_t *req_complete,
7717                            hci_req_complete_skb_t *req_complete_skb)
7718 {
7719         const struct hci_ev *ev = &hci_ev_table[event];
7720         void *data;
7721
7722         if (!ev->func)
7723                 return;
7724
7725         if (skb->len < ev->min_len) {
7726                 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7727                            event, skb->len, ev->min_len);
7728                 return;
7729         }
7730
7731         /* Just warn if the length is over max_len; it may still be
7732          * possible to partially parse the event, so leave it to the
7733          * callback to decide whether that is acceptable.
7734          */
7735         if (skb->len > ev->max_len)
7736                 bt_dev_warn_ratelimited(hdev,
7737                                         "unexpected event 0x%2.2x length: %u > %u",
7738                                         event, skb->len, ev->max_len);
7739
7740         data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7741         if (!data)
7742                 return;
7743
7744         if (ev->req)
7745                 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7746                              req_complete_skb);
7747         else
7748                 ev->func(hdev, data, skb);
7749 }
7750
7751 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7752 {
7753         struct hci_event_hdr *hdr = (void *) skb->data;
7754         hci_req_complete_t req_complete = NULL;
7755         hci_req_complete_skb_t req_complete_skb = NULL;
7756         struct sk_buff *orig_skb = NULL;
7757         u8 status = 0, event, req_evt = 0;
7758         u16 opcode = HCI_OP_NOP;
7759
7760         if (skb->len < sizeof(*hdr)) {
7761                 bt_dev_err(hdev, "Malformed HCI Event");
7762                 goto done;
7763         }
7764
7765         kfree_skb(hdev->recv_event);
7766         hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7767
7768         event = hdr->evt;
7769         if (!event) {
7770                 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7771                             event);
7772                 goto done;
7773         }
7774
7775         /* Only match event if command OGF is not for LE */
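        /* LE commands are deliberately skipped here: their expected
         * subevents are matched against the pending command in
         * hci_le_meta_evt() instead, since LE completions arrive as LE Meta
         * subevents.
         */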
7776         if (hdev->sent_cmd &&
7777             hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
7778             hci_skb_event(hdev->sent_cmd) == event) {
7779                 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
7780                                      status, &req_complete, &req_complete_skb);
7781                 req_evt = event;
7782         }
7783
7784         /* If it looks like we might end up having to call
7785          * req_complete_skb, store a pristine copy of the skb since the
7786          * various handlers may modify the original one through
7787          * skb_pull() calls, etc.
7788          */
7789         if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7790             event == HCI_EV_CMD_COMPLETE)
7791                 orig_skb = skb_clone(skb, GFP_KERNEL);
7792
7793         skb_pull(skb, HCI_EVENT_HDR_SIZE);
7794
7795         /* Store wake reason if we're suspended */
7796         hci_store_wake_reason(hdev, event, skb);
7797
7798         bt_dev_dbg(hdev, "event 0x%2.2x", event);
7799
7800         hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7801                        &req_complete_skb);
7802
7803         if (req_complete) {
7804                 req_complete(hdev, status, opcode);
7805         } else if (req_complete_skb) {
7806                 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7807                         kfree_skb(orig_skb);
7808                         orig_skb = NULL;
7809                 }
7810                 req_complete_skb(hdev, status, opcode, orig_skb);
7811         }
7812
7813 done:
7814         kfree_skb(orig_skb);
7815         kfree_skb(skb);
7816         hdev->stat.evt_rx++;
7817 }