Bluetooth: Set le data length command and event
[platform/kernel/linux-starfive.git] / net / bluetooth / hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 #include "eir.h"
40
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42                  "\x00\x00\x00\x00\x00\x00\x00\x00"
43
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45
46 /* Handle HCI Event packets */
47
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
49                              u8 ev, size_t len)
50 {
51         void *data;
52
53         data = skb_pull_data(skb, len);
54         if (!data)
55                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
56
57         return data;
58 }
59
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
61                              u16 op, size_t len)
62 {
63         void *data;
64
65         data = skb_pull_data(skb, len);
66         if (!data)
67                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
68
69         return data;
70 }
71
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
73                                 u8 ev, size_t len)
74 {
75         void *data;
76
77         data = skb_pull_data(skb, len);
78         if (!data)
79                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
80
81         return data;
82 }
83
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * Clears the local HCI_INQUIRY state, wakes any waiters sleeping on the
 * inquiry bit and updates the discovery state machine accordingly.
 *
 * Returns the (possibly rewritten, see below) HCI status code.
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
                                struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	/* Clear the bit before waking waiters; the barrier pairs with the
	 * implicit ordering expected by wait_on_bit() users.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Kick any connection attempts that were held back by the inquiry */
	hci_conn_check_pending(hdev);

	return rp->status;
}
123
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
125                               struct sk_buff *skb)
126 {
127         struct hci_ev_status *rp = data;
128
129         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
130
131         if (rp->status)
132                 return rp->status;
133
134         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
135
136         return rp->status;
137 }
138
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
140                                    struct sk_buff *skb)
141 {
142         struct hci_ev_status *rp = data;
143
144         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
145
146         if (rp->status)
147                 return rp->status;
148
149         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
150
151         hci_conn_check_pending(hdev);
152
153         return rp->status;
154 }
155
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
157                                         struct sk_buff *skb)
158 {
159         struct hci_ev_status *rp = data;
160
161         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
162
163         return rp->status;
164 }
165
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
167                                 struct sk_buff *skb)
168 {
169         struct hci_rp_role_discovery *rp = data;
170         struct hci_conn *conn;
171
172         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
173
174         if (rp->status)
175                 return rp->status;
176
177         hci_dev_lock(hdev);
178
179         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180         if (conn)
181                 conn->role = rp->role;
182
183         hci_dev_unlock(hdev);
184
185         return rp->status;
186 }
187
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
189                                   struct sk_buff *skb)
190 {
191         struct hci_rp_read_link_policy *rp = data;
192         struct hci_conn *conn;
193
194         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
195
196         if (rp->status)
197                 return rp->status;
198
199         hci_dev_lock(hdev);
200
201         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
202         if (conn)
203                 conn->link_policy = __le16_to_cpu(rp->policy);
204
205         hci_dev_unlock(hdev);
206
207         return rp->status;
208 }
209
210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
211                                    struct sk_buff *skb)
212 {
213         struct hci_rp_write_link_policy *rp = data;
214         struct hci_conn *conn;
215         void *sent;
216 #ifdef TIZEN_BT
217         struct hci_cp_write_link_policy cp;
218         struct hci_conn *sco_conn;
219 #endif
220
221         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
222
223         if (rp->status)
224                 return rp->status;
225
226         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
227         if (!sent)
228                 return rp->status;
229
230         hci_dev_lock(hdev);
231
232         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
233         if (conn)
234                 conn->link_policy = get_unaligned_le16(sent + 2);
235
236 #ifdef TIZEN_BT
237         sco_conn = hci_conn_hash_lookup_sco(hdev);
238         if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
239             conn->link_policy & HCI_LP_SNIFF) {
240                 BT_ERR("SNIFF is not allowed during sco connection");
241                 cp.handle = __cpu_to_le16(conn->handle);
242                 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
243                 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
244         }
245 #endif
246
247         hci_dev_unlock(hdev);
248
249         return rp->status;
250 }
251
252 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
253                                       struct sk_buff *skb)
254 {
255         struct hci_rp_read_def_link_policy *rp = data;
256
257         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
258
259         if (rp->status)
260                 return rp->status;
261
262         hdev->link_policy = __le16_to_cpu(rp->policy);
263
264         return rp->status;
265 }
266
267 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
268                                        struct sk_buff *skb)
269 {
270         struct hci_ev_status *rp = data;
271         void *sent;
272
273         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
274
275         if (rp->status)
276                 return rp->status;
277
278         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
279         if (!sent)
280                 return rp->status;
281
282         hdev->link_policy = get_unaligned_le16(sent);
283
284         return rp->status;
285 }
286
/* Command Complete handler for HCI_OP_RESET.
 *
 * Drops all volatile host-side state that the controller reset has
 * invalidated, so the stack's view matches the freshly reset controller.
 *
 * Returns the HCI status code of the command.
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The reset attempt is over regardless of the outcome */
	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Cached advertising and scan response payloads are stale now */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller cleared its accept and resolving lists on reset,
	 * so drop the host-side copies as well.
	 */
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}
321
322 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
323                                       struct sk_buff *skb)
324 {
325         struct hci_rp_read_stored_link_key *rp = data;
326         struct hci_cp_read_stored_link_key *sent;
327
328         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
329
330         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
331         if (!sent)
332                 return rp->status;
333
334         if (!rp->status && sent->read_all == 0x01) {
335                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
336                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
337         }
338
339         return rp->status;
340 }
341
342 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
343                                         struct sk_buff *skb)
344 {
345         struct hci_rp_delete_stored_link_key *rp = data;
346         u16 num_keys;
347
348         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
349
350         if (rp->status)
351                 return rp->status;
352
353         num_keys = le16_to_cpu(rp->num_keys);
354
355         if (num_keys <= hdev->stored_num_keys)
356                 hdev->stored_num_keys -= num_keys;
357         else
358                 hdev->stored_num_keys = 0;
359
360         return rp->status;
361 }
362
363 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
364                                   struct sk_buff *skb)
365 {
366         struct hci_ev_status *rp = data;
367         void *sent;
368
369         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
370
371         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
372         if (!sent)
373                 return rp->status;
374
375         hci_dev_lock(hdev);
376
377         if (hci_dev_test_flag(hdev, HCI_MGMT))
378                 mgmt_set_local_name_complete(hdev, sent, rp->status);
379         else if (!rp->status)
380                 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
381
382         hci_dev_unlock(hdev);
383
384         return rp->status;
385 }
386
387 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
388                                  struct sk_buff *skb)
389 {
390         struct hci_rp_read_local_name *rp = data;
391
392         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
393
394         if (rp->status)
395                 return rp->status;
396
397         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
398             hci_dev_test_flag(hdev, HCI_CONFIG))
399                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
400
401         return rp->status;
402 }
403
404 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
405                                    struct sk_buff *skb)
406 {
407         struct hci_ev_status *rp = data;
408         void *sent;
409
410         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
411
412         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
413         if (!sent)
414                 return rp->status;
415
416         hci_dev_lock(hdev);
417
418         if (!rp->status) {
419                 __u8 param = *((__u8 *) sent);
420
421                 if (param == AUTH_ENABLED)
422                         set_bit(HCI_AUTH, &hdev->flags);
423                 else
424                         clear_bit(HCI_AUTH, &hdev->flags);
425         }
426
427         if (hci_dev_test_flag(hdev, HCI_MGMT))
428                 mgmt_auth_enable_complete(hdev, rp->status);
429
430         hci_dev_unlock(hdev);
431
432         return rp->status;
433 }
434
435 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
436                                     struct sk_buff *skb)
437 {
438         struct hci_ev_status *rp = data;
439         __u8 param;
440         void *sent;
441
442         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
443
444         if (rp->status)
445                 return rp->status;
446
447         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
448         if (!sent)
449                 return rp->status;
450
451         param = *((__u8 *) sent);
452
453         if (param)
454                 set_bit(HCI_ENCRYPT, &hdev->flags);
455         else
456                 clear_bit(HCI_ENCRYPT, &hdev->flags);
457
458         return rp->status;
459 }
460
461 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
462                                    struct sk_buff *skb)
463 {
464         struct hci_ev_status *rp = data;
465         __u8 param;
466         void *sent;
467
468         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
469
470         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
471         if (!sent)
472                 return rp->status;
473
474         param = *((__u8 *) sent);
475
476         hci_dev_lock(hdev);
477
478         if (rp->status) {
479                 hdev->discov_timeout = 0;
480                 goto done;
481         }
482
483         if (param & SCAN_INQUIRY)
484                 set_bit(HCI_ISCAN, &hdev->flags);
485         else
486                 clear_bit(HCI_ISCAN, &hdev->flags);
487
488         if (param & SCAN_PAGE)
489                 set_bit(HCI_PSCAN, &hdev->flags);
490         else
491                 clear_bit(HCI_PSCAN, &hdev->flags);
492
493 done:
494         hci_dev_unlock(hdev);
495
496         return rp->status;
497 }
498
499 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
500                                   struct sk_buff *skb)
501 {
502         struct hci_ev_status *rp = data;
503         struct hci_cp_set_event_filter *cp;
504         void *sent;
505
506         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
507
508         if (rp->status)
509                 return rp->status;
510
511         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
512         if (!sent)
513                 return rp->status;
514
515         cp = (struct hci_cp_set_event_filter *)sent;
516
517         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
518                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
519         else
520                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
521
522         return rp->status;
523 }
524
525 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
526                                    struct sk_buff *skb)
527 {
528         struct hci_rp_read_class_of_dev *rp = data;
529
530         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
531
532         if (rp->status)
533                 return rp->status;
534
535         memcpy(hdev->dev_class, rp->dev_class, 3);
536
537         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
538                    hdev->dev_class[1], hdev->dev_class[0]);
539
540         return rp->status;
541 }
542
543 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
544                                     struct sk_buff *skb)
545 {
546         struct hci_ev_status *rp = data;
547         void *sent;
548
549         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
550
551         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
552         if (!sent)
553                 return rp->status;
554
555         hci_dev_lock(hdev);
556
557         if (!rp->status)
558                 memcpy(hdev->dev_class, sent, 3);
559
560         if (hci_dev_test_flag(hdev, HCI_MGMT))
561                 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
562
563         hci_dev_unlock(hdev);
564
565         return rp->status;
566 }
567
568 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
569                                     struct sk_buff *skb)
570 {
571         struct hci_rp_read_voice_setting *rp = data;
572         __u16 setting;
573
574         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
575
576         if (rp->status)
577                 return rp->status;
578
579         setting = __le16_to_cpu(rp->voice_setting);
580
581         if (hdev->voice_setting == setting)
582                 return rp->status;
583
584         hdev->voice_setting = setting;
585
586         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
587
588         if (hdev->notify)
589                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
590
591         return rp->status;
592 }
593
594 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
595                                      struct sk_buff *skb)
596 {
597         struct hci_ev_status *rp = data;
598         __u16 setting;
599         void *sent;
600
601         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
602
603         if (rp->status)
604                 return rp->status;
605
606         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
607         if (!sent)
608                 return rp->status;
609
610         setting = get_unaligned_le16(sent);
611
612         if (hdev->voice_setting == setting)
613                 return rp->status;
614
615         hdev->voice_setting = setting;
616
617         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
618
619         if (hdev->notify)
620                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
621
622         return rp->status;
623 }
624
625 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
626                                         struct sk_buff *skb)
627 {
628         struct hci_rp_read_num_supported_iac *rp = data;
629
630         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
631
632         if (rp->status)
633                 return rp->status;
634
635         hdev->num_iac = rp->num_iac;
636
637         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
638
639         return rp->status;
640 }
641
642 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
643                                 struct sk_buff *skb)
644 {
645         struct hci_ev_status *rp = data;
646         struct hci_cp_write_ssp_mode *sent;
647
648         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
649
650         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
651         if (!sent)
652                 return rp->status;
653
654         hci_dev_lock(hdev);
655
656         if (!rp->status) {
657                 if (sent->mode)
658                         hdev->features[1][0] |= LMP_HOST_SSP;
659                 else
660                         hdev->features[1][0] &= ~LMP_HOST_SSP;
661         }
662
663         if (!rp->status) {
664                 if (sent->mode)
665                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
666                 else
667                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
668         }
669
670         hci_dev_unlock(hdev);
671
672         return rp->status;
673 }
674
675 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
676                                   struct sk_buff *skb)
677 {
678         struct hci_ev_status *rp = data;
679         struct hci_cp_write_sc_support *sent;
680
681         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
682
683         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
684         if (!sent)
685                 return rp->status;
686
687         hci_dev_lock(hdev);
688
689         if (!rp->status) {
690                 if (sent->support)
691                         hdev->features[1][0] |= LMP_HOST_SC;
692                 else
693                         hdev->features[1][0] &= ~LMP_HOST_SC;
694         }
695
696         if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
697                 if (sent->support)
698                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
699                 else
700                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
701         }
702
703         hci_dev_unlock(hdev);
704
705         return rp->status;
706 }
707
708 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
709                                     struct sk_buff *skb)
710 {
711         struct hci_rp_read_local_version *rp = data;
712
713         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
714
715         if (rp->status)
716                 return rp->status;
717
718         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
719             hci_dev_test_flag(hdev, HCI_CONFIG)) {
720                 hdev->hci_ver = rp->hci_ver;
721                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
722                 hdev->lmp_ver = rp->lmp_ver;
723                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
724                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
725         }
726
727         return rp->status;
728 }
729
/* Command Complete handler for HCI_OP_READ_ENC_KEY_SIZE.
 *
 * Records the negotiated encryption key size on the connection and then
 * lets the encryption-change logic re-evaluate link security.
 *
 * Returns the HCI status, or 0xFF when the connection handle is unknown.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		/* Key size is known now; report success to the caller */
		status = 0;
	}

	/* Re-run the encryption confirmation with the key size in place */
	hci_encrypt_cfm(conn, 0);

done:
	hci_dev_unlock(hdev);

	return status;
}
770
771 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
772                                      struct sk_buff *skb)
773 {
774         struct hci_rp_read_local_commands *rp = data;
775
776         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
777
778         if (rp->status)
779                 return rp->status;
780
781         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
782             hci_dev_test_flag(hdev, HCI_CONFIG))
783                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
784
785         return rp->status;
786 }
787
788 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
789                                            struct sk_buff *skb)
790 {
791         struct hci_rp_read_auth_payload_to *rp = data;
792         struct hci_conn *conn;
793
794         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
795
796         if (rp->status)
797                 return rp->status;
798
799         hci_dev_lock(hdev);
800
801         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
802         if (conn)
803                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
804
805         hci_dev_unlock(hdev);
806
807         return rp->status;
808 }
809
810 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
811                                             struct sk_buff *skb)
812 {
813         struct hci_rp_write_auth_payload_to *rp = data;
814         struct hci_conn *conn;
815         void *sent;
816
817         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
818
819         if (rp->status)
820                 return rp->status;
821
822         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
823         if (!sent)
824                 return rp->status;
825
826         hci_dev_lock(hdev);
827
828         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
829         if (conn)
830                 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
831
832         hci_dev_unlock(hdev);
833
834         return rp->status;
835 }
836
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches the LMP feature mask and derives the ACL packet types and
 * (e)SCO link types the controller can use from the feature bits.
 *
 * Returns the HCI status code of the command.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO rates and 3-slot EDR eSCO packets */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}
888
889 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
890                                          struct sk_buff *skb)
891 {
892         struct hci_rp_read_local_ext_features *rp = data;
893
894         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
895
896         if (rp->status)
897                 return rp->status;
898
899         if (hdev->max_page < rp->max_page) {
900                 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
901                              &hdev->quirks))
902                         bt_dev_warn(hdev, "broken local ext features page 2");
903                 else
904                         hdev->max_page = rp->max_page;
905         }
906
907         if (rp->page < HCI_MAX_PAGES)
908                 memcpy(hdev->features[rp->page], rp->features, 8);
909
910         return rp->status;
911 }
912
913 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
914                                         struct sk_buff *skb)
915 {
916         struct hci_rp_read_flow_control_mode *rp = data;
917
918         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
919
920         if (rp->status)
921                 return rp->status;
922
923         hdev->flow_ctl_mode = rp->mode;
924
925         return rp->status;
926 }
927
928 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
929                                   struct sk_buff *skb)
930 {
931         struct hci_rp_read_buffer_size *rp = data;
932
933         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
934
935         if (rp->status)
936                 return rp->status;
937
938         hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
939         hdev->sco_mtu  = rp->sco_mtu;
940         hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
941         hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
942
943         if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
944                 hdev->sco_mtu  = 64;
945                 hdev->sco_pkts = 8;
946         }
947
948         hdev->acl_cnt = hdev->acl_pkts;
949         hdev->sco_cnt = hdev->sco_pkts;
950
951         BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
952                hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
953
954         return rp->status;
955 }
956
957 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
958                               struct sk_buff *skb)
959 {
960         struct hci_rp_read_bd_addr *rp = data;
961
962         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
963
964         if (rp->status)
965                 return rp->status;
966
967         if (test_bit(HCI_INIT, &hdev->flags))
968                 bacpy(&hdev->bdaddr, &rp->bdaddr);
969
970         if (hci_dev_test_flag(hdev, HCI_SETUP))
971                 bacpy(&hdev->setup_addr, &rp->bdaddr);
972
973         return rp->status;
974 }
975
976 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
977                                          struct sk_buff *skb)
978 {
979         struct hci_rp_read_local_pairing_opts *rp = data;
980
981         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
982
983         if (rp->status)
984                 return rp->status;
985
986         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
987             hci_dev_test_flag(hdev, HCI_CONFIG)) {
988                 hdev->pairing_opts = rp->pairing_opts;
989                 hdev->max_enc_key_size = rp->max_key_size;
990         }
991
992         return rp->status;
993 }
994
/* Command complete handler for HCI_OP_READ_PAGE_SCAN_ACTIVITY.
 *
 * Caches the controller's page scan interval and window, but only
 * during initialization (HCI_INIT) when reading back defaults.
 */
static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}
1012
/* Command complete handler for HCI_OP_WRITE_PAGE_SCAN_ACTIVITY.
 *
 * On success, mirrors the interval/window values from the originating
 * command into hdev so the cached state matches the controller.
 */
static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The values are not in the event; recover them from the sent
	 * command, bailing out if it is no longer available.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}
1033
1034 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1035                                      struct sk_buff *skb)
1036 {
1037         struct hci_rp_read_page_scan_type *rp = data;
1038
1039         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1040
1041         if (rp->status)
1042                 return rp->status;
1043
1044         if (test_bit(HCI_INIT, &hdev->flags))
1045                 hdev->page_scan_type = rp->type;
1046
1047         return rp->status;
1048 }
1049
1050 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1051                                       struct sk_buff *skb)
1052 {
1053         struct hci_ev_status *rp = data;
1054         u8 *type;
1055
1056         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1057
1058         if (rp->status)
1059                 return rp->status;
1060
1061         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1062         if (type)
1063                 hdev->page_scan_type = *type;
1064
1065         return rp->status;
1066 }
1067
/* Command complete handler for HCI_OP_READ_DATA_BLOCK_SIZE.
 *
 * Caches the block-based flow control parameters (max ACL length,
 * block length and number of blocks) and initializes the free block
 * counter to the full number of blocks.
 */
static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks start out free. */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	return rp->status;
}
1089
/* Command complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" field of the originating command, stores
 * either the local clock (in hdev) or the clock and accuracy of the
 * connection matching the returned handle.
 */
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 means the local clock was requested. */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1123
/* Command complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * Caches the local AMP controller capabilities (status, bandwidth,
 * latency, PDU/assoc sizes and flush timeouts) in hdev.
 */
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}
1147
1148 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1149                                        struct sk_buff *skb)
1150 {
1151         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1152
1153         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1154
1155         if (rp->status)
1156                 return rp->status;
1157
1158         hdev->inq_tx_power = rp->tx_power;
1159
1160         return rp->status;
1161 }
1162
1163 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1164                                              struct sk_buff *skb)
1165 {
1166         struct hci_rp_read_def_err_data_reporting *rp = data;
1167
1168         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1169
1170         if (rp->status)
1171                 return rp->status;
1172
1173         hdev->err_data_reporting = rp->err_data_reporting;
1174
1175         return rp->status;
1176 }
1177
/* Command complete handler for HCI_OP_WRITE_DEF_ERR_DATA_REPORTING.
 *
 * On success, mirrors the erroneous data reporting setting from the
 * originating command into hdev.
 */
static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The value is not in the event; recover it from the sent command. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}
1197
/* Command complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Notifies mgmt of the result (regardless of status) and, on success,
 * records the PIN length on the matching ACL connection.
 */
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	/* mgmt is informed even on failure, before the status check. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1227
/* Command complete handler for HCI_OP_PIN_CODE_NEG_REPLY.
 *
 * Forwards the result to mgmt when the management interface is active.
 */
static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1245
/* Command complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 *
 * Caches the LE ACL data packet length and packet count and seeds the
 * LE flow-control counter with the full packet count.
 */
static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* All LE buffers start out free. */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	return rp->status;
}
1265
1266 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1267                                         struct sk_buff *skb)
1268 {
1269         struct hci_rp_le_read_local_features *rp = data;
1270
1271         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1272
1273         if (rp->status)
1274                 return rp->status;
1275
1276         memcpy(hdev->le_features, rp->features, 8);
1277
1278         return rp->status;
1279 }
1280
1281 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1282                                       struct sk_buff *skb)
1283 {
1284         struct hci_rp_le_read_adv_tx_power *rp = data;
1285
1286         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1287
1288         if (rp->status)
1289                 return rp->status;
1290
1291         hdev->adv_tx_power = rp->tx_power;
1292
1293         return rp->status;
1294 }
1295
/* Command complete handler for HCI_OP_USER_CONFIRM_REPLY.
 *
 * Forwards the result (success or failure) to mgmt when the management
 * interface is active.
 */
static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1313
/* Command complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY.
 *
 * Forwards the result to mgmt when the management interface is active.
 * Note: the negative reply shares the reply's response structure.
 */
static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1331
/* Command complete handler for HCI_OP_USER_PASSKEY_REPLY.
 *
 * Forwards the result to mgmt when the management interface is active.
 * Note: passkey replies reuse the user confirm reply response layout.
 */
static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1349
/* Command complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY.
 *
 * Forwards the result to mgmt when the management interface is active.
 */
static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1367
1368 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1369                                      struct sk_buff *skb)
1370 {
1371         struct hci_rp_read_local_oob_data *rp = data;
1372
1373         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1374
1375         return rp->status;
1376 }
1377
1378 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1379                                          struct sk_buff *skb)
1380 {
1381         struct hci_rp_read_local_oob_ext_data *rp = data;
1382
1383         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1384
1385         return rp->status;
1386 }
1387
/* Command complete handler for HCI_OP_LE_SET_RANDOM_ADDR.
 *
 * Mirrors the address that was programmed into hdev->random_addr and,
 * if it matches the current RPA, restarts the RPA expiry timer.
 */
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The address is not in the event; recover it from the sent command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1417
/* Command complete handler for HCI_OP_LE_SET_DEFAULT_PHY.
 *
 * On success, mirrors the default TX/RX PHY preferences from the
 * originating command into hdev.
 */
static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}
1442
/* Command complete handler for HCI_OP_LE_SET_ADV_SET_RAND_ADDR.
 *
 * Stores the random address on the matching advertising instance and,
 * if it is the current RPA, re-arms that instance's RPA expiry work.
 */
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only in case the adv instance since handle 0x00 shall be using
	 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
	 * non-extended adverting.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1480
/* Command complete handler for HCI_OP_LE_REMOVE_ADV_SET.
 *
 * Removes the corresponding advertising instance from hdev and, if
 * removal succeeded, notifies mgmt that the instance is gone.
 */
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The instance id is not in the event; recover it from the
	 * originating command.
	 */
	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}
1508
/* Command complete handler for HCI_OP_LE_CLEAR_ADV_SETS.
 *
 * Removes every advertising instance from hdev, notifying mgmt for
 * each instance that was successfully removed.
 */
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Only act if this completion matches our pending command. */
	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	/* _safe iteration: hci_remove_adv_instance() frees entries. */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1539
1540 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1541                                         struct sk_buff *skb)
1542 {
1543         struct hci_rp_le_read_transmit_power *rp = data;
1544
1545         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1546
1547         if (rp->status)
1548                 return rp->status;
1549
1550         hdev->min_le_tx_power = rp->min_le_tx_power;
1551         hdev->max_le_tx_power = rp->max_le_tx_power;
1552
1553         return rp->status;
1554 }
1555
/* Command complete handler for HCI_OP_LE_SET_PRIVACY_MODE.
 *
 * On success, mirrors the privacy mode from the originating command
 * onto the matching connection parameters entry.
 */
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		params->privacy_mode = cp->mode;

	hci_dev_unlock(hdev);

	return rp->status;
}
1582
/* Command complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * Tracks the HCI_LE_ADV flag according to the enable value that was
 * sent and, when enabling while a peripheral-role connection attempt
 * is in flight, arms the connection timeout.
 */
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The enable value is not in the event; recover it from the
	 * originating command.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1621
/* Command complete handler for HCI_OP_LE_SET_EXT_ADV_ENABLE.
 *
 * Updates the enabled state of the affected advertising instance(s)
 * and maintains the HCI_LE_ADV flag: the flag is only cleared when no
 * instance remains enabled. When enabling while a connection attempt
 * is pending, arms the connection timeout.
 */
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	/* NOTE(review): only the first adv set in the command is
	 * inspected here — presumably the stack never sends more than
	 * one set per command; verify against the callers.
	 */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1686
/* Command complete handler for HCI_OP_LE_SET_SCAN_PARAM.
 *
 * On success, mirrors the scan type (active/passive) from the
 * originating command into hdev->le_scan_type.
 */
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1710
/* Command complete handler for HCI_OP_LE_SET_EXT_SCAN_PARAMS.
 *
 * On success, mirrors the scan type from the originating command's
 * first per-PHY parameter block into hdev->le_scan_type.
 */
static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	/* NOTE(review): only the first PHY's parameters are consulted —
	 * presumably all configured PHYs share the same scan type.
	 */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1737
1738 static bool has_pending_adv_report(struct hci_dev *hdev)
1739 {
1740         struct discovery_state *d = &hdev->discovery;
1741
1742         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1743 }
1744
1745 static void clear_pending_adv_report(struct hci_dev *hdev)
1746 {
1747         struct discovery_state *d = &hdev->discovery;
1748
1749         bacpy(&d->last_adv_addr, BDADDR_ANY);
1750         d->last_adv_data_len = 0;
1751 }
1752
#ifndef TIZEN_BT
/* Buffer the latest advertising report in the discovery state so it
 * can be delivered (e.g. once scanning stops). Reports longer than
 * HCI_MAX_AD_LENGTH are silently dropped.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	/* Guard the fixed-size last_adv_data buffer. */
	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
#endif
1771
/* Common completion work for the legacy and extended LE scan enable
 * commands: maintains the HCI_LE_SCAN flag, flushes or clears any
 * buffered advertising report, and updates discovery state.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans start with a clean slate of buffered
		 * advertising data.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
#ifndef TIZEN_BT /* The below line is kernel bug. */
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
#else
			hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
#endif
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1831
1832 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1833                                     struct sk_buff *skb)
1834 {
1835         struct hci_cp_le_set_scan_enable *cp;
1836         struct hci_ev_status *rp = data;
1837
1838         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1839
1840         if (rp->status)
1841                 return rp->status;
1842
1843         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1844         if (!cp)
1845                 return rp->status;
1846
1847         le_set_scan_enable_complete(hdev, cp->enable);
1848
1849         return rp->status;
1850 }
1851
1852 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1853                                         struct sk_buff *skb)
1854 {
1855         struct hci_cp_le_set_ext_scan_enable *cp;
1856         struct hci_ev_status *rp = data;
1857
1858         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1859
1860         if (rp->status)
1861                 return rp->status;
1862
1863         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1864         if (!cp)
1865                 return rp->status;
1866
1867         le_set_scan_enable_complete(hdev, cp->enable);
1868
1869         return rp->status;
1870 }
1871
1872 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1873                                       struct sk_buff *skb)
1874 {
1875         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1876
1877         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1878                    rp->num_of_sets);
1879
1880         if (rp->status)
1881                 return rp->status;
1882
1883         hdev->le_num_of_adv_sets = rp->num_of_sets;
1884
1885         return rp->status;
1886 }
1887
1888 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1889                                           struct sk_buff *skb)
1890 {
1891         struct hci_rp_le_read_accept_list_size *rp = data;
1892
1893         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1894
1895         if (rp->status)
1896                 return rp->status;
1897
1898         hdev->le_accept_list_size = rp->size;
1899
1900         return rp->status;
1901 }
1902
/* Command complete handler for HCI_OP_LE_CLEAR_ACCEPT_LIST.
 *
 * On success, empties the host-side mirror of the LE accept list.
 */
static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
1919
/* Command complete handler for HCI_OP_LE_ADD_TO_ACCEPT_LIST.
 *
 * On success, adds the address/type from the originating command to
 * the host-side mirror of the LE accept list.
 */
static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The address is not in the event; recover it from the sent command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1942
1943 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1944                                          struct sk_buff *skb)
1945 {
1946         struct hci_cp_le_del_from_accept_list *sent;
1947         struct hci_ev_status *rp = data;
1948
1949         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1950
1951         if (rp->status)
1952                 return rp->status;
1953
1954         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1955         if (!sent)
1956                 return rp->status;
1957
1958         hci_dev_lock(hdev);
1959         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1960                             sent->bdaddr_type);
1961         hci_dev_unlock(hdev);
1962
1963         return rp->status;
1964 }
1965
1966 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1967                                           struct sk_buff *skb)
1968 {
1969         struct hci_rp_le_read_supported_states *rp = data;
1970
1971         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1972
1973         if (rp->status)
1974                 return rp->status;
1975
1976         memcpy(hdev->le_states, rp->le_states, 8);
1977
1978         return rp->status;
1979 }
1980
/* Completion of HCI_OP_LE_READ_DEF_DATA_LEN: cache the controller's
 * suggested default data length (TX octets and TX time).
 *
 * On TIZEN_BT the result is also forwarded to the management interface
 * so a pending userspace request always gets a reply, even on error.
 */
static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
                                      struct sk_buff *skb)
{
        struct hci_rp_le_read_def_data_len *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

#ifdef TIZEN_BT
        hci_dev_lock(hdev);
#else
        if (rp->status)
                return rp->status;
#endif

        /* NOTE(review): on TIZEN_BT these fields are cached even when
         * rp->status is non-zero - confirm the reply payload is defined
         * in that case.
         */
        hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
        hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

#ifdef TIZEN_BT
        mgmt_le_read_host_suggested_data_length_complete(hdev, rp->status);

        hci_dev_unlock(hdev);
#endif

        return rp->status;
}
2006
/* Completion of HCI_OP_LE_WRITE_DEF_DATA_LEN: once the controller has
 * accepted the new suggested defaults, mirror the values we sent into
 * hdev.
 *
 * On TIZEN_BT every early exit goes through the "unblock" label so the
 * management layer is always told the final status.
 */
static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
                                       struct sk_buff *skb)
{
        struct hci_cp_le_write_def_data_len *sent;
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
#ifndef TIZEN_BT
                return rp->status;
#else
                goto unblock;
#endif

        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
        if (!sent)
#ifndef TIZEN_BT
                return rp->status;
#else
                goto unblock;
#endif

        /* The status event has no payload; cache the parameters from
         * the command we originally sent.
         */
        hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
        hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

        return rp->status;
#ifdef TIZEN_BT
unblock:
        mgmt_le_write_host_suggested_data_length_complete(hdev, rp->status);
        return rp->status;
#endif
}
2040
2041 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2042                                        struct sk_buff *skb)
2043 {
2044         struct hci_cp_le_add_to_resolv_list *sent;
2045         struct hci_ev_status *rp = data;
2046
2047         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2048
2049         if (rp->status)
2050                 return rp->status;
2051
2052         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2053         if (!sent)
2054                 return rp->status;
2055
2056         hci_dev_lock(hdev);
2057         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2058                                 sent->bdaddr_type, sent->peer_irk,
2059                                 sent->local_irk);
2060         hci_dev_unlock(hdev);
2061
2062         return rp->status;
2063 }
2064
2065 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2066                                          struct sk_buff *skb)
2067 {
2068         struct hci_cp_le_del_from_resolv_list *sent;
2069         struct hci_ev_status *rp = data;
2070
2071         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2072
2073         if (rp->status)
2074                 return rp->status;
2075
2076         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2077         if (!sent)
2078                 return rp->status;
2079
2080         hci_dev_lock(hdev);
2081         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2082                             sent->bdaddr_type);
2083         hci_dev_unlock(hdev);
2084
2085         return rp->status;
2086 }
2087
2088 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2089                                       struct sk_buff *skb)
2090 {
2091         struct hci_ev_status *rp = data;
2092
2093         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2094
2095         if (rp->status)
2096                 return rp->status;
2097
2098         hci_dev_lock(hdev);
2099         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2100         hci_dev_unlock(hdev);
2101
2102         return rp->status;
2103 }
2104
2105 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2106                                           struct sk_buff *skb)
2107 {
2108         struct hci_rp_le_read_resolv_list_size *rp = data;
2109
2110         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2111
2112         if (rp->status)
2113                 return rp->status;
2114
2115         hdev->le_resolv_list_size = rp->size;
2116
2117         return rp->status;
2118 }
2119
2120 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2121                                                struct sk_buff *skb)
2122 {
2123         struct hci_ev_status *rp = data;
2124         __u8 *sent;
2125
2126         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2127
2128         if (rp->status)
2129                 return rp->status;
2130
2131         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2132         if (!sent)
2133                 return rp->status;
2134
2135         hci_dev_lock(hdev);
2136
2137         if (*sent)
2138                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2139         else
2140                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2141
2142         hci_dev_unlock(hdev);
2143
2144         return rp->status;
2145 }
2146
/* Completion of HCI_OP_LE_READ_MAX_DATA_LEN: cache the controller's
 * absolute maximum TX/RX octets and times.
 *
 * On TIZEN_BT the result is also forwarded to the management interface
 * so a pending userspace request always gets a reply, even on error.
 */
static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
                                      struct sk_buff *skb)
{
        struct hci_rp_le_read_max_data_len *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

#ifndef TIZEN_BT
        if (rp->status)
                return rp->status;
#else
        hci_dev_lock(hdev);
#endif

        /* NOTE(review): on TIZEN_BT these fields are cached even when
         * rp->status is non-zero - confirm the reply payload is defined
         * in that case.
         */
        hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
        hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
        hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
        hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

#ifdef TIZEN_BT
        mgmt_le_read_maximum_data_length_complete(hdev, rp->status);
        hci_dev_unlock(hdev);
#endif

        return rp->status;
}
2173
/* Completion of HCI_OP_WRITE_LE_HOST_SUPPORTED: sync the cached host
 * feature bits (LMP_HOST_LE / LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED
 * flag with the values we asked the controller to set.
 */
static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
                                         struct sk_buff *skb)
{
        struct hci_cp_write_le_host_supported *sent;
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* The status event has no payload; use the command we sent. */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        if (sent->le) {
                hdev->features[1][0] |= LMP_HOST_LE;
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
        } else {
                hdev->features[1][0] &= ~LMP_HOST_LE;
                hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
                /* With LE disabled there is nothing to advertise. */
                hci_dev_clear_flag(hdev, HCI_ADVERTISING);
        }

        if (sent->simul)
                hdev->features[1][0] |= LMP_HOST_LE_BREDR;
        else
                hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

        hci_dev_unlock(hdev);

        return rp->status;
}
2209
2210 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2211                                struct sk_buff *skb)
2212 {
2213         struct hci_cp_le_set_adv_param *cp;
2214         struct hci_ev_status *rp = data;
2215
2216         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2217
2218         if (rp->status)
2219                 return rp->status;
2220
2221         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2222         if (!cp)
2223                 return rp->status;
2224
2225         hci_dev_lock(hdev);
2226         hdev->adv_addr_type = cp->own_address_type;
2227         hci_dev_unlock(hdev);
2228
2229         return rp->status;
2230 }
2231
/* Completion of HCI_OP_LE_SET_EXT_ADV_PARAMS: record the own-address
 * type and the TX power the controller selected, then refresh the adv
 * data now that the power level is known.
 */
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_rp_le_set_ext_adv_params *rp = data;
        struct hci_cp_le_set_ext_adv_params *cp;
        struct adv_info *adv_instance;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
        if (!cp)
                return rp->status;

        hci_dev_lock(hdev);
        hdev->adv_addr_type = cp->own_addr_type;
        if (!cp->handle) {
                /* Store in hdev for instance 0 */
                hdev->adv_tx_power = rp->tx_power;
        } else {
                /* Other handles map to a per-instance adv_info entry. */
                adv_instance = hci_find_adv_instance(hdev, cp->handle);
                if (adv_instance)
                        adv_instance->tx_power = rp->tx_power;
        }
        /* Update adv data as tx power is known now */
        hci_update_adv_data(hdev, cp->handle);

        hci_dev_unlock(hdev);

        return rp->status;
}
2265
2266 #ifdef TIZEN_BT
/* Tizen vendor command completion: forward the Enable RSSI result
 * (including the LE extended opcode) to the management interface.
 */
static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
                             struct sk_buff *skb)
{
        struct hci_cc_rsp_enable_rssi *rp = data;

        BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
               hdev->name, rp->status, rp->le_ext_opcode);

        mgmt_enable_rssi_cc(hdev, rp, rp->status);

        return rp->status;
}
2279
/* Tizen vendor command completion: forward the raw RSSI reading for a
 * connection handle to the management interface.
 */
static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
                              struct sk_buff *skb)
{
        struct hci_cc_rp_get_raw_rssi *rp = data;

        BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
               hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);

        mgmt_raw_rssi_response(hdev, rp, rp->status);

        return rp->status;
}
2292
/* Tizen vendor event: an RSSI threshold alert fired for a link; relay
 * handle, alert type and dBm value to the management interface.
 */
static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
                                               struct sk_buff *skb)
{
        struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;

        BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);

        mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
                            ev->rssi_dbm);
}
2303
/* Tizen vendor event demux (group level): read the LE extended sub-code
 * from the header, advance the skb past it, and dispatch to the matching
 * handler. Unknown sub-codes are ignored.
 */
static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
                                              struct sk_buff *skb)
{
        struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
        __u8 event_le_ext_sub_code;

        BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
               LE_META_VENDOR_SPECIFIC_GROUP_EVENT);

        /* Strip the group header so sub-handlers see their own payload. */
        skb_pull(skb, sizeof(*ev));
        event_le_ext_sub_code = ev->event_le_ext_sub_code;

        switch (event_le_ext_sub_code) {
        case LE_RSSI_LINK_ALERT:
                hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
                break;

        default:
                break;
        }
}
2325
/* Tizen vendor event: a multi-advertising instance changed state; relay
 * instance id, reason and connection handle to the management interface.
 */
static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
                                                  struct sk_buff *skb)
{
        struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;

        BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");

        mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
                                        ev->state_change_reason,
                                        ev->connection_handle);
}
2337
/* Tizen vendor event demux (top level): read the sub-code from the
 * vendor event header, advance the skb past it, and dispatch to the
 * group or multi-adv handler. Unknown sub-codes are ignored.
 */
static void hci_vendor_specific_evt(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_vendor_specific *ev = (void *)skb->data;
        __u8 event_sub_code;

        BT_DBG("hci_vendor_specific_evt");

        /* Strip the vendor header so sub-handlers see their own payload. */
        skb_pull(skb, sizeof(*ev));
        event_sub_code = ev->event_sub_code;

        switch (event_sub_code) {
        case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
                hci_vendor_specific_group_ext_evt(hdev, skb);
                break;

        case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
                hci_vendor_multi_adv_state_change_evt(hdev, skb);
                break;

        default:
                break;
        }
}
2362
2363 static void hci_le_data_length_changed_complete_evt(struct hci_dev *hdev,
2364                                                     void *data,
2365                                                     struct sk_buff *skb)
2366 {
2367         struct hci_ev_le_data_len_change *ev = (void *)skb->data;
2368         struct hci_conn *conn;
2369
2370         BT_DBG("%s status", hdev->name);
2371
2372         hci_dev_lock(hdev);
2373
2374         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2375         if (conn) {
2376                 conn->tx_len = le16_to_cpu(ev->tx_len);
2377                 conn->tx_time = le16_to_cpu(ev->tx_time);
2378                 conn->rx_len = le16_to_cpu(ev->rx_len);
2379                 conn->rx_time = le16_to_cpu(ev->rx_time);
2380         }
2381
2382         mgmt_le_data_length_change_complete(hdev, &conn->dst,
2383                                             conn->tx_len, conn->tx_time,
2384                                             conn->rx_len, conn->rx_time);
2385
2386         hci_dev_unlock(hdev);
2387 }
2388 #endif
2389
2390 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2391                            struct sk_buff *skb)
2392 {
2393         struct hci_rp_read_rssi *rp = data;
2394         struct hci_conn *conn;
2395
2396         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2397
2398         if (rp->status)
2399                 return rp->status;
2400
2401         hci_dev_lock(hdev);
2402
2403         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2404         if (conn)
2405                 conn->rssi = rp->rssi;
2406
2407         hci_dev_unlock(hdev);
2408
2409         return rp->status;
2410 }
2411
/* Completion of HCI_OP_READ_TX_POWER: store the reported power level on
 * the connection; the "type" field of the sent command selects whether
 * it was the current (0x00) or maximum (0x01) TX power that was read.
 */
static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
                               struct sk_buff *skb)
{
        struct hci_cp_read_tx_power *sent;
        struct hci_rp_read_tx_power *rp = data;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* Which power level was requested lives in the sent command. */
        sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (!conn)
                goto unlock;

        switch (sent->type) {
        case 0x00:
                conn->tx_power = rp->tx_power;
                break;
        case 0x01:
                conn->max_tx_power = rp->tx_power;
                break;
        }

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
2447
2448 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2449                                       struct sk_buff *skb)
2450 {
2451         struct hci_ev_status *rp = data;
2452         u8 *mode;
2453
2454         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2455
2456         if (rp->status)
2457                 return rp->status;
2458
2459         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2460         if (mode)
2461                 hdev->ssp_debug_mode = *mode;
2462
2463         return rp->status;
2464 }
2465
2466 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2467 {
2468         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2469
2470         if (status) {
2471                 hci_conn_check_pending(hdev);
2472                 return;
2473         }
2474
2475         set_bit(HCI_INQUIRY, &hdev->flags);
2476 }
2477
/* Command status for HCI_OP_CREATE_CONN: on failure tear down or retry
 * the pending ACL connection; on success make sure a conn object exists
 * to track it.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_create_conn *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

        bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

        if (status) {
                if (conn && conn->state == BT_CONNECT) {
                        /* 0x0c (Command Disallowed) is retried up to two
                         * attempts; anything else kills the connection.
                         */
                        if (status != 0x0c || conn->attempt > 2) {
                                conn->state = BT_CLOSED;
                                hci_connect_cfm(conn, status);
                                hci_conn_del(conn);
                        } else
                                conn->state = BT_CONNECT2;
                }
        } else {
                if (!conn) {
                        /* Connection accepted but we have no object yet
                         * (e.g. raced with a remote-initiated setup).
                         */
                        conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
                                            HCI_ROLE_MASTER);
                        if (!conn)
                                bt_dev_err(hdev, "no memory for new connection");
                }
        }

        hci_dev_unlock(hdev);
}
2515
/* Command status for HCI_OP_ADD_SCO: only failures matter here - tear
 * down the SCO link hanging off the ACL connection the command named.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_add_sco *cp;
        struct hci_conn *acl, *sco;
        __u16 handle;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Success is handled by the connection complete event. */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
        if (!cp)
                return;

        /* The command addressed the ACL handle, not the SCO one. */
        handle = __le16_to_cpu(cp->handle);

        bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

        hci_dev_lock(hdev);

        acl = hci_conn_hash_lookup_handle(hdev, handle);
        if (acl) {
                sco = acl->link;
                if (sco) {
                        sco->state = BT_CLOSED;

                        hci_connect_cfm(sco, status);
                        hci_conn_del(sco);
                }
        }

        hci_dev_unlock(hdev);
}
2550
/* Command status for HCI_OP_AUTH_REQUESTED: on failure during the
 * configuration phase, report the error to upper layers and drop the
 * reference taken for authentication.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_auth_requested *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Success is handled by the auth complete event. */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
                        hci_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }

        hci_dev_unlock(hdev);
}
2577
/* Command status for HCI_OP_SET_CONN_ENCRYPT: on failure during the
 * configuration phase, report the error to upper layers and drop the
 * reference taken for encryption setup.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_set_conn_encrypt *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Success is handled by the encryption change event. */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
                        hci_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }

        hci_dev_unlock(hdev);
}
2604
2605 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2606                                     struct hci_conn *conn)
2607 {
2608         if (conn->state != BT_CONFIG || !conn->out)
2609                 return 0;
2610
2611         if (conn->pending_sec_level == BT_SECURITY_SDP)
2612                 return 0;
2613
2614         /* Only request authentication for SSP connections or non-SSP
2615          * devices with sec_level MEDIUM or HIGH or if MITM protection
2616          * is requested.
2617          */
2618         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2619             conn->pending_sec_level != BT_SECURITY_FIPS &&
2620             conn->pending_sec_level != BT_SECURITY_HIGH &&
2621             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2622                 return 0;
2623
2624         return 1;
2625 }
2626
2627 static int hci_resolve_name(struct hci_dev *hdev,
2628                                    struct inquiry_entry *e)
2629 {
2630         struct hci_cp_remote_name_req cp;
2631
2632         memset(&cp, 0, sizeof(cp));
2633
2634         bacpy(&cp.bdaddr, &e->data.bdaddr);
2635         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2636         cp.pscan_mode = e->data.pscan_mode;
2637         cp.clock_offset = e->data.clock_offset;
2638
2639         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2640 }
2641
/* Kick off name resolution for the next discovered device that still
 * needs one. Returns true if a request was sent, false if the resolve
 * list is empty, the time budget is exhausted, or sending failed.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;
        struct inquiry_entry *e;

        if (list_empty(&discov->resolve))
                return false;

        /* We should stop if we already spent too much time resolving names. */
        if (time_after(jiffies, discov->name_resolve_timeout)) {
                bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
                return false;
        }

        e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
        if (!e)
                return false;

        if (hci_resolve_name(hdev, e) == 0) {
                /* Request in flight; the completion event advances state. */
                e->name_state = NAME_PENDING;
                return true;
        }

        return false;
}
2667
/* Process a resolved (or failed) remote name: push it to mgmt, update
 * the inquiry cache entry, and either continue resolving the next name
 * or mark discovery as stopped. @name is NULL when resolution failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
                                   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
        struct discovery_state *discov = &hdev->discovery;
        struct inquiry_entry *e;

#ifdef TIZEN_BT
        /* Update the mgmt connected state if necessary. Be careful with
         * conn objects that exist but are not (yet) connected however.
         * Only those in BT_CONFIG or BT_CONNECTED states can be
         * considered connected.
         */
        if (conn &&
            (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
                if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                        mgmt_device_connected(hdev, conn, 0, name, name_len);
                else
                        mgmt_device_name_update(hdev, bdaddr, name, name_len);
        }
#else
        if (conn &&
            (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
            !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                mgmt_device_connected(hdev, conn, name, name_len);
#endif

        if (discov->state == DISCOVERY_STOPPED)
                return;

        if (discov->state == DISCOVERY_STOPPING)
                goto discov_complete;

        if (discov->state != DISCOVERY_RESOLVING)
                return;

        e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
        /* If the device was not found in a list of found devices names of which
         * are pending. there is no need to continue resolving a next name as it
         * will be done upon receiving another Remote Name Request Complete
         * Event */
        if (!e)
                return;

        list_del(&e->list);

        /* A NULL name means the remote name request failed. */
        e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
        mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
                         name, name_len);

        if (hci_resolve_next_name(hdev))
                return;

discov_complete:
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2723
/* Command status for HCI_OP_REMOTE_NAME_REQ: on failure, settle the
 * pending name lookup with an empty name and, if the link still needs
 * it, fire off an authentication request.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_remote_name_req *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* If successful wait for the name req complete event before
         * checking for the need to do authentication */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

        /* Report the failed lookup (NULL name) so discovery can move on. */
        if (hci_dev_test_flag(hdev, HCI_MGMT))
                hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

        if (!conn)
                goto unlock;

        if (!hci_outgoing_auth_needed(hdev, conn))
                goto unlock;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_auth_requested auth_cp;

                set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

                auth_cp.handle = __cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
                             sizeof(auth_cp), &auth_cp);
        }

unlock:
        hci_dev_unlock(hdev);
}
2766
/* Command status for HCI_OP_READ_REMOTE_FEATURES: on failure during the
 * configuration phase, report the error and drop the config reference.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_read_remote_features *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Success is handled by the remote features event. */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
                        hci_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }

        hci_dev_unlock(hdev);
}
2793
/* Command status for HCI_OP_READ_REMOTE_EXT_FEATURES: on failure during
 * the configuration phase, report the error and drop the config
 * reference.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_read_remote_ext_features *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Success is handled by the remote extended features event. */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
                        hci_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }

        hci_dev_unlock(hdev);
}
2820
/* Command status for HCI_OP_SETUP_SYNC_CONN: only failures matter -
 * tear down the SCO/eSCO link hanging off the ACL connection the
 * command named.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_setup_sync_conn *cp;
        struct hci_conn *acl, *sco;
        __u16 handle;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Success is handled by the sync connection complete event. */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
        if (!cp)
                return;

        /* The command addressed the ACL handle, not the SCO one. */
        handle = __le16_to_cpu(cp->handle);

        bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

        hci_dev_lock(hdev);

        acl = hci_conn_hash_lookup_handle(hdev, handle);
        if (acl) {
                sco = acl->link;
                if (sco) {
                        sco->state = BT_CLOSED;

                        hci_connect_cfm(sco, status);
                        hci_conn_del(sco);
                }
        }

        hci_dev_unlock(hdev);
}
2855
/* Command status for HCI_OP_ENHANCED_SETUP_SYNC_CONN: same failure
 * handling as hci_cs_setup_sync_conn() - tear down the SCO/eSCO link
 * hanging off the named ACL connection.
 */
static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_enhanced_setup_sync_conn *cp;
        struct hci_conn *acl, *sco;
        __u16 handle;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Success is handled by the sync connection complete event. */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
        if (!cp)
                return;

        /* The command addressed the ACL handle, not the SCO one. */
        handle = __le16_to_cpu(cp->handle);

        bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

        hci_dev_lock(hdev);

        acl = hci_conn_hash_lookup_handle(hdev, handle);
        if (acl) {
                sco = acl->link;
                if (sco) {
                        sco->state = BT_CLOSED;

                        hci_connect_cfm(sco, status);
                        hci_conn_del(sco);
                }
        }

        hci_dev_unlock(hdev);
}
2890
/* Command status for HCI_OP_SNIFF_MODE: on failure, clear the pending
 * mode-change flag and, if SCO setup was waiting on the mode change,
 * let it proceed with the error status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_sniff_mode *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Success is handled by the mode change event. */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

                if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
                        hci_sco_setup(conn, status);
        }

        hci_dev_unlock(hdev);
}
2917
2918 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2919 {
2920         struct hci_cp_exit_sniff_mode *cp;
2921         struct hci_conn *conn;
2922
2923         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2924
2925         if (!status)
2926                 return;
2927
2928         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2929         if (!cp)
2930                 return;
2931
2932         hci_dev_lock(hdev);
2933
2934         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2935         if (conn) {
2936                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2937
2938                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2939                         hci_sco_setup(conn, status);
2940         }
2941
2942         hci_dev_unlock(hdev);
2943 }
2944
/* Handle the command status of HCI_OP_DISCONNECT. Normally cleanup is
 * deferred to HCI_EV_DISCONN_COMPLETE; this path runs only on a command
 * failure or while the controller is suspended.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* For a peripheral LE link, re-enable advertising which
		 * may have been stopped for this connection.
		 */
		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-queue the device for auto-connection when configured to
	 * reconnect (always, direct, or after link loss on timeout).
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
3023
3024 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
3025 {
3026         /* When using controller based address resolution, then the new
3027          * address types 0x02 and 0x03 are used. These types need to be
3028          * converted back into either public address or random address type
3029          */
3030         switch (type) {
3031         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3032                 if (resolved)
3033                         *resolved = true;
3034                 return ADDR_LE_DEV_PUBLIC;
3035         case ADDR_LE_DEV_RANDOM_RESOLVED:
3036                 if (resolved)
3037                         *resolved = true;
3038                 return ADDR_LE_DEV_RANDOM;
3039         }
3040
3041         if (resolved)
3042                 *resolved = false;
3043         return type;
3044 }
3045
3046 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
3047                               u8 peer_addr_type, u8 own_address_type,
3048                               u8 filter_policy)
3049 {
3050         struct hci_conn *conn;
3051
3052         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
3053                                        peer_addr_type);
3054         if (!conn)
3055                 return;
3056
3057         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
3058
3059         /* Store the initiator and responder address information which
3060          * is needed for SMP. These values will not change during the
3061          * lifetime of the connection.
3062          */
3063         conn->init_addr_type = own_address_type;
3064         if (own_address_type == ADDR_LE_DEV_RANDOM)
3065                 bacpy(&conn->init_addr, &hdev->random_addr);
3066         else
3067                 bacpy(&conn->init_addr, &hdev->bdaddr);
3068
3069         conn->resp_addr_type = peer_addr_type;
3070         bacpy(&conn->resp_addr, peer_addr);
3071 }
3072
3073 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
3074 {
3075         struct hci_cp_le_create_conn *cp;
3076
3077         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3078
3079         /* All connection failure handling is taken care of by the
3080          * hci_conn_failed function which is triggered by the HCI
3081          * request completion callbacks used for connecting.
3082          */
3083         if (status)
3084                 return;
3085
3086         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
3087         if (!cp)
3088                 return;
3089
3090         hci_dev_lock(hdev);
3091
3092         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3093                           cp->own_address_type, cp->filter_policy);
3094
3095         hci_dev_unlock(hdev);
3096 }
3097
3098 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
3099 {
3100         struct hci_cp_le_ext_create_conn *cp;
3101
3102         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3103
3104         /* All connection failure handling is taken care of by the
3105          * hci_conn_failed function which is triggered by the HCI
3106          * request completion callbacks used for connecting.
3107          */
3108         if (status)
3109                 return;
3110
3111         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
3112         if (!cp)
3113                 return;
3114
3115         hci_dev_lock(hdev);
3116
3117         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3118                           cp->own_addr_type, cp->filter_policy);
3119
3120         hci_dev_unlock(hdev);
3121 }
3122
3123 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
3124 {
3125         struct hci_cp_le_read_remote_features *cp;
3126         struct hci_conn *conn;
3127
3128         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3129
3130         if (!status)
3131                 return;
3132
3133         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
3134         if (!cp)
3135                 return;
3136
3137         hci_dev_lock(hdev);
3138
3139         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3140         if (conn) {
3141                 if (conn->state == BT_CONFIG) {
3142                         hci_connect_cfm(conn, status);
3143                         hci_conn_drop(conn);
3144                 }
3145         }
3146
3147         hci_dev_unlock(hdev);
3148 }
3149
3150 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3151 {
3152         struct hci_cp_le_start_enc *cp;
3153         struct hci_conn *conn;
3154
3155         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3156
3157         if (!status)
3158                 return;
3159
3160         hci_dev_lock(hdev);
3161
3162         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3163         if (!cp)
3164                 goto unlock;
3165
3166         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3167         if (!conn)
3168                 goto unlock;
3169
3170         if (conn->state != BT_CONNECTED)
3171                 goto unlock;
3172
3173         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3174         hci_conn_drop(conn);
3175
3176 unlock:
3177         hci_dev_unlock(hdev);
3178 }
3179
3180 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3181 {
3182         struct hci_cp_switch_role *cp;
3183         struct hci_conn *conn;
3184
3185         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3186
3187         if (!status)
3188                 return;
3189
3190         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3191         if (!cp)
3192                 return;
3193
3194         hci_dev_lock(hdev);
3195
3196         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3197         if (conn)
3198                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3199
3200         hci_dev_unlock(hdev);
3201 }
3202
/* Handle HCI_EV_INQUIRY_COMPLETE: wake up any waiters on HCI_INQUIRY
 * and advance the mgmt discovery state machine, moving on to name
 * resolution when cached entries still need names.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* The discovery state machine below only applies when mgmt is
	 * in control of the device.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Entries still need their names resolved; kick off remote name
	 * resolution for the next one, otherwise finish up as above.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
3263
3264 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3265                                    struct sk_buff *skb)
3266 {
3267         struct hci_ev_inquiry_result *ev = edata;
3268         struct inquiry_data data;
3269         int i;
3270
3271         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3272                              flex_array_size(ev, info, ev->num)))
3273                 return;
3274
3275         bt_dev_dbg(hdev, "num %d", ev->num);
3276
3277         if (!ev->num)
3278                 return;
3279
3280         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3281                 return;
3282
3283         hci_dev_lock(hdev);
3284
3285         for (i = 0; i < ev->num; i++) {
3286                 struct inquiry_info *info = &ev->info[i];
3287                 u32 flags;
3288
3289                 bacpy(&data.bdaddr, &info->bdaddr);
3290                 data.pscan_rep_mode     = info->pscan_rep_mode;
3291                 data.pscan_period_mode  = info->pscan_period_mode;
3292                 data.pscan_mode         = info->pscan_mode;
3293                 memcpy(data.dev_class, info->dev_class, 3);
3294                 data.clock_offset       = info->clock_offset;
3295                 data.rssi               = HCI_RSSI_INVALID;
3296                 data.ssp_mode           = 0x00;
3297
3298                 flags = hci_inquiry_cache_update(hdev, &data, false);
3299
3300                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3301                                   info->dev_class, HCI_RSSI_INVALID,
3302                                   flags, NULL, 0, NULL, 0, 0);
3303         }
3304
3305         hci_dev_unlock(hdev);
3306 }
3307
/* Handle HCI_EV_CONN_COMPLETE: finish setting up a pending BR/EDR
 * connection (or create one for an auto-accepted/allowlisted device),
 * assign the connection handle and kick off remote feature discovery.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* A SCO completion may correspond to a connection
			 * that was requested as ESCO; retry the lookup with
			 * the ESCO link type and fix up conn->type.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		conn->handle = __le16_to_cpu(ev->handle);
		/* Reject handles outside the valid range defined by the
		 * HCI specification.
		 */
		if (conn->handle > HCI_CONN_HANDLE_MAX) {
			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
				   conn->handle, HCI_CONN_HANDLE_MAX);
			status = HCI_ERROR_INVALID_PARAMETERS;
			goto done;
		}

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give legacy (non-SSP) incoming links without a
			 * stored key extra time to complete pairing.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}

#ifdef TIZEN_BT
		/* Tizen: enlarge the supervision timeout on links where we
		 * are central.
		 */
		if (get_link_mode(conn) & HCI_LM_MASTER)
			hci_conn_change_supervision_timeout(conn,
					LINK_SUPERVISION_TIMEOUT);
#endif
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		/* Let the driver know a CVSD SCO channel is coming up. */
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
3448
3449 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3450 {
3451         struct hci_cp_reject_conn_req cp;
3452
3453         bacpy(&cp.bdaddr, bdaddr);
3454         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3455         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3456 }
3457
/* Handle HCI_EV_CONN_REQUEST: decide whether to accept or reject an
 * incoming BR/EDR connection, based on the link policy, reject/accept
 * lists and the L2CAP/SCO layer's verdict, then send the appropriate
 * accept or reject command (or defer to the profile).
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Ask the protocol layers whether they accept this connection;
	 * they may also set HCI_PROTO_DEFER in flags.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class for this peer. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

#ifdef TIZEN_BT
		/* Tizen: only one (e)SCO link at a time; reject further
		 * sync connection requests with "limited resources".
		 */
		if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
		    hci_conn_hash_lookup_sco(hdev)) {
			struct hci_cp_reject_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
			hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
				     sizeof(cp), &cp);
			hci_dev_unlock(hdev);
			return;
		}
#endif

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Legacy SCO accept path with default bandwidth/latency
		 * parameters.
		 */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol layer asked to defer: let it confirm later. */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3569
3570 static u8 hci_to_mgmt_reason(u8 err)
3571 {
3572         switch (err) {
3573         case HCI_ERROR_CONNECTION_TIMEOUT:
3574                 return MGMT_DEV_DISCONN_TIMEOUT;
3575         case HCI_ERROR_REMOTE_USER_TERM:
3576         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3577         case HCI_ERROR_REMOTE_POWER_OFF:
3578                 return MGMT_DEV_DISCONN_REMOTE;
3579         case HCI_ERROR_LOCAL_HOST_TERM:
3580                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3581         default:
3582                 return MGMT_DEV_DISCONN_UNKNOWN;
3583         }
3584 }
3585
3586 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3587                                      struct sk_buff *skb)
3588 {
3589         struct hci_ev_disconn_complete *ev = data;
3590         u8 reason;
3591         struct hci_conn_params *params;
3592         struct hci_conn *conn;
3593         bool mgmt_connected;
3594
3595         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3596
3597         hci_dev_lock(hdev);
3598
3599         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3600         if (!conn)
3601                 goto unlock;
3602
3603         if (ev->status) {
3604                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3605                                        conn->dst_type, ev->status);
3606                 goto unlock;
3607         }
3608
3609         conn->state = BT_CLOSED;
3610
3611         mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3612
3613         if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3614                 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3615         else
3616                 reason = hci_to_mgmt_reason(ev->reason);
3617
3618         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3619                                 reason, mgmt_connected);
3620
3621         if (conn->type == ACL_LINK) {
3622                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3623                         hci_remove_link_key(hdev, &conn->dst);
3624
3625                 hci_update_scan(hdev);
3626         }
3627
3628         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3629         if (params) {
3630                 switch (params->auto_connect) {
3631                 case HCI_AUTO_CONN_LINK_LOSS:
3632                         if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3633                                 break;
3634                         fallthrough;
3635
3636                 case HCI_AUTO_CONN_DIRECT:
3637                 case HCI_AUTO_CONN_ALWAYS:
3638                         list_del_init(&params->action);
3639                         list_add(&params->action, &hdev->pend_le_conns);
3640                         hci_update_passive_scan(hdev);
3641                         break;
3642
3643                 default:
3644                         break;
3645                 }
3646         }
3647
3648         hci_disconn_cfm(conn, ev->reason);
3649
3650         /* Re-enable advertising if necessary, since it might
3651          * have been disabled by the connection. From the
3652          * HCI_LE_Set_Advertise_Enable command description in
3653          * the core specification (v4.0):
3654          * "The Controller shall continue advertising until the Host
3655          * issues an LE_Set_Advertise_Enable command with
3656          * Advertising_Enable set to 0x00 (Advertising is disabled)
3657          * or until a connection is created or until the Advertising
3658          * is timed out due to Directed Advertising."
3659          */
3660         if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3661                 hdev->cur_adv_instance = conn->adv_instance;
3662                 hci_enable_advertising(hdev);
3663         }
3664
3665         hci_conn_del(conn);
3666
3667 #ifdef TIZEN_BT
3668         if (type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
3669                 int iscan;
3670                 int pscan;
3671
3672                 iscan = test_bit(HCI_ISCAN, &hdev->flags);
3673                 pscan = test_bit(HCI_PSCAN, &hdev->flags);
3674                 if (!iscan && !pscan) {
3675                         u8 scan_enable = SCAN_PAGE;
3676
3677                         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3678                                      sizeof(scan_enable), &scan_enable);
3679                 }
3680         }
3681 #endif
3682
3683 unlock:
3684         hci_dev_unlock(hdev);
3685 }
3686
/* HCI Authentication Complete event.
 *
 * Updates the authentication state of the ACL link identified by
 * ev->handle and kicks off encryption when the connection is still
 * being configured.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

#ifdef TIZEN_BT
	/*  PIN or Key Missing patch */
	BT_DBG("remote_auth %x, remote_cap %x, auth_type %x, io_capability %x",
	       conn->remote_auth, conn->remote_cap,
	       conn->auth_type, conn->io_capability);

	/* Status 0x06 (PIN or Key Missing): drop the stale stored link key
	 * and retry authentication instead of failing the connection.
	 */
	if (ev->status == 0x06 && hci_conn_ssp_enabled(conn)) {
		struct hci_cp_auth_requested cp;

		BT_DBG("Pin or key missing");
		hci_remove_link_key(hdev, &conn->dst);
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);
		goto unlock;
	}
#endif

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Legacy (pre-SSP) devices cannot be re-authenticated on a
		 * live link; keep the current security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* Connection setup still in progress: on success with SSP,
		 * turn on encryption before declaring the link connected.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		/* An encryption request was queued behind authentication:
		 * issue it now on success, or fail it on error.
		 */
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3775
/* HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name (if any) to the management interface and,
 * for outgoing connections that still require it, starts authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Only report the name when the management interface is active */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3818
/* HCI Encryption Change event.
 *
 * Tracks the encryption state of the link identified by ev->handle,
 * verifies the link security requirements and, for encrypted ACL links,
 * queries the encryption key size before notifying upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR means AES-CCM; LE links
			 * are treated as AES-CCM unconditionally.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		cp.handle = cpu_to_le16(conn->handle);
		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred until the key size reply arrives */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3929
3930 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3931                                              struct sk_buff *skb)
3932 {
3933         struct hci_ev_change_link_key_complete *ev = data;
3934         struct hci_conn *conn;
3935
3936         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3937
3938         hci_dev_lock(hdev);
3939
3940         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3941         if (conn) {
3942                 if (!ev->status)
3943                         set_bit(HCI_CONN_SECURE, &conn->flags);
3944
3945                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3946
3947                 hci_key_change_cfm(conn, ev->status);
3948         }
3949
3950         hci_dev_unlock(hdev);
3951 }
3952
/* HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote feature mask and continues connection
 * setup: requests extended features when both sides support them,
 * otherwise resolves the remote name or finishes the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides support extended features: fetch page 1 and finish
	 * setup when its completion event arrives.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting the connection to mgmt */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4001
4002 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
4003 {
4004         cancel_delayed_work(&hdev->cmd_timer);
4005
4006         rcu_read_lock();
4007         if (!test_bit(HCI_RESET, &hdev->flags)) {
4008                 if (ncmd) {
4009                         cancel_delayed_work(&hdev->ncmd_timer);
4010                         atomic_set(&hdev->cmd_cnt, 1);
4011                 } else {
4012                         if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4013                                 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
4014                                                    HCI_NCMD_TIMEOUT);
4015                 }
4016         }
4017         rcu_read_unlock();
4018 }
4019
4020 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
4021                                         struct sk_buff *skb)
4022 {
4023         struct hci_rp_le_read_buffer_size_v2 *rp = data;
4024
4025         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4026
4027         if (rp->status)
4028                 return rp->status;
4029
4030         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
4031         hdev->le_pkts  = rp->acl_max_pkt;
4032         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
4033         hdev->iso_pkts = rp->iso_max_pkt;
4034
4035         hdev->le_cnt  = hdev->le_pkts;
4036         hdev->iso_cnt = hdev->iso_pkts;
4037
4038         BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
4039                hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
4040
4041         return rp->status;
4042 }
4043
/* Command Complete handler for HCI_OP_LE_SET_CIG_PARAMS.
 *
 * On failure, closes and deletes every connection belonging to the CIG.
 * On success, assigns the returned CIS handles to the still-unconnected
 * ISO links of that CIG (in hash-list order) and triggers CIS creation
 * for links whose underlying LE connection is already up.
 *
 * Returns the command status byte.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_conn *conn;
	int i = 0; /* index into rp->handle[] */

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (rp->status) {
		while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, rp->status);
			hci_conn_del(conn);
		}
		goto unlock;
	}

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
		    conn->state == BT_CONNECTED)
			continue;

		conn->handle = __le16_to_cpu(rp->handle[i++]);

		bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
			   conn->handle, conn->link);

		/* Create CIS if LE is already connected */
		if (conn->link && conn->link->state == BT_CONNECTED) {
			/* NOTE(review): the RCU read lock is dropped here to
			 * send an HCI command and the walk then resumes on the
			 * same element; this assumes the entry cannot be freed
			 * meanwhile (hdev is locked) -- confirm.
			 */
			rcu_read_unlock();
			hci_le_create_cis(conn->link);
			rcu_read_lock();
		}

		if (i == rp->num_handles)
			break;
	}

	rcu_read_unlock();

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
4094
/* Command Complete handler for HCI_OP_LE_SETUP_ISO_PATH.
 *
 * Matches the reply against the command that was sent, deletes the
 * connection on failure and confirms it once the data path setup for
 * the relevant direction has finished.
 *
 * Returns the command status byte.
 */
static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (rp->status) {
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm connection if output only */
		if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm connection since conn->iso_qos is always configured
		 * last.
		 */
		hci_connect_cfm(conn, rp->status);
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
4140
/* Command Status handler for LE Create BIG; currently only logs the status. */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
4145
4146 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
4147                                    struct sk_buff *skb)
4148 {
4149         struct hci_ev_status *rp = data;
4150         struct hci_cp_le_set_per_adv_params *cp;
4151
4152         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4153
4154         if (rp->status)
4155                 return rp->status;
4156
4157         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
4158         if (!cp)
4159                 return rp->status;
4160
4161         /* TODO: set the conn state */
4162         return rp->status;
4163 }
4164
4165 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
4166                                        struct sk_buff *skb)
4167 {
4168         struct hci_ev_status *rp = data;
4169         __u8 *sent;
4170
4171         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4172
4173         if (rp->status)
4174                 return rp->status;
4175
4176         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
4177         if (!sent)
4178                 return rp->status;
4179
4180         hci_dev_lock(hdev);
4181
4182         if (*sent)
4183                 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
4184         else
4185                 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4186
4187         hci_dev_unlock(hdev);
4188
4189         return rp->status;
4190 }
4191
/* Declare an hci_cc_table entry with a variable-length response:
 * _func handles opcode _op and accepts between _min and _max bytes.
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

/* Entry whose response has a single fixed length */
#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

/* Entry whose response carries only a status byte */
#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4205
/* Dispatch table mapping HCI Command Complete opcodes to their handlers
 * together with the acceptable response length range (enforced by
 * hci_cc_func).
 */
static const struct hci_cc {
	u16  op;	/* command opcode */
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;	/* minimum acceptable response length */
	u16  max_len;	/* longer responses only trigger a warning */
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
#ifdef TIZEN_BT
	HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
	       sizeof(struct hci_cc_rsp_enable_rssi)),
	HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
	       sizeof(struct hci_cc_rp_get_raw_rssi)),
#endif
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};
4386
/* Validate the length of a Command Complete response and dispatch it
 * to the handler registered in hci_cc_table.
 *
 * Returns the handler's status, or HCI_ERROR_UNSPECIFIED when the
 * response is shorter than the handler's declared minimum.
 */
static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
		      struct sk_buff *skb)
{
	void *data;

	if (skb->len < cc->min_len) {
		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
			   cc->op, skb->len, cc->min_len);
		return HCI_ERROR_UNSPECIFIED;
	}

	/* Only warn when the length exceeds max_len: it may still be
	 * possible to partially parse the cc, so leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > cc->max_len)
		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
			    cc->op, skb->len, cc->max_len);

	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
	if (!data)
		return HCI_ERROR_UNSPECIFIED;

	return cc->func(hdev, data, skb);
}
4412
/* HCI Command Complete event.
 *
 * Dispatches the response through hci_cc_table, updates the command
 * credit accounting and completes any pending request. The decoded
 * opcode and status are reported back through *opcode and *status.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op == *opcode) {
			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
			break;
		}
	}

	if (i == ARRAY_SIZE(hci_cc_table)) {
		/* Unknown opcode, assume byte 0 contains the status, so
		 * that e.g. __hci_cmd_sync() properly returns errors
		 * for vendor specific commands send by HCI drivers.
		 * If a vendor doesn't actually follow this convention we may
		 * need to introduce a vendor CC table in order to properly set
		 * the status.
		 */
		*status = skb->data[0];
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Send the next queued command if credits are available */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4457
/* Handle the Command Status for HCI_OP_LE_CREATE_CIS.
 *
 * If the command failed, close and delete every connection named in
 * the sent Create CIS command, since no CIS Established events will
 * follow for them.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Status 0 means the command is pending; results arrive later
	 * via CIS Established events, so there is nothing to do here.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	/* NOTE(review): the loop decrements cp->num_cis in place, i.e. it
	 * mutates the stored copy of the sent command while indexing
	 * cis[] with i — looks intentional as a countdown, but confirm.
	 */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	hci_dev_unlock(hdev);
}
4491
/* Helper to build one hci_cs_table entry. */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table mapping HCI command opcodes to their Command Status
 * handlers; searched linearly by hci_cmd_status_evt().
 */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4525
/* Handle the HCI Command Status event.
 *
 * Reports the opcode and status back to the caller, dispatches to the
 * matching handler in hci_cs_table, updates the command credit
 * counter/timer and, where appropriate, completes the pending request
 * before kicking the command queue again.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
                               struct sk_buff *skb, u16 *opcode, u8 *status,
                               hci_req_complete_t *req_complete,
                               hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	/* Linear search; the table is small enough for this to be fine. */
	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	/* Resume sending if credits are available and commands are queued. */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4567
/* Handle the HCI Hardware Error event: record the error code and
 * schedule the error-reset work to recover the controller.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = data;

	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);

#ifdef TIZEN_BT
	/* Tizen extension: also report the error over mgmt. */
	hci_dev_lock(hdev);
	mgmt_hardware_error(hdev, ev->code);
	hci_dev_unlock(hdev);
#endif
	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}
4584
/* Handle the HCI Role Change event: update the connection role on
 * success, clear the pending role-switch flag and notify listeners.
 */
static void hci_role_change_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status)
			conn->role = ev->role;

		/* The switch is no longer pending regardless of outcome. */
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
#ifdef TIZEN_BT
		/* Tizen extension: presumably bumps the supervision timeout
		 * when we became central — get_link_mode() is defined
		 * elsewhere; verify against that helper.
		 */
		if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
			hci_conn_change_supervision_timeout(conn,
					LINK_SUPERVISION_TIMEOUT);
#endif
	}

	hci_dev_unlock(hdev);
}
4612
/* Handle the HCI Number Of Completed Packets event.
 *
 * Returns flow-control credits to the per-link-type counters for each
 * reported handle (capping at the controller-advertised maximums) and
 * reschedules the TX work so queued data can be sent.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = data;
	int i;

	/* Re-validate the length now that ev->num is known. */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
			     flex_array_size(ev, handles, ev->num)))
		return;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num %d", ev->num);

	for (i = 0; i < ev->num; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		case ISO_LINK:
			/* ISO falls back to the LE pool, then the ACL pool,
			 * when no dedicated ISO buffers are advertised.
			 */
			if (hdev->iso_pkts) {
				hdev->iso_cnt += count;
				if (hdev->iso_cnt > hdev->iso_pkts)
					hdev->iso_cnt = hdev->iso_pkts;
			} else if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4694
4695 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4696                                                  __u16 handle)
4697 {
4698         struct hci_chan *chan;
4699
4700         switch (hdev->dev_type) {
4701         case HCI_PRIMARY:
4702                 return hci_conn_hash_lookup_handle(hdev, handle);
4703         case HCI_AMP:
4704                 chan = hci_chan_lookup_handle(hdev, handle);
4705                 if (chan)
4706                         return chan->conn;
4707                 break;
4708         default:
4709                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4710                 break;
4711         }
4712
4713         return NULL;
4714 }
4715
/* Handle the HCI Number Of Completed Data Blocks event (block-based
 * flow control): return block credits for each reported handle and
 * reschedule the TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = data;
	int i;

	/* Re-validate the length now that ev->num_hndl is known. */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
			     flex_array_size(ev, handles, ev->num_hndl)))
		return;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d",
			   hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
		   ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Credits are shared; cap at the advertised total. */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4766
4767 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4768                                 struct sk_buff *skb)
4769 {
4770         struct hci_ev_mode_change *ev = data;
4771         struct hci_conn *conn;
4772
4773         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4774
4775         hci_dev_lock(hdev);
4776
4777         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4778         if (conn) {
4779                 conn->mode = ev->mode;
4780
4781                 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4782                                         &conn->flags)) {
4783                         if (conn->mode == HCI_CM_ACTIVE)
4784                                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4785                         else
4786                                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4787                 }
4788
4789                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4790                         hci_sco_setup(conn, ev->status);
4791         }
4792
4793         hci_dev_unlock(hdev);
4794 }
4795
4796 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4797                                      struct sk_buff *skb)
4798 {
4799         struct hci_ev_pin_code_req *ev = data;
4800         struct hci_conn *conn;
4801
4802         bt_dev_dbg(hdev, "");
4803
4804         hci_dev_lock(hdev);
4805
4806         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4807         if (!conn)
4808                 goto unlock;
4809
4810         if (conn->state == BT_CONNECTED) {
4811                 hci_conn_hold(conn);
4812                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4813                 hci_conn_drop(conn);
4814         }
4815
4816         if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4817             !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4818                 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4819                              sizeof(ev->bdaddr), &ev->bdaddr);
4820         } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4821                 u8 secure;
4822
4823                 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4824                         secure = 1;
4825                 else
4826                         secure = 0;
4827
4828                 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4829         }
4830
4831 unlock:
4832         hci_dev_unlock(hdev);
4833 }
4834
4835 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4836 {
4837         if (key_type == HCI_LK_CHANGED_COMBINATION)
4838                 return;
4839
4840         conn->pin_length = pin_len;
4841         conn->key_type = key_type;
4842
4843         switch (key_type) {
4844         case HCI_LK_LOCAL_UNIT:
4845         case HCI_LK_REMOTE_UNIT:
4846         case HCI_LK_DEBUG_COMBINATION:
4847                 return;
4848         case HCI_LK_COMBINATION:
4849                 if (pin_len == 16)
4850                         conn->pending_sec_level = BT_SECURITY_HIGH;
4851                 else
4852                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4853                 break;
4854         case HCI_LK_UNAUTH_COMBINATION_P192:
4855         case HCI_LK_UNAUTH_COMBINATION_P256:
4856                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4857                 break;
4858         case HCI_LK_AUTH_COMBINATION_P192:
4859                 conn->pending_sec_level = BT_SECURITY_HIGH;
4860                 break;
4861         case HCI_LK_AUTH_COMBINATION_P256:
4862                 conn->pending_sec_level = BT_SECURITY_FIPS;
4863                 break;
4864         }
4865 }
4866
/* Handle the HCI Link Key Request event.
 *
 * Replies with a stored link key if one exists and is strong enough
 * for the connection's pending security level; otherwise sends a
 * negative reply so the controller falls back to pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	/* Without mgmt the kernel keeps no key store to answer from. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse an unauthenticated key when MITM protection is
		 * being requested (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		/* A combination key from a short PIN cannot satisfy
		 * high/FIPS security requirements.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4924
/* Handle the HCI Link Key Notification event.
 *
 * Stores the new key (when mgmt is active), updates the connection's
 * key/security bookkeeping, notifies user space, and decides whether
 * the key persists across reboots or is flushed on disconnect.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold/drop to reset the disconnect timeout back to normal. */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* NOTE(review): pin_len is always 0 here although conn->pin_length
	 * was used just above — confirm hci_add_link_key() is meant to
	 * receive 0 rather than the connection's PIN length.
	 */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
				ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4985
4986 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4987                                  struct sk_buff *skb)
4988 {
4989         struct hci_ev_clock_offset *ev = data;
4990         struct hci_conn *conn;
4991
4992         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4993
4994         hci_dev_lock(hdev);
4995
4996         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4997         if (conn && !ev->status) {
4998                 struct inquiry_entry *ie;
4999
5000                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
5001                 if (ie) {
5002                         ie->data.clock_offset = ev->clock_offset;
5003                         ie->timestamp = jiffies;
5004                 }
5005         }
5006
5007         hci_dev_unlock(hdev);
5008 }
5009
5010 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
5011                                     struct sk_buff *skb)
5012 {
5013         struct hci_ev_pkt_type_change *ev = data;
5014         struct hci_conn *conn;
5015
5016         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5017
5018         hci_dev_lock(hdev);
5019
5020         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5021         if (conn && !ev->status)
5022                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
5023
5024         hci_dev_unlock(hdev);
5025 }
5026
5027 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
5028                                    struct sk_buff *skb)
5029 {
5030         struct hci_ev_pscan_rep_mode *ev = data;
5031         struct inquiry_entry *ie;
5032
5033         bt_dev_dbg(hdev, "");
5034
5035         hci_dev_lock(hdev);
5036
5037         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5038         if (ie) {
5039                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
5040                 ie->timestamp = jiffies;
5041         }
5042
5043         hci_dev_unlock(hdev);
5044 }
5045
/* Handle the HCI Inquiry Result with RSSI event.
 *
 * Two wire formats exist for this event — with and without the
 * pscan_mode byte — and are distinguished purely by the total payload
 * length for the reported number of responses. Each result updates the
 * inquiry cache and is forwarded to mgmt as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	/* Results of a periodic inquiry are deliberately not reported. */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Variant that includes the pscan_mode byte per result. */
	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		/* Variant without the pscan_mode byte. */
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}
5132
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Caches the remote feature page, derives SSP/SC state from page 1,
 * and — while still in BT_CONFIG — either requests the remote name or
 * finishes connection setup.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host features (SSP/SC support). */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* If no outgoing authentication is required the connection is
	 * fully set up at this point.
	 */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5196
/* Handle HCI_Synchronous_Connection_Complete.
 *
 * Completes the setup of a SCO/eSCO connection object on success, or
 * retries the outgoing setup with a downgraded packet type when the
 * parameters were rejected. All other failures close the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		if (conn->handle > HCI_CONN_HANDLE_MAX) {
			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
				   conn->handle, HCI_CONN_HANDLE_MAX);
			status = HCI_ERROR_INVALID_PARAMETERS;
			conn->state = BT_CLOSED;
			break;
		}

		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* For outgoing connections, retry the setup with a
			 * less demanding packet type before giving up.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over HCI transport data path which
	 * is zero and non-zero value shall be non-HCI transport data path
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
5312
5313 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5314 {
5315         size_t parsed = 0;
5316
5317         while (parsed < eir_len) {
5318                 u8 field_len = eir[0];
5319
5320                 if (field_len == 0)
5321                         return parsed;
5322
5323                 parsed += field_len + 1;
5324                 eir += field_len + 1;
5325         }
5326
5327         return eir_len;
5328 }
5329
/* Handle HCI_Extended_Inquiry_Result.
 *
 * Updates the inquiry cache with every reported device and forwards
 * each result, including its EIR payload, to the management interface.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	/* Make sure the skb actually carries ev->num result entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Results of an ongoing periodic inquiry are not forwarded */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* With mgmt in control, a remote name request is only
		 * needed when the complete name is not already part of
		 * the EIR data.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		/* Only report the significant part of the EIR data */
		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
5384
/* Handle HCI_Encryption_Key_Refresh_Complete.
 *
 * Updates the security level of an LE connection after an encryption
 * key refresh and notifies the relevant completion callbacks. A failed
 * refresh on an established link results in a disconnection.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed key refresh on an already established link means the
	 * encryption can no longer be trusted, so disconnect.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold the connection until the disconnect timeout so the
		 * link does not go down immediately after authentication.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5434
5435 static u8 hci_get_auth_req(struct hci_conn *conn)
5436 {
5437 #ifdef TIZEN_BT
5438         if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
5439                 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5440                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5441                         return HCI_AT_GENERAL_BONDING_MITM;
5442         }
5443 #endif
5444
5445         /* If remote requests no-bonding follow that lead */
5446         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5447             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5448                 return conn->remote_auth | (conn->auth_type & 0x01);
5449
5450         /* If both remote and local have enough IO capabilities, require
5451          * MITM protection
5452          */
5453         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5454             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5455                 return conn->remote_auth | 0x01;
5456
5457         /* No MITM protection possible so ignore remote requirement */
5458         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5459 }
5460
5461 static u8 bredr_oob_data_present(struct hci_conn *conn)
5462 {
5463         struct hci_dev *hdev = conn->hdev;
5464         struct oob_data *data;
5465
5466         data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5467         if (!data)
5468                 return 0x00;
5469
5470         if (bredr_sc_enabled(hdev)) {
5471                 /* When Secure Connections is enabled, then just
5472                  * return the present value stored with the OOB
5473                  * data. The stored value contains the right present
5474                  * information. However it can only be trusted when
5475                  * not in Secure Connection Only mode.
5476                  */
5477                 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5478                         return data->present;
5479
5480                 /* When Secure Connections Only mode is enabled, then
5481                  * the P-256 values are required. If they are not
5482                  * available, then do not declare that OOB data is
5483                  * present.
5484                  */
5485                 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5486                     !memcmp(data->hash256, ZERO_KEY, 16))
5487                         return 0x00;
5488
5489                 return 0x02;
5490         }
5491
5492         /* When Secure Connections is not enabled or actually
5493          * not supported by the hardware, then check that if
5494          * P-192 data values are present.
5495          */
5496         if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5497             !memcmp(data->hash192, ZERO_KEY, 16))
5498                 return 0x00;
5499
5500         return 0x01;
5501 }
5502
/* Handle HCI_IO_Capability_Request.
 *
 * Replies with our IO capability, authentication requirement and OOB
 * data availability when pairing is currently acceptable; otherwise
 * sends a negative reply rejecting the pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5572
5573 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5574                                   struct sk_buff *skb)
5575 {
5576         struct hci_ev_io_capa_reply *ev = data;
5577         struct hci_conn *conn;
5578
5579         bt_dev_dbg(hdev, "");
5580
5581         hci_dev_lock(hdev);
5582
5583         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5584         if (!conn)
5585                 goto unlock;
5586
5587         conn->remote_cap = ev->capability;
5588         conn->remote_auth = ev->authentication;
5589
5590 unlock:
5591         hci_dev_unlock(hdev);
5592 }
5593
/* Handle HCI_User_Confirmation_Request.
 *
 * Decides whether the numeric comparison can be rejected outright,
 * auto-accepted locally, or must be forwarded to user space via the
 * management interface for confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement encodes MITM */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept to give the remote side
		 * time to abort the pairing.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5678
5679 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5680                                          struct sk_buff *skb)
5681 {
5682         struct hci_ev_user_passkey_req *ev = data;
5683
5684         bt_dev_dbg(hdev, "");
5685
5686         if (hci_dev_test_flag(hdev, HCI_MGMT))
5687                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5688 }
5689
5690 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5691                                         struct sk_buff *skb)
5692 {
5693         struct hci_ev_user_passkey_notify *ev = data;
5694         struct hci_conn *conn;
5695
5696         bt_dev_dbg(hdev, "");
5697
5698         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5699         if (!conn)
5700                 return;
5701
5702         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5703         conn->passkey_entered = 0;
5704
5705         if (hci_dev_test_flag(hdev, HCI_MGMT))
5706                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5707                                          conn->dst_type, conn->passkey_notify,
5708                                          conn->passkey_entered);
5709 }
5710
5711 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5712                                     struct sk_buff *skb)
5713 {
5714         struct hci_ev_keypress_notify *ev = data;
5715         struct hci_conn *conn;
5716
5717         bt_dev_dbg(hdev, "");
5718
5719         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5720         if (!conn)
5721                 return;
5722
5723         switch (ev->type) {
5724         case HCI_KEYPRESS_STARTED:
5725                 conn->passkey_entered = 0;
5726                 return;
5727
5728         case HCI_KEYPRESS_ENTERED:
5729                 conn->passkey_entered++;
5730                 break;
5731
5732         case HCI_KEYPRESS_ERASED:
5733                 conn->passkey_entered--;
5734                 break;
5735
5736         case HCI_KEYPRESS_CLEARED:
5737                 conn->passkey_entered = 0;
5738                 break;
5739
5740         case HCI_KEYPRESS_COMPLETED:
5741                 return;
5742         }
5743
5744         if (hci_dev_test_flag(hdev, HCI_MGMT))
5745                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5746                                          conn->dst_type, conn->passkey_notify,
5747                                          conn->passkey_entered);
5748 }
5749
5750 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5751                                          struct sk_buff *skb)
5752 {
5753         struct hci_ev_simple_pair_complete *ev = data;
5754         struct hci_conn *conn;
5755
5756         bt_dev_dbg(hdev, "");
5757
5758         hci_dev_lock(hdev);
5759
5760         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5761         if (!conn)
5762                 goto unlock;
5763
5764         /* Reset the authentication requirement to unknown */
5765         conn->remote_auth = 0xff;
5766
5767         /* To avoid duplicate auth_failed events to user space we check
5768          * the HCI_CONN_AUTH_PEND flag which will be set if we
5769          * initiated the authentication. A traditional auth_complete
5770          * event gets always produced as initiator and is also mapped to
5771          * the mgmt_auth_failed event */
5772         if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5773                 mgmt_auth_failed(conn, ev->status);
5774
5775         hci_conn_drop(conn);
5776
5777 unlock:
5778         hci_dev_unlock(hdev);
5779 }
5780
5781 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5782                                          struct sk_buff *skb)
5783 {
5784         struct hci_ev_remote_host_features *ev = data;
5785         struct inquiry_entry *ie;
5786         struct hci_conn *conn;
5787
5788         bt_dev_dbg(hdev, "");
5789
5790         hci_dev_lock(hdev);
5791
5792         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5793         if (conn)
5794                 memcpy(conn->features[1], ev->features, 8);
5795
5796         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5797         if (ie)
5798                 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5799
5800         hci_dev_unlock(hdev);
5801 }
5802
/* Handle HCI_Remote_OOB_Data_Request.
 *
 * Replies with locally stored out-of-band pairing data for the remote
 * device, or with a negative reply when no such data is available.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode the legacy P-192
		 * values must not be used, so send them zeroed.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Without Secure Connections only the P-192 values can
		 * be supplied.
		 */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5856
5857 #if IS_ENABLED(CONFIG_BT_HS)
5858 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5859                                   struct sk_buff *skb)
5860 {
5861         struct hci_ev_channel_selected *ev = data;
5862         struct hci_conn *hcon;
5863
5864         bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5865
5866         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5867         if (!hcon)
5868                 return;
5869
5870         amp_read_loc_assoc_final_data(hdev, hcon);
5871 }
5872
/* Handle HCI_Physical_Link_Complete (AMP).
 *
 * Finalizes the AMP physical link and confirms it towards the AMP
 * manager, or deletes the connection object on failure.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = data;
	struct hci_conn *hcon, *bredr_hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
		   ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only links managed by an AMP manager are handled here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	/* The AMP link inherits the destination address of the BR/EDR
	 * connection it was created for.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5913
/* Handle HCI_Logical_Link_Complete (AMP).
 *
 * Creates the HCI channel backing the new AMP logical link and confirms
 * the logical link towards the waiting BR/EDR L2CAP channel.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Data flows over the AMP controller now, so its block
		 * MTU applies to the L2CAP connection.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5952
5953 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5954                                              struct sk_buff *skb)
5955 {
5956         struct hci_ev_disconn_logical_link_complete *ev = data;
5957         struct hci_chan *hchan;
5958
5959         bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5960                    le16_to_cpu(ev->handle), ev->status);
5961
5962         if (ev->status)
5963                 return;
5964
5965         hci_dev_lock(hdev);
5966
5967         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5968         if (!hchan || !hchan->amp)
5969                 goto unlock;
5970
5971         amp_destroy_logical_link(hchan, ev->reason);
5972
5973 unlock:
5974         hci_dev_unlock(hdev);
5975 }
5976
5977 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5978                                              struct sk_buff *skb)
5979 {
5980         struct hci_ev_disconn_phy_link_complete *ev = data;
5981         struct hci_conn *hcon;
5982
5983         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5984
5985         if (ev->status)
5986                 return;
5987
5988         hci_dev_lock(hdev);
5989
5990         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5991         if (hcon && hcon->type == AMP_LINK) {
5992                 hcon->state = BT_CLOSED;
5993                 hci_disconn_cfm(hcon, ev->reason);
5994                 hci_conn_del(hcon);
5995         }
5996
5997         hci_dev_unlock(hdev);
5998 }
5999 #endif
6000
/* Record the initiator and responder addresses of a new LE connection.
 *
 * For outgoing connections the remote device is the responder; for
 * incoming ones it is the initiator. A Local RPA reported by the
 * controller takes precedence over the addresses configured in hdev.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
6053
/* Common handler for the legacy and enhanced LE Connection Complete
 * events. local_rpa is NULL for the legacy event (which carries no
 * Local RPA field). interval, latency and supervision_timeout are
 * already converted to host byte order by the callers.
 *
 * Takes and releases hdev->lock; always refreshes passive scanning
 * before returning.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
#ifdef TIZEN_BT
		/* LE auto connect */
		bacpy(&conn->dst, bdaddr);
#endif
		/* Connection attempt succeeded (or failed) before the
		 * connect timeout fired; stop the timeout work.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* Reject handles outside the valid HCI range; treat as a
	 * controller parameter error so the failure path below runs.
	 */
	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
			   HCI_CONN_HANDLE_MAX);
		status = HCI_ERROR_INVALID_PARAMETERS;
	}

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when sotfware rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Hold the connection until the remote features
		 * procedure completes.
		 */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending connection attempt for this peer completed; drop
	 * the params' ownership reference taken in check_pending_le_conn.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
6236
6237 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6238                                      struct sk_buff *skb)
6239 {
6240         struct hci_ev_le_conn_complete *ev = data;
6241
6242         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6243
6244         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6245                              NULL, ev->role, le16_to_cpu(ev->handle),
6246                              le16_to_cpu(ev->interval),
6247                              le16_to_cpu(ev->latency),
6248                              le16_to_cpu(ev->supervision_timeout));
6249 }
6250
6251 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6252                                          struct sk_buff *skb)
6253 {
6254         struct hci_ev_le_enh_conn_complete *ev = data;
6255
6256         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6257
6258         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6259                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6260                              le16_to_cpu(ev->interval),
6261                              le16_to_cpu(ev->latency),
6262                              le16_to_cpu(ev->supervision_timeout));
6263 }
6264
/* HCI_EVT_LE_EXT_ADV_SET_TERM: an extended advertising set stopped,
 * either because a connection was created from it or because it timed
 * out / exhausted its event count (non-zero status).
 *
 * On error status the terminated instance is removed; on success the
 * instance is kept (merely disabled) and, when a connection resulted,
 * the connection's resp_addr is fixed up with the set's random address.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* If any other instance is still enabled, advertising
		 * as a whole is still active; keep HCI_LE_ADV set.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* resp_addr only needs fixing up when advertising with a
		 * random address and no address has been recorded yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Advertising handle 0 uses the controller-wide random
		 * address rather than a per-instance one.
		 */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6334
6335 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6336                                             struct sk_buff *skb)
6337 {
6338         struct hci_ev_le_conn_update_complete *ev = data;
6339         struct hci_conn *conn;
6340
6341         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6342
6343         if (ev->status)
6344                 return;
6345
6346         hci_dev_lock(hdev);
6347
6348         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6349         if (conn) {
6350 #ifdef TIZEN_BT
6351                 if (ev->status) {
6352                         hci_dev_unlock(hdev);
6353                         mgmt_le_conn_update_failed(hdev, &conn->dst,
6354                                 conn->type, conn->dst_type, ev->status);
6355                         return;
6356                 }
6357 #endif
6358                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6359                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6360                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6361         }
6362
6363         hci_dev_unlock(hdev);
6364
6365 #ifdef TIZEN_BT
6366         mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
6367                                 conn->dst_type, conn->le_conn_interval,
6368                                 conn->le_conn_latency, conn->le_supv_timeout);
6369 #endif
6370 }
6371
/* This function requires the caller holds hdev->lock.
 *
 * Decide, based on a received (connectable) advertising report, whether
 * an LE connection to @addr should be initiated, and start it if so.
 *
 * Returns the hci_conn of the connection attempt, or NULL when no
 * attempt was (or could be) made. When the attempt was triggered by
 * stored params (not an explicit connect), a reference to the conn is
 * stored in params->conn so the attempt can be aborted if the params
 * are removed.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6463
/* Process a single (legacy or extended) advertising report.
 *
 * @type: legacy PDU type (LE_ADV_*), already mapped from extended
 *        event types by the caller where necessary.
 * @direct_addr: non-NULL only for LE Direct Advertising Reports; must
 *        then be an RPA matching our own IRK or the report is dropped.
 * @ext_adv: report originated from an extended advertising event.
 * @ctl_time: unused here; passed through by some callers.
 * @instant: timestamp (jiffies) forwarded to mgmt_device_found.
 *
 * May trigger a pending LE connection, forwards device-found events to
 * mgmt, and (in non-TIZEN builds) merges ADV_IND/ADV_SCAN_IND reports
 * with their subsequent SCAN_RSP. Caller holds hdev->lock.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv, bool ctl_time, u64 instant)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type);
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

#ifndef TIZEN_BT
		/* Handle all adv packet in platform */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;
#endif

#ifdef TIZEN_BT
		mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, type);
#else
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
#endif
		return;
	}

	/* When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the unlikely case that a controller just sends a scan
	 * response event that doesn't match the pending report, then
	 * it is marked as a standalone SCAN_RSP.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_SCAN_RSP;

#ifdef TIZEN_BT
	/* Disable adv ind and scan rsp merging */
	mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, type);
#else
	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
#endif
}
6684
6685 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6686                                   struct sk_buff *skb)
6687 {
6688         struct hci_ev_le_advertising_report *ev = data;
6689         u64 instant = jiffies;
6690
6691         if (!ev->num)
6692                 return;
6693
6694         hci_dev_lock(hdev);
6695
6696         while (ev->num--) {
6697                 struct hci_ev_le_advertising_info *info;
6698                 s8 rssi;
6699
6700                 info = hci_le_ev_skb_pull(hdev, skb,
6701                                           HCI_EV_LE_ADVERTISING_REPORT,
6702                                           sizeof(*info));
6703                 if (!info)
6704                         break;
6705
6706                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6707                                         info->length + 1))
6708                         break;
6709
6710                 if (info->length <= HCI_MAX_AD_LENGTH) {
6711                         rssi = info->data[info->length];
6712                         process_adv_report(hdev, info->type, &info->bdaddr,
6713                                            info->bdaddr_type, NULL, 0, rssi,
6714                                            info->data, info->length, false,
6715                                            false, instant);
6716                 } else {
6717                         bt_dev_err(hdev, "Dropping invalid advertising data");
6718                 }
6719         }
6720
6721         hci_dev_unlock(hdev);
6722 }
6723
6724 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6725 {
6726         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6727                 switch (evt_type) {
6728                 case LE_LEGACY_ADV_IND:
6729                         return LE_ADV_IND;
6730                 case LE_LEGACY_ADV_DIRECT_IND:
6731                         return LE_ADV_DIRECT_IND;
6732                 case LE_LEGACY_ADV_SCAN_IND:
6733                         return LE_ADV_SCAN_IND;
6734                 case LE_LEGACY_NONCONN_IND:
6735                         return LE_ADV_NONCONN_IND;
6736                 case LE_LEGACY_SCAN_RSP_ADV:
6737                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6738                         return LE_ADV_SCAN_RSP;
6739                 }
6740
6741                 goto invalid;
6742         }
6743
6744         if (evt_type & LE_EXT_ADV_CONN_IND) {
6745                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6746                         return LE_ADV_DIRECT_IND;
6747
6748                 return LE_ADV_IND;
6749         }
6750
6751         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6752                 return LE_ADV_SCAN_RSP;
6753
6754         if (evt_type & LE_EXT_ADV_SCAN_IND)
6755                 return LE_ADV_SCAN_IND;
6756
6757         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6758             evt_type & LE_EXT_ADV_DIRECT_IND)
6759                 return LE_ADV_NONCONN_IND;
6760
6761 invalid:
6762         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6763                                evt_type);
6764
6765         return LE_ADV_INVALID;
6766 }
6767
/* Handle the LE Extended Advertising Report event: translate each report
 * to its legacy advertising type and feed it to the common
 * advertising-report machinery.
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
                                      struct sk_buff *skb)
{
        struct hci_ev_le_ext_adv_report *ev = data;
        u64 instant = jiffies;  /* one timestamp shared by every report in this event */

        if (!ev->num)
                return;

        hci_dev_lock(hdev);

        while (ev->num--) {
                struct hci_ev_le_ext_adv_info *info;
                u8 legacy_evt_type;
                u16 evt_type;

                /* Pull the fixed-size report header ... */
                info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
                                          sizeof(*info));
                if (!info)
                        break;

                /* ... followed by its variable-length advertising data. */
                if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
                                        info->length))
                        break;

                evt_type = __le16_to_cpu(info->type);
                legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
                if (legacy_evt_type != LE_ADV_INVALID) {
                        process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
                                           info->bdaddr_type, NULL, 0,
                                           info->rssi, info->data, info->length,
                                           !(evt_type & LE_EXT_ADV_LEGACY_PDU),
                                           false, instant);
                }
        }

        hci_dev_unlock(hdev);
}
6806
6807 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6808 {
6809         struct hci_cp_le_pa_term_sync cp;
6810
6811         memset(&cp, 0, sizeof(cp));
6812         cp.handle = handle;
6813
6814         return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6815 }
6816
/* Handle the LE Periodic Advertising Sync Established event. If no ISO
 * listener is willing to accept the sync, terminate it again.
 */
static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
                                            struct sk_buff *skb)
{
        struct hci_ev_le_pa_sync_established *ev = data;
        int mask = hdev->link_mode;
        __u8 flags = 0;

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        if (ev->status)
                return;

        hci_dev_lock(hdev);

        /* The sync attempt has concluded, clear the in-progress flag. */
        hci_dev_clear_flag(hdev, HCI_PA_SYNC);

        /* Ask the ISO layer whether anybody wants this sync; tear it
         * down when nobody accepts the link.
         */
        mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
        if (!(mask & HCI_LM_ACCEPT))
                hci_le_pa_term_sync(hdev, ev->handle);

        hci_dev_unlock(hdev);
}
6839
/* Handle the LE Read Remote Features Complete event: cache the remote
 * LE feature mask and, for connections still in BT_CONFIG, complete the
 * connection setup.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
                                            struct sk_buff *skb)
{
        struct hci_ev_le_remote_feat_complete *ev = data;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (conn) {
                if (!ev->status)
                        memcpy(conn->features[0], ev->features, 8);

                if (conn->state == BT_CONFIG) {
                        __u8 status;

                        /* If the local controller supports peripheral-initiated
                         * features exchange, but the remote controller does
                         * not, then it is possible that the error code 0x1a
                         * for unsupported remote feature gets returned.
                         *
                         * In this specific case, allow the connection to
                         * transition into connected state and mark it as
                         * successful.
                         */
                        if (!conn->out && ev->status == 0x1a &&
                            (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
                                status = 0x00;
                        else
                                status = ev->status;

                        conn->state = BT_CONNECTED;
                        hci_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }

        hci_dev_unlock(hdev);
}
6881
/* Handle the LE Long Term Key Request event: look up a stored LTK for
 * the connection and reply with it, or send a negative reply when no
 * matching key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_le_ltk_req *ev = data;
        struct hci_cp_le_ltk_reply cp;
        struct hci_cp_le_ltk_neg_reply neg;
        struct hci_conn *conn;
        struct smp_ltk *ltk;

        bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (conn == NULL)
                goto not_found;

        ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
        if (!ltk)
                goto not_found;

        if (smp_ltk_is_sc(ltk)) {
                /* With SC both EDiv and Rand are set to zero */
                if (ev->ediv || ev->rand)
                        goto not_found;
        } else {
                /* For non-SC keys check that EDiv and Rand match */
                if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
                        goto not_found;
        }

        /* Pad the key out to the full reply size with zeroes. */
        memcpy(cp.ltk, ltk->val, ltk->enc_size);
        memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
        cp.handle = cpu_to_le16(conn->handle);

        conn->pending_sec_level = smp_ltk_sec_level(ltk);

        conn->enc_key_size = ltk->enc_size;

        hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

        /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
         * temporary key used to encrypt a connection following
         * pairing. It is used during the Encrypted Session Setup to
         * distribute the keys. Later, security can be re-established
         * using a distributed LTK.
         */
        if (ltk->type == SMP_STK) {
                set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
                list_del_rcu(&ltk->list);
                kfree_rcu(ltk, rcu);
        } else {
                clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
        }

        hci_dev_unlock(hdev);

        return;

not_found:
        neg.handle = ev->handle;
        hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
        hci_dev_unlock(hdev);
}
6946
6947 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6948                                       u8 reason)
6949 {
6950         struct hci_cp_le_conn_param_req_neg_reply cp;
6951
6952         cp.handle = cpu_to_le16(handle);
6953         cp.reason = reason;
6954
6955         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6956                      &cp);
6957 }
6958
/* Handle the LE Remote Connection Parameter Request event: validate the
 * proposed parameters, optionally persist them (central role) and reply
 * to the controller.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
                                             struct sk_buff *skb)
{
        struct hci_ev_le_remote_conn_param_req *ev = data;
        struct hci_cp_le_conn_param_req_reply cp;
        struct hci_conn *hcon;
        u16 handle, min, max, latency, timeout;

        bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

        handle = le16_to_cpu(ev->handle);
        min = le16_to_cpu(ev->interval_min);
        max = le16_to_cpu(ev->interval_max);
        latency = le16_to_cpu(ev->latency);
        timeout = le16_to_cpu(ev->timeout);

        /* Reject the request when the connection is unknown or no longer up. */
        hcon = hci_conn_hash_lookup_handle(hdev, handle);
        if (!hcon || hcon->state != BT_CONNECTED)
                return send_conn_param_neg_reply(hdev, handle,
                                                 HCI_ERROR_UNKNOWN_CONN_ID);

        /* Reject parameters outside the spec-valid ranges. */
        if (hci_check_conn_params(min, max, latency, timeout))
                return send_conn_param_neg_reply(hdev, handle,
                                                 HCI_ERROR_INVALID_LL_PARAMS);

        if (hcon->role == HCI_ROLE_MASTER) {
                struct hci_conn_params *params;
                u8 store_hint;

                hci_dev_lock(hdev);

                /* Remember the remote's preference for future connections
                 * and tell userspace whether it was stored.
                 */
                params = hci_conn_params_lookup(hdev, &hcon->dst,
                                                hcon->dst_type);
                if (params) {
                        params->conn_min_interval = min;
                        params->conn_max_interval = max;
                        params->conn_latency = latency;
                        params->supervision_timeout = timeout;
                        store_hint = 0x01;
                } else {
                        store_hint = 0x00;
                }

                hci_dev_unlock(hdev);

                mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
                                    store_hint, min, max, latency, timeout);
        }

        /* Accept the request, echoing the remote's proposal back. */
        cp.handle = ev->handle;
        cp.interval_min = ev->interval_min;
        cp.interval_max = ev->interval_max;
        cp.latency = ev->latency;
        cp.timeout = ev->timeout;
        cp.min_ce_len = 0;
        cp.max_ce_len = 0;

        hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
7018
/* Handle the LE Directed Advertising Report event: forward each report,
 * including the directed (target) address, to the common
 * advertising-report machinery. These reports carry no AD data.
 */
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
                                         struct sk_buff *skb)
{
        struct hci_ev_le_direct_adv_report *ev = data;
        u64 instant = jiffies;  /* one timestamp shared by all reports */
        int i;

        /* Validate that the skb really contains ev->num entries. */
        if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
                                flex_array_size(ev, info, ev->num)))
                return;

        if (!ev->num)
                return;

        hci_dev_lock(hdev);

        for (i = 0; i < ev->num; i++) {
                struct hci_ev_le_direct_adv_info *info = &ev->info[i];

                process_adv_report(hdev, info->type, &info->bdaddr,
                                   info->bdaddr_type, &info->direct_addr,
                                   info->direct_addr_type, info->rssi, NULL, 0,
                                   false, false, instant);
        }

        hci_dev_unlock(hdev);
}
7046
7047 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
7048                                   struct sk_buff *skb)
7049 {
7050         struct hci_ev_le_phy_update_complete *ev = data;
7051         struct hci_conn *conn;
7052
7053         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7054
7055         if (ev->status)
7056                 return;
7057
7058         hci_dev_lock(hdev);
7059
7060         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
7061         if (!conn)
7062                 goto unlock;
7063
7064         conn->le_tx_phy = ev->tx_phy;
7065         conn->le_rx_phy = ev->rx_phy;
7066
7067 unlock:
7068         hci_dev_unlock(hdev);
7069 }
7070
/* Handle the LE CIS Established event: populate the peripheral's QoS
 * from the event parameters and either complete or tear down the ISO
 * connection depending on the status.
 */
static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
                                        struct sk_buff *skb)
{
        struct hci_evt_le_cis_established *ev = data;
        struct hci_conn *conn;
        u16 handle = __le16_to_cpu(ev->handle);

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, handle);
        if (!conn) {
                bt_dev_err(hdev,
                           "Unable to find connection with handle 0x%4.4x",
                           handle);
                goto unlock;
        }

        if (conn->type != ISO_LINK) {
                bt_dev_err(hdev,
                           "Invalid connection link type handle 0x%4.4x",
                           handle);
                goto unlock;
        }

        if (conn->role == HCI_ROLE_SLAVE) {
                __le32 interval;

                /* c_latency/p_latency are 3-byte little-endian fields;
                 * copy them into a zeroed __le32 before conversion.
                 *
                 * NOTE(review): the transport-latency fields are stored
                 * in .interval and ev->interval in .latency — this
                 * crossing looks intentional for the current QoS users
                 * but should be confirmed against the Core spec.
                 */
                memset(&interval, 0, sizeof(interval));

                memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
                conn->iso_qos.in.interval = le32_to_cpu(interval);
                memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
                conn->iso_qos.out.interval = le32_to_cpu(interval);
                conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
                conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
                conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
                conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
                conn->iso_qos.in.phy = ev->c_phy;
                conn->iso_qos.out.phy = ev->p_phy;
        }

        if (!ev->status) {
                conn->state = BT_CONNECTED;
                hci_debugfs_create_conn(conn);
                hci_conn_add_sysfs(conn);
                hci_iso_setup_path(conn);
                goto unlock;
        }

        /* Establishment failed: notify listeners and drop the connection. */
        hci_connect_cfm(conn, ev->status);
        hci_conn_del(conn);

unlock:
        hci_dev_unlock(hdev);
}
7128
7129 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
7130 {
7131         struct hci_cp_le_reject_cis cp;
7132
7133         memset(&cp, 0, sizeof(cp));
7134         cp.handle = handle;
7135         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
7136         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
7137 }
7138
7139 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
7140 {
7141         struct hci_cp_le_accept_cis cp;
7142
7143         memset(&cp, 0, sizeof(cp));
7144         cp.handle = handle;
7145         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
7146 }
7147
/* Handle the LE CIS Request event: consult the ISO layer and either
 * accept, defer or reject the incoming CIS, creating a slave connection
 * object as needed.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
                               struct sk_buff *skb)
{
        struct hci_evt_le_cis_req *ev = data;
        u16 acl_handle, cis_handle;
        struct hci_conn *acl, *cis;
        int mask;
        __u8 flags = 0;

        acl_handle = __le16_to_cpu(ev->acl_handle);
        cis_handle = __le16_to_cpu(ev->cis_handle);

        bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
                   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

        hci_dev_lock(hdev);

        /* The CIS request must reference an existing ACL connection. */
        acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
        if (!acl)
                goto unlock;

        /* Reject when no ISO listener is willing to accept the link. */
        mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
        if (!(mask & HCI_LM_ACCEPT)) {
                hci_le_reject_cis(hdev, ev->cis_handle);
                goto unlock;
        }

        /* Reuse an existing CIS connection object or create one. */
        cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
        if (!cis) {
                cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
                if (!cis) {
                        hci_le_reject_cis(hdev, ev->cis_handle);
                        goto unlock;
                }
                cis->handle = cis_handle;
        }

        cis->iso_qos.cig = ev->cig_id;
        cis->iso_qos.cis = ev->cis_id;

        /* Accept immediately unless the listener asked to defer the
         * decision to userspace.
         */
        if (!(flags & HCI_PROTO_DEFER)) {
                hci_le_accept_cis(hdev, ev->cis_handle);
        } else {
                cis->state = BT_CONNECT2;
                hci_connect_cfm(cis, 0);
        }

unlock:
        hci_dev_unlock(hdev);
}
7198
/* Handle the LE Create BIG Complete event: attach the first BIS handle
 * to the pending BIG connection and complete or tear it down depending
 * on the status.
 */
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
                                           struct sk_buff *skb)
{
        struct hci_evt_le_create_big_complete *ev = data;
        struct hci_conn *conn;

        BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

        /* Validate that the skb really carries num_bis handles. */
        if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
                                flex_array_size(ev, bis_handle, ev->num_bis)))
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_big(hdev, ev->handle);
        if (!conn)
                goto unlock;

        if (conn->type != ISO_LINK) {
                bt_dev_err(hdev,
                           "Invalid connection link type handle 0x%2.2x",
                           ev->handle);
                goto unlock;
        }

        if (ev->num_bis)
                conn->handle = __le16_to_cpu(ev->bis_handle[0]);

        if (!ev->status) {
                conn->state = BT_CONNECTED;
                hci_debugfs_create_conn(conn);
                hci_conn_add_sysfs(conn);
                hci_iso_setup_path(conn);
                goto unlock;
        }

        /* BIG creation failed: notify listeners and drop the connection. */
        hci_connect_cfm(conn, ev->status);
        hci_conn_del(conn);

unlock:
        hci_dev_unlock(hdev);
}
7241
/* Handle the LE BIG Sync Established event: create/update one slave ISO
 * connection per synchronized BIS and set up its data path.
 */
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
                                            struct sk_buff *skb)
{
        struct hci_evt_le_big_sync_estabilished *ev = data;
        struct hci_conn *bis;
        int i;

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        /* Validate that the skb really carries num_bis handles. */
        if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
                                flex_array_size(ev, bis, ev->num_bis)))
                return;

        if (ev->status)
                return;

        hci_dev_lock(hdev);

        for (i = 0; i < ev->num_bis; i++) {
                u16 handle = le16_to_cpu(ev->bis[i]);
                __le32 interval;

                /* Reuse an existing BIS connection object or create one. */
                bis = hci_conn_hash_lookup_handle(hdev, handle);
                if (!bis) {
                        bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
                                           HCI_ROLE_SLAVE);
                        if (!bis)
                                continue;
                        bis->handle = handle;
                }

                bis->iso_qos.big = ev->handle;
                /* ev->latency is a 3-byte little-endian field; widen it
                 * through a zeroed __le32 before conversion.
                 */
                memset(&interval, 0, sizeof(interval));
                memcpy(&interval, ev->latency, sizeof(ev->latency));
                bis->iso_qos.in.interval = le32_to_cpu(interval);
                /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
                bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
                bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);

                hci_iso_setup_path(bis);
        }

        hci_dev_unlock(hdev);
}
7286
7287 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7288                                            struct sk_buff *skb)
7289 {
7290         struct hci_evt_le_big_info_adv_report *ev = data;
7291         int mask = hdev->link_mode;
7292         __u8 flags = 0;
7293
7294         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7295
7296         hci_dev_lock(hdev);
7297
7298         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7299         if (!(mask & HCI_LM_ACCEPT))
7300                 hci_le_pa_term_sync(hdev, ev->sync_handle);
7301
7302         hci_dev_unlock(hdev);
7303 }
7304
/* Declare a variable-length LE subevent entry: _func handles subevent
 * _op with a payload between _min_len and _max_len bytes.
 */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
        .func = _func, \
        .min_len = _min_len, \
        .max_len = _max_len, \
}

/* Fixed-length variant: the payload must be exactly _len bytes. */
#define HCI_LE_EV(_op, _func, _len) \
        HCI_LE_EV_VL(_op, _func, _len, _len)

/* Entry whose payload is just a status byte (struct hci_ev_status). */
#define HCI_LE_EV_STATUS(_op, _func) \
        HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7317
/* Entries in this table shall have their position according to the subevent
 * opcode they handle so the use of the macros above is recommended since it
 * does attempt to initialize at its proper index using Designated
 * Initializers; that way events without a callback function can be omitted.
 */
static const struct hci_le_ev {
        void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
        u16  min_len;
        u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
        /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
        HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
                  sizeof(struct hci_ev_le_conn_complete)),
        /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
        HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
                     sizeof(struct hci_ev_le_advertising_report),
                     HCI_MAX_EVENT_SIZE),
        /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
        HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
                  hci_le_conn_update_complete_evt,
                  sizeof(struct hci_ev_le_conn_update_complete)),
        /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
        HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
                  hci_le_remote_feat_complete_evt,
                  sizeof(struct hci_ev_le_remote_feat_complete)),
        /* [0x05 = HCI_EV_LE_LTK_REQ] */
        HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
                  sizeof(struct hci_ev_le_ltk_req)),
        /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
        HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
                  hci_le_remote_conn_param_req_evt,
                  sizeof(struct hci_ev_le_remote_conn_param_req)),
#ifdef TIZEN_BT
        /* [0x07 = HCI_EV_LE_DATA_LEN_CHANGE] */
        HCI_LE_EV(HCI_EV_LE_DATA_LEN_CHANGE,
                  hci_le_data_length_changed_complete_evt,
                  sizeof(struct hci_ev_le_data_len_change)),
#endif
        /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
        HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
                  hci_le_enh_conn_complete_evt,
                  sizeof(struct hci_ev_le_enh_conn_complete)),
        /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
        HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
                     sizeof(struct hci_ev_le_direct_adv_report),
                     HCI_MAX_EVENT_SIZE),
        /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
        HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
                  sizeof(struct hci_ev_le_phy_update_complete)),
        /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
        HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
                     sizeof(struct hci_ev_le_ext_adv_report),
                     HCI_MAX_EVENT_SIZE),
        /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
        HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
                  hci_le_pa_sync_estabilished_evt,
                  sizeof(struct hci_ev_le_pa_sync_established)),
        /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
        HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
                  sizeof(struct hci_evt_le_ext_adv_set_term)),
        /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
        HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
                  sizeof(struct hci_evt_le_cis_established)),
        /* [0x1a = HCI_EVT_LE_CIS_REQ] */
        HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
                  sizeof(struct hci_evt_le_cis_req)),
        /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
        HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
                     hci_le_create_big_complete_evt,
                     sizeof(struct hci_evt_le_create_big_complete),
                     HCI_MAX_EVENT_SIZE),
        /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
        HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
                     hci_le_big_sync_established_evt,
                     sizeof(struct hci_evt_le_big_sync_estabilished),
                     HCI_MAX_EVENT_SIZE),
        /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
        HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
                     hci_le_big_info_adv_report_evt,
                     sizeof(struct hci_evt_le_big_info_adv_report),
                     HCI_MAX_EVENT_SIZE),
};
7400
/* Dispatch an LE Meta event to its subevent handler via hci_le_ev_table,
 * first completing any pending LE command that was waiting for this
 * subevent.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
                            struct sk_buff *skb, u16 *opcode, u8 *status,
                            hci_req_complete_t *req_complete,
                            hci_req_complete_skb_t *req_complete_skb)
{
        struct hci_ev_le_meta *ev = data;
        const struct hci_le_ev *subev;

        bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

        /* Only match event if command OGF is for LE */
        if (hdev->sent_cmd &&
            hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
            hci_skb_event(hdev->sent_cmd) == ev->subevent) {
                *opcode = hci_skb_opcode(hdev->sent_cmd);
                hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
                                     req_complete_skb);
        }

        /* Unhandled subevents have no .func entry in the table. */
        subev = &hci_le_ev_table[ev->subevent];
        if (!subev->func)
                return;

        if (skb->len < subev->min_len) {
                bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
                           ev->subevent, skb->len, subev->min_len);
                return;
        }

        /* Just warn if the length is over max_len size it still be
         * possible to partially parse the event so leave to callback to
         * decide if that is acceptable.
         */
        if (skb->len > subev->max_len)
                bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
                            ev->subevent, skb->len, subev->max_len);
        data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
        if (!data)
                return;

        subev->func(hdev, data, skb);
}
7443
/* Check whether @skb is the completion event a pending request was
 * waiting for: either the specific @event, or (when @event is zero) a
 * Command Complete for @opcode. Returns true when it matches. Pulls the
 * consumed headers off @skb as a side effect.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                 u8 event, struct sk_buff *skb)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;

        if (!skb)
                return false;

        hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
        if (!hdr)
                return false;

        /* A specific event was requested: just compare event codes. */
        if (event) {
                if (hdr->evt != event)
                        return false;
                return true;
        }

        /* Check if request ended in Command Status - no way to retrieve
         * any extra parameters in this case.
         */
        if (hdr->evt == HCI_EV_CMD_STATUS)
                return false;

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
                           hdr->evt);
                return false;
        }

        ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
        if (!ev)
                return false;

        if (opcode != __le16_to_cpu(ev->opcode)) {
                BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
                       __le16_to_cpu(ev->opcode));
                return false;
        }

        return true;
}
7487
7488 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7489                                   struct sk_buff *skb)
7490 {
7491         struct hci_ev_le_advertising_info *adv;
7492         struct hci_ev_le_direct_adv_info *direct_adv;
7493         struct hci_ev_le_ext_adv_info *ext_adv;
7494         const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7495         const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7496
7497         hci_dev_lock(hdev);
7498
7499         /* If we are currently suspended and this is the first BT event seen,
7500          * save the wake reason associated with the event.
7501          */
7502         if (!hdev->suspended || hdev->wake_reason)
7503                 goto unlock;
7504
7505         /* Default to remote wake. Values for wake_reason are documented in the
7506          * Bluez mgmt api docs.
7507          */
7508         hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7509
7510         /* Once configured for remote wakeup, we should only wake up for
7511          * reconnections. It's useful to see which device is waking us up so
7512          * keep track of the bdaddr of the connection event that woke us up.
7513          */
7514         if (event == HCI_EV_CONN_REQUEST) {
7515                 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7516                 hdev->wake_addr_type = BDADDR_BREDR;
7517         } else if (event == HCI_EV_CONN_COMPLETE) {
7518                 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7519                 hdev->wake_addr_type = BDADDR_BREDR;
7520         } else if (event == HCI_EV_LE_META) {
7521                 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7522                 u8 subevent = le_ev->subevent;
7523                 u8 *ptr = &skb->data[sizeof(*le_ev)];
7524                 u8 num_reports = *ptr;
7525
7526                 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7527                      subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7528                      subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7529                     num_reports) {
7530                         adv = (void *)(ptr + 1);
7531                         direct_adv = (void *)(ptr + 1);
7532                         ext_adv = (void *)(ptr + 1);
7533
7534                         switch (subevent) {
7535                         case HCI_EV_LE_ADVERTISING_REPORT:
7536                                 bacpy(&hdev->wake_addr, &adv->bdaddr);
7537                                 hdev->wake_addr_type = adv->bdaddr_type;
7538                                 break;
7539                         case HCI_EV_LE_DIRECT_ADV_REPORT:
7540                                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7541                                 hdev->wake_addr_type = direct_adv->bdaddr_type;
7542                                 break;
7543                         case HCI_EV_LE_EXT_ADV_REPORT:
7544                                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7545                                 hdev->wake_addr_type = ext_adv->bdaddr_type;
7546                                 break;
7547                         }
7548                 }
7549         } else {
7550                 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7551         }
7552
7553 unlock:
7554         hci_dev_unlock(hdev);
7555 }
7556
/* Initialize an hci_ev_table entry at index _op with a variable-length
 * payload: _min_len is the smallest acceptable event size, _max_len the
 * largest before a warning is emitted.
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length event: min and max length are the same. */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Event whose payload is just a status byte (struct hci_ev_status). */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Variable-length event that participates in command/request completion:
 * sets .req so the dispatcher calls .func_req (the union variant taking
 * opcode/status/req_complete out-parameters) instead of .func.
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length request-completing event. */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
7581
/* Entries in this table shall have their position according to the event
 * opcode they handle so the use of the macros above is recommended since it
 * does attempt to initialize at its proper index using Designated
 * Initializers; that way events without a callback function don't get an
 * entry.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;
	u16  max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
#ifdef TIZEN_BT
	/* [0xFF = HCI_EV_VENDOR_SPECIFIC] */
	HCI_EV(HCI_EV_VENDOR_SPECIFIC, hci_vendor_specific_evt,
	       sizeof(struct hci_ev_vendor_specific)),
#else
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
#endif
};
7745
7746 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7747                            u16 *opcode, u8 *status,
7748                            hci_req_complete_t *req_complete,
7749                            hci_req_complete_skb_t *req_complete_skb)
7750 {
7751         const struct hci_ev *ev = &hci_ev_table[event];
7752         void *data;
7753
7754         if (!ev->func)
7755                 return;
7756
7757         if (skb->len < ev->min_len) {
7758                 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7759                            event, skb->len, ev->min_len);
7760                 return;
7761         }
7762
7763         /* Just warn if the length is over max_len size it still be
7764          * possible to partially parse the event so leave to callback to
7765          * decide if that is acceptable.
7766          */
7767         if (skb->len > ev->max_len)
7768                 bt_dev_warn_ratelimited(hdev,
7769                                         "unexpected event 0x%2.2x length: %u > %u",
7770                                         event, skb->len, ev->max_len);
7771
7772         data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7773         if (!data)
7774                 return;
7775
7776         if (ev->req)
7777                 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7778                              req_complete_skb);
7779         else
7780                 ev->func(hdev, data, skb);
7781 }
7782
/* Entry point for a received HCI Event packet.
 *
 * Validates the event header, keeps a clone of the latest event in
 * hdev->recv_event, matches the event against a pending command if any,
 * dispatches it through hci_ev_table via hci_event_func() and finally
 * invokes the resolved request-completion callback.  Always consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Replace the cached copy of the most recent event with a clone of
	 * this one; the previous clone (possibly NULL) is released first.
	 */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	/* Event code 0x00 is not a valid HCI event. */
	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	/* Advance past the event header so handlers see only the payload. */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Drop the pristine copy if this event doesn't actually
		 * complete the outstanding command; the callback then runs
		 * with a NULL skb.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	/* kfree_skb(NULL) is a no-op, so the early-exit paths are safe. */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}