c53bdb0e00f508de49ddea84c671fcea80eb900c
[platform/kernel/linux-starfive.git] / net / bluetooth / hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI event handling. */
27
28 #include <asm/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "hci_debugfs.h"
38 #include "hci_codec.h"
39 #include "a2mp.h"
40 #include "amp.h"
41 #include "smp.h"
42 #include "msft.h"
43 #include "eir.h"
44
/* 16 bytes of zeroes; an all-zero link key (semantics used elsewhere in
 * this file — presumably to reject degenerate keys; confirm at call sites).
 */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Convert a duration in seconds to jiffies via msecs_to_jiffies(). */
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
49
50 /* Handle HCI Event packets */
51
52 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
53                              u8 ev, size_t len)
54 {
55         void *data;
56
57         data = skb_pull_data(skb, len);
58         if (!data)
59                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
60
61         return data;
62 }
63
64 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
65                              u16 op, size_t len)
66 {
67         void *data;
68
69         data = skb_pull_data(skb, len);
70         if (!data)
71                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
72
73         return data;
74 }
75
76 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
77                                 u8 ev, size_t len)
78 {
79         void *data;
80
81         data = skb_pull_data(skb, len);
82         if (!data)
83                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
84
85         return data;
86 }
87
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On success: clears HCI_INQUIRY, wakes anyone waiting on that bit, and
 * moves the discovery state machine to DISCOVERY_STOPPED unless an LE
 * active scan is still running. Returns the (possibly overridden) status.
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	/* Clear-then-wake ordering matters: the barrier ensures the
	 * cleared bit is visible before waiters are woken.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Kick off any connection attempts that were deferred while the
	 * inquiry was in progress.
	 */
	hci_conn_check_pending(hdev);

	return rp->status;
}
127
128 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
129                               struct sk_buff *skb)
130 {
131         struct hci_ev_status *rp = data;
132
133         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
134
135         if (rp->status)
136                 return rp->status;
137
138         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
139
140         return rp->status;
141 }
142
143 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
144                                    struct sk_buff *skb)
145 {
146         struct hci_ev_status *rp = data;
147
148         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
149
150         if (rp->status)
151                 return rp->status;
152
153         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
154
155         hci_conn_check_pending(hdev);
156
157         return rp->status;
158 }
159
160 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
161                                         struct sk_buff *skb)
162 {
163         struct hci_ev_status *rp = data;
164
165         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
166
167         return rp->status;
168 }
169
170 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
171                                 struct sk_buff *skb)
172 {
173         struct hci_rp_role_discovery *rp = data;
174         struct hci_conn *conn;
175
176         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
177
178         if (rp->status)
179                 return rp->status;
180
181         hci_dev_lock(hdev);
182
183         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
184         if (conn)
185                 conn->role = rp->role;
186
187         hci_dev_unlock(hdev);
188
189         return rp->status;
190 }
191
192 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
193                                   struct sk_buff *skb)
194 {
195         struct hci_rp_read_link_policy *rp = data;
196         struct hci_conn *conn;
197
198         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
199
200         if (rp->status)
201                 return rp->status;
202
203         hci_dev_lock(hdev);
204
205         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
206         if (conn)
207                 conn->link_policy = __le16_to_cpu(rp->policy);
208
209         hci_dev_unlock(hdev);
210
211         return rp->status;
212 }
213
214 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
215                                    struct sk_buff *skb)
216 {
217         struct hci_rp_write_link_policy *rp = data;
218         struct hci_conn *conn;
219         void *sent;
220
221         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
222
223         if (rp->status)
224                 return rp->status;
225
226         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
227         if (!sent)
228                 return rp->status;
229
230         hci_dev_lock(hdev);
231
232         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
233         if (conn)
234                 conn->link_policy = get_unaligned_le16(sent + 2);
235
236         hci_dev_unlock(hdev);
237
238         return rp->status;
239 }
240
241 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
242                                       struct sk_buff *skb)
243 {
244         struct hci_rp_read_def_link_policy *rp = data;
245
246         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
247
248         if (rp->status)
249                 return rp->status;
250
251         hdev->link_policy = __le16_to_cpu(rp->policy);
252
253         return rp->status;
254 }
255
256 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
257                                        struct sk_buff *skb)
258 {
259         struct hci_ev_status *rp = data;
260         void *sent;
261
262         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
263
264         if (rp->status)
265                 return rp->status;
266
267         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
268         if (!sent)
269                 return rp->status;
270
271         hdev->link_policy = get_unaligned_le16(sent);
272
273         return rp->status;
274 }
275
276 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
277 {
278         struct hci_ev_status *rp = data;
279
280         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
281
282         clear_bit(HCI_RESET, &hdev->flags);
283
284         if (rp->status)
285                 return rp->status;
286
287         /* Reset all non-persistent flags */
288         hci_dev_clear_volatile_flags(hdev);
289
290         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
291
292         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
293         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
294
295         memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
296         hdev->adv_data_len = 0;
297
298         memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
299         hdev->scan_rsp_data_len = 0;
300
301         hdev->le_scan_type = LE_SCAN_PASSIVE;
302
303         hdev->ssp_debug_mode = 0;
304
305         hci_bdaddr_list_clear(&hdev->le_accept_list);
306         hci_bdaddr_list_clear(&hdev->le_resolv_list);
307
308         return rp->status;
309 }
310
311 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
312                                       struct sk_buff *skb)
313 {
314         struct hci_rp_read_stored_link_key *rp = data;
315         struct hci_cp_read_stored_link_key *sent;
316
317         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
318
319         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
320         if (!sent)
321                 return rp->status;
322
323         if (!rp->status && sent->read_all == 0x01) {
324                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
325                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
326         }
327
328         return rp->status;
329 }
330
331 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
332                                         struct sk_buff *skb)
333 {
334         struct hci_rp_delete_stored_link_key *rp = data;
335         u16 num_keys;
336
337         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
338
339         if (rp->status)
340                 return rp->status;
341
342         num_keys = le16_to_cpu(rp->num_keys);
343
344         if (num_keys <= hdev->stored_num_keys)
345                 hdev->stored_num_keys -= num_keys;
346         else
347                 hdev->stored_num_keys = 0;
348
349         return rp->status;
350 }
351
352 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
353                                   struct sk_buff *skb)
354 {
355         struct hci_ev_status *rp = data;
356         void *sent;
357
358         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
359
360         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
361         if (!sent)
362                 return rp->status;
363
364         hci_dev_lock(hdev);
365
366         if (hci_dev_test_flag(hdev, HCI_MGMT))
367                 mgmt_set_local_name_complete(hdev, sent, rp->status);
368         else if (!rp->status)
369                 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
370
371         hci_dev_unlock(hdev);
372
373         return rp->status;
374 }
375
376 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
377                                  struct sk_buff *skb)
378 {
379         struct hci_rp_read_local_name *rp = data;
380
381         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
382
383         if (rp->status)
384                 return rp->status;
385
386         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
387             hci_dev_test_flag(hdev, HCI_CONFIG))
388                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
389
390         return rp->status;
391 }
392
393 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
394                                    struct sk_buff *skb)
395 {
396         struct hci_ev_status *rp = data;
397         void *sent;
398
399         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
400
401         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
402         if (!sent)
403                 return rp->status;
404
405         hci_dev_lock(hdev);
406
407         if (!rp->status) {
408                 __u8 param = *((__u8 *) sent);
409
410                 if (param == AUTH_ENABLED)
411                         set_bit(HCI_AUTH, &hdev->flags);
412                 else
413                         clear_bit(HCI_AUTH, &hdev->flags);
414         }
415
416         if (hci_dev_test_flag(hdev, HCI_MGMT))
417                 mgmt_auth_enable_complete(hdev, rp->status);
418
419         hci_dev_unlock(hdev);
420
421         return rp->status;
422 }
423
424 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
425                                     struct sk_buff *skb)
426 {
427         struct hci_ev_status *rp = data;
428         __u8 param;
429         void *sent;
430
431         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
432
433         if (rp->status)
434                 return rp->status;
435
436         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
437         if (!sent)
438                 return rp->status;
439
440         param = *((__u8 *) sent);
441
442         if (param)
443                 set_bit(HCI_ENCRYPT, &hdev->flags);
444         else
445                 clear_bit(HCI_ENCRYPT, &hdev->flags);
446
447         return rp->status;
448 }
449
450 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
451                                    struct sk_buff *skb)
452 {
453         struct hci_ev_status *rp = data;
454         __u8 param;
455         void *sent;
456
457         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
458
459         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
460         if (!sent)
461                 return rp->status;
462
463         param = *((__u8 *) sent);
464
465         hci_dev_lock(hdev);
466
467         if (rp->status) {
468                 hdev->discov_timeout = 0;
469                 goto done;
470         }
471
472         if (param & SCAN_INQUIRY)
473                 set_bit(HCI_ISCAN, &hdev->flags);
474         else
475                 clear_bit(HCI_ISCAN, &hdev->flags);
476
477         if (param & SCAN_PAGE)
478                 set_bit(HCI_PSCAN, &hdev->flags);
479         else
480                 clear_bit(HCI_PSCAN, &hdev->flags);
481
482 done:
483         hci_dev_unlock(hdev);
484
485         return rp->status;
486 }
487
488 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
489                                   struct sk_buff *skb)
490 {
491         struct hci_ev_status *rp = data;
492         struct hci_cp_set_event_filter *cp;
493         void *sent;
494
495         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
496
497         if (rp->status)
498                 return rp->status;
499
500         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
501         if (!sent)
502                 return rp->status;
503
504         cp = (struct hci_cp_set_event_filter *)sent;
505
506         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
507                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
508         else
509                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
510
511         return rp->status;
512 }
513
514 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
515                                    struct sk_buff *skb)
516 {
517         struct hci_rp_read_class_of_dev *rp = data;
518
519         if (WARN_ON(!hdev))
520                 return HCI_ERROR_UNSPECIFIED;
521
522         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
523
524         if (rp->status)
525                 return rp->status;
526
527         memcpy(hdev->dev_class, rp->dev_class, 3);
528
529         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
530                    hdev->dev_class[1], hdev->dev_class[0]);
531
532         return rp->status;
533 }
534
535 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
536                                     struct sk_buff *skb)
537 {
538         struct hci_ev_status *rp = data;
539         void *sent;
540
541         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
542
543         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
544         if (!sent)
545                 return rp->status;
546
547         hci_dev_lock(hdev);
548
549         if (!rp->status)
550                 memcpy(hdev->dev_class, sent, 3);
551
552         if (hci_dev_test_flag(hdev, HCI_MGMT))
553                 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
554
555         hci_dev_unlock(hdev);
556
557         return rp->status;
558 }
559
560 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
561                                     struct sk_buff *skb)
562 {
563         struct hci_rp_read_voice_setting *rp = data;
564         __u16 setting;
565
566         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
567
568         if (rp->status)
569                 return rp->status;
570
571         setting = __le16_to_cpu(rp->voice_setting);
572
573         if (hdev->voice_setting == setting)
574                 return rp->status;
575
576         hdev->voice_setting = setting;
577
578         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
579
580         if (hdev->notify)
581                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
582
583         return rp->status;
584 }
585
586 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
587                                      struct sk_buff *skb)
588 {
589         struct hci_ev_status *rp = data;
590         __u16 setting;
591         void *sent;
592
593         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
594
595         if (rp->status)
596                 return rp->status;
597
598         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
599         if (!sent)
600                 return rp->status;
601
602         setting = get_unaligned_le16(sent);
603
604         if (hdev->voice_setting == setting)
605                 return rp->status;
606
607         hdev->voice_setting = setting;
608
609         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
610
611         if (hdev->notify)
612                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
613
614         return rp->status;
615 }
616
617 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
618                                         struct sk_buff *skb)
619 {
620         struct hci_rp_read_num_supported_iac *rp = data;
621
622         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
623
624         if (rp->status)
625                 return rp->status;
626
627         hdev->num_iac = rp->num_iac;
628
629         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
630
631         return rp->status;
632 }
633
634 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
635                                 struct sk_buff *skb)
636 {
637         struct hci_ev_status *rp = data;
638         struct hci_cp_write_ssp_mode *sent;
639
640         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
641
642         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
643         if (!sent)
644                 return rp->status;
645
646         hci_dev_lock(hdev);
647
648         if (!rp->status) {
649                 if (sent->mode)
650                         hdev->features[1][0] |= LMP_HOST_SSP;
651                 else
652                         hdev->features[1][0] &= ~LMP_HOST_SSP;
653         }
654
655         if (!rp->status) {
656                 if (sent->mode)
657                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
658                 else
659                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
660         }
661
662         hci_dev_unlock(hdev);
663
664         return rp->status;
665 }
666
667 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
668                                   struct sk_buff *skb)
669 {
670         struct hci_ev_status *rp = data;
671         struct hci_cp_write_sc_support *sent;
672
673         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
674
675         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
676         if (!sent)
677                 return rp->status;
678
679         hci_dev_lock(hdev);
680
681         if (!rp->status) {
682                 if (sent->support)
683                         hdev->features[1][0] |= LMP_HOST_SC;
684                 else
685                         hdev->features[1][0] &= ~LMP_HOST_SC;
686         }
687
688         if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
689                 if (sent->support)
690                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
691                 else
692                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
693         }
694
695         hci_dev_unlock(hdev);
696
697         return rp->status;
698 }
699
700 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
701                                     struct sk_buff *skb)
702 {
703         struct hci_rp_read_local_version *rp = data;
704
705         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
706
707         if (rp->status)
708                 return rp->status;
709
710         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
711             hci_dev_test_flag(hdev, HCI_CONFIG)) {
712                 hdev->hci_ver = rp->hci_ver;
713                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
714                 hdev->lmp_ver = rp->lmp_ver;
715                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
716                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
717         }
718
719         return rp->status;
720 }
721
/* Command Complete handler for Read Encryption Key Size.
 *
 * Records the negotiated key size on the connection and confirms
 * encryption via hci_encrypt_cfm(). On failure the key size is forced
 * to 0 and, if the key is shorter than the configured minimum, the
 * status is overridden to HCI_ERROR_AUTH_FAILURE so upper layers treat
 * the link as insecure.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		/* Connection vanished before the reply arrived. */
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;

		if (conn->enc_key_size < hdev->min_enc_key_size) {
			/* As slave role, the conn->state has been set to
			 * BT_CONNECTED and l2cap conn req might not be received
			 * yet, at this moment the l2cap layer almost does
			 * nothing with the non-zero status.
			 * So we also clear encrypt related bits, and then the
			 * handler of l2cap conn req will get the right secure
			 * state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* Propagate the (possibly overridden) status to waiters. */
	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}
776
777 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
778                                      struct sk_buff *skb)
779 {
780         struct hci_rp_read_local_commands *rp = data;
781
782         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
783
784         if (rp->status)
785                 return rp->status;
786
787         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
788             hci_dev_test_flag(hdev, HCI_CONFIG))
789                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
790
791         return rp->status;
792 }
793
794 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
795                                            struct sk_buff *skb)
796 {
797         struct hci_rp_read_auth_payload_to *rp = data;
798         struct hci_conn *conn;
799
800         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
801
802         if (rp->status)
803                 return rp->status;
804
805         hci_dev_lock(hdev);
806
807         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
808         if (conn)
809                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
810
811         hci_dev_unlock(hdev);
812
813         return rp->status;
814 }
815
816 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
817                                             struct sk_buff *skb)
818 {
819         struct hci_rp_write_auth_payload_to *rp = data;
820         struct hci_conn *conn;
821         void *sent;
822
823         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
824
825         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
826         if (!sent)
827                 return rp->status;
828
829         hci_dev_lock(hdev);
830
831         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
832         if (!conn) {
833                 rp->status = 0xff;
834                 goto unlock;
835         }
836
837         if (!rp->status)
838                 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
839
840 unlock:
841         hci_dev_unlock(hdev);
842
843         return rp->status;
844 }
845
846 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
847                                      struct sk_buff *skb)
848 {
849         struct hci_rp_read_local_features *rp = data;
850
851         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
852
853         if (rp->status)
854                 return rp->status;
855
856         memcpy(hdev->features, rp->features, 8);
857
858         /* Adjust default settings according to features
859          * supported by device. */
860
861         if (hdev->features[0][0] & LMP_3SLOT)
862                 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
863
864         if (hdev->features[0][0] & LMP_5SLOT)
865                 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
866
867         if (hdev->features[0][1] & LMP_HV2) {
868                 hdev->pkt_type  |= (HCI_HV2);
869                 hdev->esco_type |= (ESCO_HV2);
870         }
871
872         if (hdev->features[0][1] & LMP_HV3) {
873                 hdev->pkt_type  |= (HCI_HV3);
874                 hdev->esco_type |= (ESCO_HV3);
875         }
876
877         if (lmp_esco_capable(hdev))
878                 hdev->esco_type |= (ESCO_EV3);
879
880         if (hdev->features[0][4] & LMP_EV4)
881                 hdev->esco_type |= (ESCO_EV4);
882
883         if (hdev->features[0][4] & LMP_EV5)
884                 hdev->esco_type |= (ESCO_EV5);
885
886         if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
887                 hdev->esco_type |= (ESCO_2EV3);
888
889         if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
890                 hdev->esco_type |= (ESCO_3EV3);
891
892         if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
893                 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
894
895         return rp->status;
896 }
897
898 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
899                                          struct sk_buff *skb)
900 {
901         struct hci_rp_read_local_ext_features *rp = data;
902
903         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
904
905         if (rp->status)
906                 return rp->status;
907
908         if (hdev->max_page < rp->max_page) {
909                 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
910                              &hdev->quirks))
911                         bt_dev_warn(hdev, "broken local ext features page 2");
912                 else
913                         hdev->max_page = rp->max_page;
914         }
915
916         if (rp->page < HCI_MAX_PAGES)
917                 memcpy(hdev->features[rp->page], rp->features, 8);
918
919         return rp->status;
920 }
921
922 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
923                                         struct sk_buff *skb)
924 {
925         struct hci_rp_read_flow_control_mode *rp = data;
926
927         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
928
929         if (rp->status)
930                 return rp->status;
931
932         hdev->flow_ctl_mode = rp->mode;
933
934         return rp->status;
935 }
936
937 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
938                                   struct sk_buff *skb)
939 {
940         struct hci_rp_read_buffer_size *rp = data;
941
942         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
943
944         if (rp->status)
945                 return rp->status;
946
947         hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
948         hdev->sco_mtu  = rp->sco_mtu;
949         hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
950         hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
951
952         if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
953                 hdev->sco_mtu  = 64;
954                 hdev->sco_pkts = 8;
955         }
956
957         hdev->acl_cnt = hdev->acl_pkts;
958         hdev->sco_cnt = hdev->sco_pkts;
959
960         BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
961                hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
962
963         return rp->status;
964 }
965
966 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
967                               struct sk_buff *skb)
968 {
969         struct hci_rp_read_bd_addr *rp = data;
970
971         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
972
973         if (rp->status)
974                 return rp->status;
975
976         if (test_bit(HCI_INIT, &hdev->flags))
977                 bacpy(&hdev->bdaddr, &rp->bdaddr);
978
979         if (hci_dev_test_flag(hdev, HCI_SETUP))
980                 bacpy(&hdev->setup_addr, &rp->bdaddr);
981
982         return rp->status;
983 }
984
985 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
986                                          struct sk_buff *skb)
987 {
988         struct hci_rp_read_local_pairing_opts *rp = data;
989
990         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
991
992         if (rp->status)
993                 return rp->status;
994
995         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
996             hci_dev_test_flag(hdev, HCI_CONFIG)) {
997                 hdev->pairing_opts = rp->pairing_opts;
998                 hdev->max_enc_key_size = rp->max_key_size;
999         }
1000
1001         return rp->status;
1002 }
1003
1004 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
1005                                          struct sk_buff *skb)
1006 {
1007         struct hci_rp_read_page_scan_activity *rp = data;
1008
1009         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1010
1011         if (rp->status)
1012                 return rp->status;
1013
1014         if (test_bit(HCI_INIT, &hdev->flags)) {
1015                 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1016                 hdev->page_scan_window = __le16_to_cpu(rp->window);
1017         }
1018
1019         return rp->status;
1020 }
1021
1022 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1023                                           struct sk_buff *skb)
1024 {
1025         struct hci_ev_status *rp = data;
1026         struct hci_cp_write_page_scan_activity *sent;
1027
1028         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1029
1030         if (rp->status)
1031                 return rp->status;
1032
1033         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1034         if (!sent)
1035                 return rp->status;
1036
1037         hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1038         hdev->page_scan_window = __le16_to_cpu(sent->window);
1039
1040         return rp->status;
1041 }
1042
1043 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1044                                      struct sk_buff *skb)
1045 {
1046         struct hci_rp_read_page_scan_type *rp = data;
1047
1048         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1049
1050         if (rp->status)
1051                 return rp->status;
1052
1053         if (test_bit(HCI_INIT, &hdev->flags))
1054                 hdev->page_scan_type = rp->type;
1055
1056         return rp->status;
1057 }
1058
1059 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1060                                       struct sk_buff *skb)
1061 {
1062         struct hci_ev_status *rp = data;
1063         u8 *type;
1064
1065         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1066
1067         if (rp->status)
1068                 return rp->status;
1069
1070         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1071         if (type)
1072                 hdev->page_scan_type = *type;
1073
1074         return rp->status;
1075 }
1076
1077 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1078                                       struct sk_buff *skb)
1079 {
1080         struct hci_rp_read_data_block_size *rp = data;
1081
1082         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1083
1084         if (rp->status)
1085                 return rp->status;
1086
1087         hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1088         hdev->block_len = __le16_to_cpu(rp->block_len);
1089         hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1090
1091         hdev->block_cnt = hdev->num_blocks;
1092
1093         BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1094                hdev->block_cnt, hdev->block_len);
1095
1096         return rp->status;
1097 }
1098
/* Command complete for HCI_OP_READ_CLOCK: store either the local clock on
 * hdev or the clock (plus accuracy) of the connection the command named,
 * depending on the "which" parameter of the command we sent.
 */
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	/* Recover the parameters of the command this reply belongs to. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 selects the local clock; store it on hdev. */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	/* Otherwise the clock belongs to the connection with this handle. */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1132
1133 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1134                                      struct sk_buff *skb)
1135 {
1136         struct hci_rp_read_local_amp_info *rp = data;
1137
1138         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1139
1140         if (rp->status)
1141                 return rp->status;
1142
1143         hdev->amp_status = rp->amp_status;
1144         hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1145         hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1146         hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1147         hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1148         hdev->amp_type = rp->amp_type;
1149         hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1150         hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1151         hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1152         hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1153
1154         return rp->status;
1155 }
1156
1157 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1158                                        struct sk_buff *skb)
1159 {
1160         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1161
1162         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1163
1164         if (rp->status)
1165                 return rp->status;
1166
1167         hdev->inq_tx_power = rp->tx_power;
1168
1169         return rp->status;
1170 }
1171
1172 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1173                                              struct sk_buff *skb)
1174 {
1175         struct hci_rp_read_def_err_data_reporting *rp = data;
1176
1177         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1178
1179         if (rp->status)
1180                 return rp->status;
1181
1182         hdev->err_data_reporting = rp->err_data_reporting;
1183
1184         return rp->status;
1185 }
1186
1187 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1188                                               struct sk_buff *skb)
1189 {
1190         struct hci_ev_status *rp = data;
1191         struct hci_cp_write_def_err_data_reporting *cp;
1192
1193         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1194
1195         if (rp->status)
1196                 return rp->status;
1197
1198         cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1199         if (!cp)
1200                 return rp->status;
1201
1202         hdev->err_data_reporting = cp->err_data_reporting;
1203
1204         return rp->status;
1205 }
1206
/* Command complete for HCI_OP_PIN_CODE_REPLY: notify the management
 * interface of the outcome and, on success, remember the PIN length on
 * the matching ACL connection.
 */
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	/* mgmt is told about the result regardless of status. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* Recover the command parameters to learn which PIN was used. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1236
1237 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1238                                     struct sk_buff *skb)
1239 {
1240         struct hci_rp_pin_code_neg_reply *rp = data;
1241
1242         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1243
1244         hci_dev_lock(hdev);
1245
1246         if (hci_dev_test_flag(hdev, HCI_MGMT))
1247                 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1248                                                  rp->status);
1249
1250         hci_dev_unlock(hdev);
1251
1252         return rp->status;
1253 }
1254
1255 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1256                                      struct sk_buff *skb)
1257 {
1258         struct hci_rp_le_read_buffer_size *rp = data;
1259
1260         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1261
1262         if (rp->status)
1263                 return rp->status;
1264
1265         hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1266         hdev->le_pkts = rp->le_max_pkt;
1267
1268         hdev->le_cnt = hdev->le_pkts;
1269
1270         BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1271
1272         return rp->status;
1273 }
1274
1275 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1276                                         struct sk_buff *skb)
1277 {
1278         struct hci_rp_le_read_local_features *rp = data;
1279
1280         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1281
1282         if (rp->status)
1283                 return rp->status;
1284
1285         memcpy(hdev->le_features, rp->features, 8);
1286
1287         return rp->status;
1288 }
1289
1290 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1291                                       struct sk_buff *skb)
1292 {
1293         struct hci_rp_le_read_adv_tx_power *rp = data;
1294
1295         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1296
1297         if (rp->status)
1298                 return rp->status;
1299
1300         hdev->adv_tx_power = rp->tx_power;
1301
1302         return rp->status;
1303 }
1304
1305 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1306                                     struct sk_buff *skb)
1307 {
1308         struct hci_rp_user_confirm_reply *rp = data;
1309
1310         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1311
1312         hci_dev_lock(hdev);
1313
1314         if (hci_dev_test_flag(hdev, HCI_MGMT))
1315                 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1316                                                  rp->status);
1317
1318         hci_dev_unlock(hdev);
1319
1320         return rp->status;
1321 }
1322
1323 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1324                                         struct sk_buff *skb)
1325 {
1326         struct hci_rp_user_confirm_reply *rp = data;
1327
1328         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1329
1330         hci_dev_lock(hdev);
1331
1332         if (hci_dev_test_flag(hdev, HCI_MGMT))
1333                 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1334                                                      ACL_LINK, 0, rp->status);
1335
1336         hci_dev_unlock(hdev);
1337
1338         return rp->status;
1339 }
1340
1341 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1342                                     struct sk_buff *skb)
1343 {
1344         struct hci_rp_user_confirm_reply *rp = data;
1345
1346         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1347
1348         hci_dev_lock(hdev);
1349
1350         if (hci_dev_test_flag(hdev, HCI_MGMT))
1351                 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1352                                                  0, rp->status);
1353
1354         hci_dev_unlock(hdev);
1355
1356         return rp->status;
1357 }
1358
1359 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1360                                         struct sk_buff *skb)
1361 {
1362         struct hci_rp_user_confirm_reply *rp = data;
1363
1364         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1365
1366         hci_dev_lock(hdev);
1367
1368         if (hci_dev_test_flag(hdev, HCI_MGMT))
1369                 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1370                                                      ACL_LINK, 0, rp->status);
1371
1372         hci_dev_unlock(hdev);
1373
1374         return rp->status;
1375 }
1376
1377 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1378                                      struct sk_buff *skb)
1379 {
1380         struct hci_rp_read_local_oob_data *rp = data;
1381
1382         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1383
1384         return rp->status;
1385 }
1386
1387 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1388                                          struct sk_buff *skb)
1389 {
1390         struct hci_rp_read_local_oob_ext_data *rp = data;
1391
1392         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1393
1394         return rp->status;
1395 }
1396
/* Command complete for HCI_OP_LE_SET_RANDOM_ADDR: record the random
 * address the controller is now using and, if it is our RPA, re-arm the
 * RPA expiry timer.
 */
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The address is not in the reply; recover it from the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	/* If the programmed address is the current RPA, it is valid again:
	 * clear the expired flag and schedule the next rotation.
	 */
	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1426
1427 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1428                                     struct sk_buff *skb)
1429 {
1430         struct hci_ev_status *rp = data;
1431         struct hci_cp_le_set_default_phy *cp;
1432
1433         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1434
1435         if (rp->status)
1436                 return rp->status;
1437
1438         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1439         if (!cp)
1440                 return rp->status;
1441
1442         hci_dev_lock(hdev);
1443
1444         hdev->le_tx_def_phys = cp->tx_phys;
1445         hdev->le_rx_def_phys = cp->rx_phys;
1446
1447         hci_dev_unlock(hdev);
1448
1449         return rp->status;
1450 }
1451
/* Command complete for HCI_OP_LE_SET_ADV_SET_RAND_ADDR: record the random
 * address on the matching advertising instance and, if it is our RPA,
 * re-arm that instance's RPA expiry work.
 */
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Only update a real adv instance: handle 0x00 is expected to use
	 * HCI_OP_LE_SET_RANDOM_ADDR instead, since that works for both
	 * extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		/* If the programmed address is the current RPA, it is valid
		 * again: schedule the next rotation for this instance.
		 */
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1489
/* Command complete for HCI_OP_LE_REMOVE_ADV_SET: drop our bookkeeping for
 * the removed advertising instance and tell mgmt, attributing the removal
 * to the socket that issued the command.
 */
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The instance id is only in the command we sent, not the reply. */
	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}
1517
/* Command complete for HCI_OP_LE_CLEAR_ADV_SETS: the controller removed
 * all advertising sets, so drop every local adv instance and notify mgmt
 * for each one.
 */
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Only act if this reply matches a command we actually sent. */
	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	/* _safe variant: hci_remove_adv_instance() unlinks the entry. */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1548
1549 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1550                                         struct sk_buff *skb)
1551 {
1552         struct hci_rp_le_read_transmit_power *rp = data;
1553
1554         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1555
1556         if (rp->status)
1557                 return rp->status;
1558
1559         hdev->min_le_tx_power = rp->min_le_tx_power;
1560         hdev->max_le_tx_power = rp->max_le_tx_power;
1561
1562         return rp->status;
1563 }
1564
/* Command complete for HCI_OP_LE_SET_PRIVACY_MODE: record the privacy
 * mode we programmed on the matching connection parameters entry.
 */
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The target address/mode are only in the command we sent. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		/* WRITE_ONCE pairs with lockless readers of privacy_mode. */
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}
1591
/* Command complete for HCI_OP_LE_SET_ADV_ENABLE: track the HCI_LE_ADV
 * flag and, when advertising was enabled for a pending LE connection as
 * peripheral, arm the connection timeout.
 */
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The enable value is only in the command we sent. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1630
/* Command complete for HCI_OP_LE_SET_EXT_ADV_ENABLE: update per-instance
 * enabled state and the global HCI_LE_ADV flag. On enable, also arm the
 * LE connection timeout if a connection attempt is pending. On disable,
 * HCI_LE_ADV is only cleared once no instance remains enabled.
 */
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	/* The first adv set follows the fixed-size command header. */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		/* Periodic instances track enabled state elsewhere. */
		if (adv && !adv->periodic)
			adv->enabled = true;

		/* Arm a timeout for a pending peripheral connection. */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1695
1696 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1697                                    struct sk_buff *skb)
1698 {
1699         struct hci_cp_le_set_scan_param *cp;
1700         struct hci_ev_status *rp = data;
1701
1702         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1703
1704         if (rp->status)
1705                 return rp->status;
1706
1707         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1708         if (!cp)
1709                 return rp->status;
1710
1711         hci_dev_lock(hdev);
1712
1713         hdev->le_scan_type = cp->type;
1714
1715         hci_dev_unlock(hdev);
1716
1717         return rp->status;
1718 }
1719
1720 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1721                                        struct sk_buff *skb)
1722 {
1723         struct hci_cp_le_set_ext_scan_params *cp;
1724         struct hci_ev_status *rp = data;
1725         struct hci_cp_le_scan_phy_params *phy_param;
1726
1727         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1728
1729         if (rp->status)
1730                 return rp->status;
1731
1732         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1733         if (!cp)
1734                 return rp->status;
1735
1736         phy_param = (void *)cp->data;
1737
1738         hci_dev_lock(hdev);
1739
1740         hdev->le_scan_type = phy_param->type;
1741
1742         hci_dev_unlock(hdev);
1743
1744         return rp->status;
1745 }
1746
1747 static bool has_pending_adv_report(struct hci_dev *hdev)
1748 {
1749         struct discovery_state *d = &hdev->discovery;
1750
1751         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1752 }
1753
1754 static void clear_pending_adv_report(struct hci_dev *hdev)
1755 {
1756         struct discovery_state *d = &hdev->discovery;
1757
1758         bacpy(&d->last_adv_addr, BDADDR_ANY);
1759         d->last_adv_data_len = 0;
1760 }
1761
1762 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1763                                      u8 bdaddr_type, s8 rssi, u32 flags,
1764                                      u8 *data, u8 len)
1765 {
1766         struct discovery_state *d = &hdev->discovery;
1767
1768         if (len > max_adv_len(hdev))
1769                 return;
1770
1771         bacpy(&d->last_adv_addr, bdaddr);
1772         d->last_adv_addr_type = bdaddr_type;
1773         d->last_adv_rssi = rssi;
1774         d->last_adv_flags = flags;
1775         memcpy(d->last_adv_data, data, len);
1776         d->last_adv_data_len = len;
1777 }
1778
/* Common completion handling for both the legacy and extended LE scan
 * enable commands: track HCI_LE_SCAN, flush any buffered advertising
 * report when scanning stops, and keep discovery state in sync.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans merge adv + scan-rsp; start with a clean
		 * pending-report buffer.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
#ifndef TIZEN_BT /* The below line is kernel bug. */
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
#else
			hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
#endif
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1838
1839 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1840                                     struct sk_buff *skb)
1841 {
1842         struct hci_cp_le_set_scan_enable *cp;
1843         struct hci_ev_status *rp = data;
1844
1845         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1846
1847         if (rp->status)
1848                 return rp->status;
1849
1850         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1851         if (!cp)
1852                 return rp->status;
1853
1854         le_set_scan_enable_complete(hdev, cp->enable);
1855
1856         return rp->status;
1857 }
1858
1859 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1860                                         struct sk_buff *skb)
1861 {
1862         struct hci_cp_le_set_ext_scan_enable *cp;
1863         struct hci_ev_status *rp = data;
1864
1865         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1866
1867         if (rp->status)
1868                 return rp->status;
1869
1870         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1871         if (!cp)
1872                 return rp->status;
1873
1874         le_set_scan_enable_complete(hdev, cp->enable);
1875
1876         return rp->status;
1877 }
1878
1879 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1880                                       struct sk_buff *skb)
1881 {
1882         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1883
1884         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1885                    rp->num_of_sets);
1886
1887         if (rp->status)
1888                 return rp->status;
1889
1890         hdev->le_num_of_adv_sets = rp->num_of_sets;
1891
1892         return rp->status;
1893 }
1894
1895 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1896                                           struct sk_buff *skb)
1897 {
1898         struct hci_rp_le_read_accept_list_size *rp = data;
1899
1900         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1901
1902         if (rp->status)
1903                 return rp->status;
1904
1905         hdev->le_accept_list_size = rp->size;
1906
1907         return rp->status;
1908 }
1909
1910 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1911                                       struct sk_buff *skb)
1912 {
1913         struct hci_ev_status *rp = data;
1914
1915         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1916
1917         if (rp->status)
1918                 return rp->status;
1919
1920         hci_dev_lock(hdev);
1921         hci_bdaddr_list_clear(&hdev->le_accept_list);
1922         hci_dev_unlock(hdev);
1923
1924         return rp->status;
1925 }
1926
1927 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1928                                        struct sk_buff *skb)
1929 {
1930         struct hci_cp_le_add_to_accept_list *sent;
1931         struct hci_ev_status *rp = data;
1932
1933         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1934
1935         if (rp->status)
1936                 return rp->status;
1937
1938         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1939         if (!sent)
1940                 return rp->status;
1941
1942         hci_dev_lock(hdev);
1943         hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1944                             sent->bdaddr_type);
1945         hci_dev_unlock(hdev);
1946
1947         return rp->status;
1948 }
1949
1950 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1951                                          struct sk_buff *skb)
1952 {
1953         struct hci_cp_le_del_from_accept_list *sent;
1954         struct hci_ev_status *rp = data;
1955
1956         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1957
1958         if (rp->status)
1959                 return rp->status;
1960
1961         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1962         if (!sent)
1963                 return rp->status;
1964
1965         hci_dev_lock(hdev);
1966         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1967                             sent->bdaddr_type);
1968         hci_dev_unlock(hdev);
1969
1970         return rp->status;
1971 }
1972
1973 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1974                                           struct sk_buff *skb)
1975 {
1976         struct hci_rp_le_read_supported_states *rp = data;
1977
1978         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1979
1980         if (rp->status)
1981                 return rp->status;
1982
1983         memcpy(hdev->le_states, rp->le_states, 8);
1984
1985         return rp->status;
1986 }
1987
1988 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1989                                       struct sk_buff *skb)
1990 {
1991         struct hci_rp_le_read_def_data_len *rp = data;
1992
1993         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1994
1995         if (rp->status)
1996                 return rp->status;
1997
1998         hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1999         hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
2000
2001         return rp->status;
2002 }
2003
2004 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
2005                                        struct sk_buff *skb)
2006 {
2007         struct hci_cp_le_write_def_data_len *sent;
2008         struct hci_ev_status *rp = data;
2009
2010         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2011
2012         if (rp->status)
2013                 return rp->status;
2014
2015         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2016         if (!sent)
2017                 return rp->status;
2018
2019         hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2020         hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2021
2022         return rp->status;
2023 }
2024
2025 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2026                                        struct sk_buff *skb)
2027 {
2028         struct hci_cp_le_add_to_resolv_list *sent;
2029         struct hci_ev_status *rp = data;
2030
2031         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2032
2033         if (rp->status)
2034                 return rp->status;
2035
2036         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2037         if (!sent)
2038                 return rp->status;
2039
2040         hci_dev_lock(hdev);
2041         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2042                                 sent->bdaddr_type, sent->peer_irk,
2043                                 sent->local_irk);
2044         hci_dev_unlock(hdev);
2045
2046         return rp->status;
2047 }
2048
2049 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2050                                          struct sk_buff *skb)
2051 {
2052         struct hci_cp_le_del_from_resolv_list *sent;
2053         struct hci_ev_status *rp = data;
2054
2055         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2056
2057         if (rp->status)
2058                 return rp->status;
2059
2060         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2061         if (!sent)
2062                 return rp->status;
2063
2064         hci_dev_lock(hdev);
2065         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2066                             sent->bdaddr_type);
2067         hci_dev_unlock(hdev);
2068
2069         return rp->status;
2070 }
2071
2072 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2073                                       struct sk_buff *skb)
2074 {
2075         struct hci_ev_status *rp = data;
2076
2077         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2078
2079         if (rp->status)
2080                 return rp->status;
2081
2082         hci_dev_lock(hdev);
2083         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2084         hci_dev_unlock(hdev);
2085
2086         return rp->status;
2087 }
2088
2089 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2090                                           struct sk_buff *skb)
2091 {
2092         struct hci_rp_le_read_resolv_list_size *rp = data;
2093
2094         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2095
2096         if (rp->status)
2097                 return rp->status;
2098
2099         hdev->le_resolv_list_size = rp->size;
2100
2101         return rp->status;
2102 }
2103
2104 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2105                                                struct sk_buff *skb)
2106 {
2107         struct hci_ev_status *rp = data;
2108         __u8 *sent;
2109
2110         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2111
2112         if (rp->status)
2113                 return rp->status;
2114
2115         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2116         if (!sent)
2117                 return rp->status;
2118
2119         hci_dev_lock(hdev);
2120
2121         if (*sent)
2122                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2123         else
2124                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2125
2126         hci_dev_unlock(hdev);
2127
2128         return rp->status;
2129 }
2130
2131 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2132                                       struct sk_buff *skb)
2133 {
2134         struct hci_rp_le_read_max_data_len *rp = data;
2135
2136         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2137
2138         if (rp->status)
2139                 return rp->status;
2140
2141         hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2142         hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2143         hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2144         hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2145
2146         return rp->status;
2147 }
2148
2149 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2150                                          struct sk_buff *skb)
2151 {
2152         struct hci_cp_write_le_host_supported *sent;
2153         struct hci_ev_status *rp = data;
2154
2155         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2156
2157         if (rp->status)
2158                 return rp->status;
2159
2160         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2161         if (!sent)
2162                 return rp->status;
2163
2164         hci_dev_lock(hdev);
2165
2166         if (sent->le) {
2167                 hdev->features[1][0] |= LMP_HOST_LE;
2168                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2169         } else {
2170                 hdev->features[1][0] &= ~LMP_HOST_LE;
2171                 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2172                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2173         }
2174
2175         if (sent->simul)
2176                 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2177         else
2178                 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2179
2180         hci_dev_unlock(hdev);
2181
2182         return rp->status;
2183 }
2184
2185 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2186                                struct sk_buff *skb)
2187 {
2188         struct hci_cp_le_set_adv_param *cp;
2189         struct hci_ev_status *rp = data;
2190
2191         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2192
2193         if (rp->status)
2194                 return rp->status;
2195
2196         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2197         if (!cp)
2198                 return rp->status;
2199
2200         hci_dev_lock(hdev);
2201         hdev->adv_addr_type = cp->own_address_type;
2202         hci_dev_unlock(hdev);
2203
2204         return rp->status;
2205 }
2206
/* Command complete for HCI_OP_LE_SET_EXT_ADV_PARAMS.
 *
 * On success, record the own-address type and the selected TX power that
 * the controller reports in the response.  Handle 0 maps to the legacy
 * "instance 0" state kept directly in hdev; any other handle is looked up
 * in the per-instance advertising list.  Finally the advertising data is
 * regenerated, since it may embed the now-known TX power.
 */
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Parameters come from the command we sent; the response only
	 * carries status and tx_power.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}
2240
2241 #ifdef TIZEN_BT
/* Tizen-only (TIZEN_BT) vendor command complete for the RSSI-monitoring
 * enable command.  Forwards the raw response to the mgmt layer
 * unconditionally, including on error status, so userspace sees failures
 * too.
 */
static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb)
{
	struct hci_cc_rsp_enable_rssi *rp = data;

	BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
	       hdev->name, rp->status, rp->le_ext_opcode);

	mgmt_enable_rssi_cc(hdev, rp, rp->status);

	return rp->status;
}
2254
/* Tizen-only (TIZEN_BT) vendor command complete for the raw-RSSI read
 * command.  Hands the connection handle and RSSI value straight to the
 * mgmt layer; no state is cached here.
 */
static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_cc_rp_get_raw_rssi *rp = data;

	BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
	       hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);

	mgmt_raw_rssi_response(hdev, rp, rp->status);

	return rp->status;
}
2267
/* Tizen-only (TIZEN_BT) handler for the LE_RSSI_LINK_ALERT vendor
 * sub-event: relay the per-connection RSSI alert to the mgmt layer.
 * NOTE(review): skb->data is assumed to start at the alert payload here —
 * the caller has already pulled the enclosing sub-event header.
 */
static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;

	BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);

	mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
			    ev->rssi_dbm);
}
2278
/* Tizen-only (TIZEN_BT) second-level demux for the vendor-specific group
 * event: strip this sub-event header and dispatch on the extended
 * sub-code.  Unknown sub-codes are silently ignored.
 */
static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
	__u8 event_le_ext_sub_code;

	BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
	       LE_META_VENDOR_SPECIFIC_GROUP_EVENT);

	/* skb_pull() only advances skb->data, so reading through ev after
	 * the pull still sees the (unmoved) header bytes; the pull leaves
	 * skb positioned at the payload for the nested handler.
	 */
	skb_pull(skb, sizeof(*ev));
	event_le_ext_sub_code = ev->event_le_ext_sub_code;

	switch (event_le_ext_sub_code) {
	case LE_RSSI_LINK_ALERT:
		hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
		break;

	default:
		break;
	}
}
2300
/* Tizen-only (TIZEN_BT) top-level demux for vendor-specific HCI events:
 * strip the vendor event header and dispatch on the sub-code.  Unknown
 * sub-codes are silently ignored.
 */
static void hci_vendor_specific_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_vendor_specific *ev = (void *)skb->data;
	__u8 event_sub_code;

	BT_DBG("hci_vendor_specific_evt");

	/* Advance past the header; ev remains valid since skb_pull() does
	 * not move the underlying buffer.
	 */
	skb_pull(skb, sizeof(*ev));
	event_sub_code = ev->event_sub_code;

	switch (event_sub_code) {
	case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
		hci_vendor_specific_group_ext_evt(hdev, skb);
		break;

	default:
		break;
	}
}
2321 #endif
2322
2323 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2324                            struct sk_buff *skb)
2325 {
2326         struct hci_rp_read_rssi *rp = data;
2327         struct hci_conn *conn;
2328
2329         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2330
2331         if (rp->status)
2332                 return rp->status;
2333
2334         hci_dev_lock(hdev);
2335
2336         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2337         if (conn)
2338                 conn->rssi = rp->rssi;
2339
2340         hci_dev_unlock(hdev);
2341
2342         return rp->status;
2343 }
2344
2345 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2346                                struct sk_buff *skb)
2347 {
2348         struct hci_cp_read_tx_power *sent;
2349         struct hci_rp_read_tx_power *rp = data;
2350         struct hci_conn *conn;
2351
2352         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2353
2354         if (rp->status)
2355                 return rp->status;
2356
2357         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2358         if (!sent)
2359                 return rp->status;
2360
2361         hci_dev_lock(hdev);
2362
2363         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2364         if (!conn)
2365                 goto unlock;
2366
2367         switch (sent->type) {
2368         case 0x00:
2369                 conn->tx_power = rp->tx_power;
2370                 break;
2371         case 0x01:
2372                 conn->max_tx_power = rp->tx_power;
2373                 break;
2374         }
2375
2376 unlock:
2377         hci_dev_unlock(hdev);
2378         return rp->status;
2379 }
2380
2381 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2382                                       struct sk_buff *skb)
2383 {
2384         struct hci_ev_status *rp = data;
2385         u8 *mode;
2386
2387         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2388
2389         if (rp->status)
2390                 return rp->status;
2391
2392         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2393         if (mode)
2394                 hdev->ssp_debug_mode = *mode;
2395
2396         return rp->status;
2397 }
2398
2399 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2400 {
2401         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2402
2403         if (status) {
2404                 hci_conn_check_pending(hdev);
2405                 return;
2406         }
2407
2408         if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2409                 set_bit(HCI_INQUIRY, &hdev->flags);
2410 }
2411
/* Command status for HCI_OP_CREATE_CONN.
 *
 * Failure path: if a connection object in BT_CONNECT exists for the target
 * address, either tear it down (close, notify, delete) or — for status
 * 0x0c on the first or second attempt — park it in BT_CONNECT2 so the
 * connection can be retried.  NOTE(review): 0x0c is the HCI "Command
 * Disallowed" error per the Core Specification — confirm against the spec
 * edition this tree targets.
 *
 * Success path: make sure a connection object exists (in the unset-handle
 * state) so the later Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
						  HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
2449
/* Command status for HCI_OP_ADD_SCO — failure handling only.
 *
 * cp->handle refers to the parent ACL connection.  The SCO connection
 * being set up is reached through the ACL's link_list (first entry); if it
 * exists it is closed, the failure is confirmed to upper layers, and the
 * connection object is deleted.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl;
	struct hci_link *link;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Success is handled by the later connect complete event */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2486
2487 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2488 {
2489         struct hci_cp_auth_requested *cp;
2490         struct hci_conn *conn;
2491
2492         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2493
2494         if (!status)
2495                 return;
2496
2497         cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2498         if (!cp)
2499                 return;
2500
2501         hci_dev_lock(hdev);
2502
2503         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2504         if (conn) {
2505                 if (conn->state == BT_CONFIG) {
2506                         hci_connect_cfm(conn, status);
2507                         hci_conn_drop(conn);
2508                 }
2509         }
2510
2511         hci_dev_unlock(hdev);
2512 }
2513
2514 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2515 {
2516         struct hci_cp_set_conn_encrypt *cp;
2517         struct hci_conn *conn;
2518
2519         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2520
2521         if (!status)
2522                 return;
2523
2524         cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2525         if (!cp)
2526                 return;
2527
2528         hci_dev_lock(hdev);
2529
2530         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2531         if (conn) {
2532                 if (conn->state == BT_CONFIG) {
2533                         hci_connect_cfm(conn, status);
2534                         hci_conn_drop(conn);
2535                 }
2536         }
2537
2538         hci_dev_unlock(hdev);
2539 }
2540
2541 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2542                                     struct hci_conn *conn)
2543 {
2544         if (conn->state != BT_CONFIG || !conn->out)
2545                 return 0;
2546
2547         if (conn->pending_sec_level == BT_SECURITY_SDP)
2548                 return 0;
2549
2550         /* Only request authentication for SSP connections or non-SSP
2551          * devices with sec_level MEDIUM or HIGH or if MITM protection
2552          * is requested.
2553          */
2554         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2555             conn->pending_sec_level != BT_SECURITY_FIPS &&
2556             conn->pending_sec_level != BT_SECURITY_HIGH &&
2557             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2558                 return 0;
2559
2560         return 1;
2561 }
2562
2563 static int hci_resolve_name(struct hci_dev *hdev,
2564                                    struct inquiry_entry *e)
2565 {
2566         struct hci_cp_remote_name_req cp;
2567
2568         memset(&cp, 0, sizeof(cp));
2569
2570         bacpy(&cp.bdaddr, &e->data.bdaddr);
2571         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2572         cp.pscan_mode = e->data.pscan_mode;
2573         cp.clock_offset = e->data.clock_offset;
2574
2575         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2576 }
2577
2578 static bool hci_resolve_next_name(struct hci_dev *hdev)
2579 {
2580         struct discovery_state *discov = &hdev->discovery;
2581         struct inquiry_entry *e;
2582
2583         if (list_empty(&discov->resolve))
2584                 return false;
2585
2586         /* We should stop if we already spent too much time resolving names. */
2587         if (time_after(jiffies, discov->name_resolve_timeout)) {
2588                 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2589                 return false;
2590         }
2591
2592         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2593         if (!e)
2594                 return false;
2595
2596         if (hci_resolve_name(hdev, e) == 0) {
2597                 e->name_state = NAME_PENDING;
2598                 return true;
2599         }
2600
2601         return false;
2602 }
2603
/* Process a resolved (or failed) remote name during/after discovery.
 *
 * First notify mgmt of the device name for a genuinely connected conn
 * (the TIZEN_BT variant additionally pushes name updates for already-known
 * connections).  Then drive the discovery state machine: remove the entry
 * from the pending-resolve list, report the name, and either continue with
 * the next pending name or mark discovery stopped.  A NULL name means
 * resolution failed for this address.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

#ifdef TIZEN_BT
	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
		if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, conn, name, name_len);
		else
			mgmt_device_name_update(hdev, bdaddr, name, name_len);
	}
#else
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);
#endif

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested; finish up and transition to stopped */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of found devices whose names
	 * are pending, there is no need to continue resolving a next name as
	 * it will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);

	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	/* More names pending: stay in DISCOVERY_RESOLVING */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2659
/* Command status for HCI_OP_REMOTE_NAME_REQ — failure handling only.
 *
 * On failure, tell the discovery machinery the name could not be resolved
 * (name == NULL) and, if the request was implicitly gating an outgoing
 * authentication, issue the pending HCI_OP_AUTH_REQUESTED now, guarding
 * against a duplicate request with HCI_CONN_AUTH_PEND.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Only mgmt-managed controllers track pending name resolution */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2702
2703 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2704 {
2705         struct hci_cp_read_remote_features *cp;
2706         struct hci_conn *conn;
2707
2708         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2709
2710         if (!status)
2711                 return;
2712
2713         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2714         if (!cp)
2715                 return;
2716
2717         hci_dev_lock(hdev);
2718
2719         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2720         if (conn) {
2721                 if (conn->state == BT_CONFIG) {
2722                         hci_connect_cfm(conn, status);
2723                         hci_conn_drop(conn);
2724                 }
2725         }
2726
2727         hci_dev_unlock(hdev);
2728 }
2729
2730 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2731 {
2732         struct hci_cp_read_remote_ext_features *cp;
2733         struct hci_conn *conn;
2734
2735         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2736
2737         if (!status)
2738                 return;
2739
2740         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2741         if (!cp)
2742                 return;
2743
2744         hci_dev_lock(hdev);
2745
2746         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2747         if (conn) {
2748                 if (conn->state == BT_CONFIG) {
2749                         hci_connect_cfm(conn, status);
2750                         hci_conn_drop(conn);
2751                 }
2752         }
2753
2754         hci_dev_unlock(hdev);
2755 }
2756
/* Shared failure path for the (enhanced) setup-synchronous-connection
 * command-status handlers.
 *
 * handle identifies the parent ACL link; the SCO/eSCO connection being set
 * up is reached through that ACL's link_list (first entry).  If present,
 * mark it closed, confirm the failure to upper layers, and delete it.
 */
static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
				       __u8 status)
{
	struct hci_conn *acl;
	struct hci_link *link;

	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2781
2782 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2783 {
2784         struct hci_cp_setup_sync_conn *cp;
2785
2786         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2787
2788         if (!status)
2789                 return;
2790
2791         cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2792         if (!cp)
2793                 return;
2794
2795         hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2796 }
2797
2798 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2799 {
2800         struct hci_cp_enhanced_setup_sync_conn *cp;
2801
2802         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2803
2804         if (!status)
2805                 return;
2806
2807         cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2808         if (!cp)
2809                 return;
2810
2811         hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2812 }
2813
2814 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2815 {
2816         struct hci_cp_sniff_mode *cp;
2817         struct hci_conn *conn;
2818
2819         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2820
2821         if (!status)
2822                 return;
2823
2824         cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2825         if (!cp)
2826                 return;
2827
2828         hci_dev_lock(hdev);
2829
2830         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2831         if (conn) {
2832                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2833
2834                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2835                         hci_sco_setup(conn, status);
2836         }
2837
2838         hci_dev_unlock(hdev);
2839 }
2840
2841 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2842 {
2843         struct hci_cp_exit_sniff_mode *cp;
2844         struct hci_conn *conn;
2845
2846         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2847
2848         if (!status)
2849                 return;
2850
2851         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2852         if (!cp)
2853                 return;
2854
2855         hci_dev_lock(hdev);
2856
2857         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2858         if (conn) {
2859                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2860
2861                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2862                         hci_sco_setup(conn, status);
2863         }
2864
2865         hci_dev_unlock(hdev);
2866 }
2867
/* Command status handler for HCI_OP_DISCONNECT.
 *
 * Normally the actual cleanup happens in hci_disconn_complete_evt() once
 * HCI_EV_DISCONN_COMPLETE arrives; this handler only cleans up when the
 * command failed or when the controller is suspended.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		/* Command failed: report it to mgmt and notify sockets
		 * before the connection object is deleted below.
		 */
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			/* Restart advertising for this peripheral's
			 * advertising instance.
			 */
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		/* Drop the stored link key if it was marked for flushing */
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-queue auto-connectable devices for a pending LE connection */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect on link loss for this policy */
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
2949
2950 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2951 {
2952         /* When using controller based address resolution, then the new
2953          * address types 0x02 and 0x03 are used. These types need to be
2954          * converted back into either public address or random address type
2955          */
2956         switch (type) {
2957         case ADDR_LE_DEV_PUBLIC_RESOLVED:
2958                 if (resolved)
2959                         *resolved = true;
2960                 return ADDR_LE_DEV_PUBLIC;
2961         case ADDR_LE_DEV_RANDOM_RESOLVED:
2962                 if (resolved)
2963                         *resolved = true;
2964                 return ADDR_LE_DEV_RANDOM;
2965         }
2966
2967         if (resolved)
2968                 *resolved = false;
2969         return type;
2970 }
2971
2972 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2973                               u8 peer_addr_type, u8 own_address_type,
2974                               u8 filter_policy)
2975 {
2976         struct hci_conn *conn;
2977
2978         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2979                                        peer_addr_type);
2980         if (!conn)
2981                 return;
2982
2983         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2984
2985         /* Store the initiator and responder address information which
2986          * is needed for SMP. These values will not change during the
2987          * lifetime of the connection.
2988          */
2989         conn->init_addr_type = own_address_type;
2990         if (own_address_type == ADDR_LE_DEV_RANDOM)
2991                 bacpy(&conn->init_addr, &hdev->random_addr);
2992         else
2993                 bacpy(&conn->init_addr, &hdev->bdaddr);
2994
2995         conn->resp_addr_type = peer_addr_type;
2996         bacpy(&conn->resp_addr, peer_addr);
2997 }
2998
2999 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
3000 {
3001         struct hci_cp_le_create_conn *cp;
3002
3003         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3004
3005         /* All connection failure handling is taken care of by the
3006          * hci_conn_failed function which is triggered by the HCI
3007          * request completion callbacks used for connecting.
3008          */
3009         if (status)
3010                 return;
3011
3012         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
3013         if (!cp)
3014                 return;
3015
3016         hci_dev_lock(hdev);
3017
3018         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3019                           cp->own_address_type, cp->filter_policy);
3020
3021         hci_dev_unlock(hdev);
3022 }
3023
3024 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
3025 {
3026         struct hci_cp_le_ext_create_conn *cp;
3027
3028         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3029
3030         /* All connection failure handling is taken care of by the
3031          * hci_conn_failed function which is triggered by the HCI
3032          * request completion callbacks used for connecting.
3033          */
3034         if (status)
3035                 return;
3036
3037         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
3038         if (!cp)
3039                 return;
3040
3041         hci_dev_lock(hdev);
3042
3043         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3044                           cp->own_addr_type, cp->filter_policy);
3045
3046         hci_dev_unlock(hdev);
3047 }
3048
3049 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
3050 {
3051         struct hci_cp_le_read_remote_features *cp;
3052         struct hci_conn *conn;
3053
3054         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3055
3056         if (!status)
3057                 return;
3058
3059         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
3060         if (!cp)
3061                 return;
3062
3063         hci_dev_lock(hdev);
3064
3065         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3066         if (conn) {
3067                 if (conn->state == BT_CONFIG) {
3068                         hci_connect_cfm(conn, status);
3069                         hci_conn_drop(conn);
3070                 }
3071         }
3072
3073         hci_dev_unlock(hdev);
3074 }
3075
3076 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3077 {
3078         struct hci_cp_le_start_enc *cp;
3079         struct hci_conn *conn;
3080
3081         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3082
3083         if (!status)
3084                 return;
3085
3086         hci_dev_lock(hdev);
3087
3088         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3089         if (!cp)
3090                 goto unlock;
3091
3092         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3093         if (!conn)
3094                 goto unlock;
3095
3096         if (conn->state != BT_CONNECTED)
3097                 goto unlock;
3098
3099         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3100         hci_conn_drop(conn);
3101
3102 unlock:
3103         hci_dev_unlock(hdev);
3104 }
3105
3106 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3107 {
3108         struct hci_cp_switch_role *cp;
3109         struct hci_conn *conn;
3110
3111         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3112
3113         if (!status)
3114                 return;
3115
3116         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3117         if (!cp)
3118                 return;
3119
3120         hci_dev_lock(hdev);
3121
3122         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3123         if (conn)
3124                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3125
3126         hci_dev_unlock(hdev);
3127 }
3128
/* Event handler for HCI_EV_INQUIRY_COMPLETE.
 *
 * Wakes any waiters on the HCI_INQUIRY flag and advances the mgmt
 * discovery state machine: either start resolving cached names or mark
 * discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	/* Nothing more to do if no inquiry was in progress */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* The discovery state below is only managed via mgmt */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Cached entries still need name resolution; kick off a remote
	 * name request for the first one that needs it.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
3189
3190 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3191                                    struct sk_buff *skb)
3192 {
3193         struct hci_ev_inquiry_result *ev = edata;
3194         struct inquiry_data data;
3195         int i;
3196
3197         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3198                              flex_array_size(ev, info, ev->num)))
3199                 return;
3200
3201         bt_dev_dbg(hdev, "num %d", ev->num);
3202
3203         if (!ev->num)
3204                 return;
3205
3206         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3207                 return;
3208
3209         hci_dev_lock(hdev);
3210
3211         for (i = 0; i < ev->num; i++) {
3212                 struct inquiry_info *info = &ev->info[i];
3213                 u32 flags;
3214
3215                 bacpy(&data.bdaddr, &info->bdaddr);
3216                 data.pscan_rep_mode     = info->pscan_rep_mode;
3217                 data.pscan_period_mode  = info->pscan_period_mode;
3218                 data.pscan_mode         = info->pscan_mode;
3219                 memcpy(data.dev_class, info->dev_class, 3);
3220                 data.clock_offset       = info->clock_offset;
3221                 data.rssi               = HCI_RSSI_INVALID;
3222                 data.ssp_mode           = 0x00;
3223
3224                 flags = hci_inquiry_cache_update(hdev, &data, false);
3225
3226                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3227                                   info->dev_class, HCI_RSSI_INVALID,
3228                                   flags, NULL, 0, NULL, 0, 0);
3229         }
3230
3231         hci_dev_unlock(hdev);
3232 }
3233
/* Event handler for HCI_EV_CONN_COMPLETE.
 *
 * Finalizes a pending BR/EDR (or SCO) connection: assigns the handle,
 * moves the connection into BT_CONFIG/BT_CONNECTED, requests remote
 * features, and performs failure cleanup via hci_conn_failed().
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add_unset(hdev, ev->link_type,
						  &ev->bdaddr, HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* For SCO, fall back to a pending eSCO connection
			 * to the same address and downgrade its type.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		/* hci_conn_set_handle() can itself fail; treat that as a
		 * connection failure below.
		 */
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status)
			goto done;

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Keep incoming unpaired legacy (non-SSP) links
			 * around longer to allow pairing to complete.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		/* Mirror the adapter-wide auth/encrypt policy on the conn */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			/* Let the driver prepare for CVSD audio if it has
			 * a notify hook.
			 */
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
3364
3365 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3366 {
3367         struct hci_cp_reject_conn_req cp;
3368
3369         bacpy(&cp.bdaddr, bdaddr);
3370         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3371         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3372 }
3373
/* Event handler for HCI_EV_CONN_REQUEST.
 *
 * Applies the reject/accept policy to an incoming connection request
 * and either rejects it, accepts it immediately, or defers the accept
 * decision to the upper layer (HCI_PROTO_DEFER).
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Let the registered protocols veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	/* Explicitly blocked addresses are always rejected */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class from the request */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
					  HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		/* Accept ACL (or plain SCO on non-eSCO controllers)
		 * right away.
		 */
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Accept a synchronous connection with default settings */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Defer the accept decision to the upper layer */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3481
3482 static u8 hci_to_mgmt_reason(u8 err)
3483 {
3484         switch (err) {
3485         case HCI_ERROR_CONNECTION_TIMEOUT:
3486                 return MGMT_DEV_DISCONN_TIMEOUT;
3487         case HCI_ERROR_REMOTE_USER_TERM:
3488         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3489         case HCI_ERROR_REMOTE_POWER_OFF:
3490                 return MGMT_DEV_DISCONN_REMOTE;
3491         case HCI_ERROR_LOCAL_HOST_TERM:
3492                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3493         default:
3494                 return MGMT_DEV_DISCONN_UNKNOWN;
3495         }
3496 }
3497
/* Event handler for HCI_EV_DISCONN_COMPLETE.
 *
 * Reports the disconnection to mgmt, re-queues auto-connectable LE
 * devices, re-enables advertising for peripheral LE links, and finally
 * deletes the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		/* Disconnect failed; keep the connection object alive */
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* An earlier auth failure overrides the HCI reason code */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		/* Drop the stored link key if it was marked for flushing */
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-queue auto-connectable devices for a pending LE connection */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect on link loss for this policy */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			hci_update_passive_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3582
/* Event handler for HCI_EV_AUTH_COMPLETE.
 *
 * Updates the connection's auth state, notifies mgmt on failure, and
 * continues with encryption setup where one is expected or pending.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Authentication succeeded; promote to the pending
		 * security level.
		 */
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		/* Remember key-missing failures for later disconnect
		 * reason reporting.
		 */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links get encrypted before being reported
			 * as connected.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request was waiting for authentication to finish */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3646
/* Event handler for HCI_EV_REMOTE_NAME.
 *
 * Feeds the resolved name (or the failure) into the pending-name logic
 * used by mgmt discovery, then starts authentication on the connection
 * if one is needed for an outgoing link.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Pending-name bookkeeping is only done when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3689
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's security flags to match the new encryption
 * state, enforces the negotiated link security requirements, reads the
 * encryption key size for encrypted ACL links when supported, and
 * configures the authenticated payload timeout where applicable.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 indicates AES-CCM on BR/EDR;
			 * encrypted LE links are always treated as AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		cp.handle = cpu_to_le16(conn->handle);
		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* The command was queued successfully; defer the
		 * hci_encrypt_cfm() notification to its completion.
		 */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
				 sizeof(cp), &cp))
			bt_dev_err(hdev, "write auth payload timeout failed");
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3801
3802 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3803                                              struct sk_buff *skb)
3804 {
3805         struct hci_ev_change_link_key_complete *ev = data;
3806         struct hci_conn *conn;
3807
3808         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3809
3810         hci_dev_lock(hdev);
3811
3812         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3813         if (conn) {
3814                 if (!ev->status)
3815                         set_bit(HCI_CONN_SECURE, &conn->flags);
3816
3817                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3818
3819                 hci_key_change_cfm(conn, ev->status);
3820         }
3821
3822         hci_dev_unlock(hdev);
3823 }
3824
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores remote feature page 0 and continues connection setup: either
 * queries extended features (page 1), requests the remote name, or
 * completes the connection, depending on what both sides support.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The setup steps below only apply while the link is configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 1 too */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting the device connected
	 * to mgmt; otherwise report it connected straight away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* If no authentication is required the connection is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3873
3874 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3875 {
3876         cancel_delayed_work(&hdev->cmd_timer);
3877
3878         rcu_read_lock();
3879         if (!test_bit(HCI_RESET, &hdev->flags)) {
3880                 if (ncmd) {
3881                         cancel_delayed_work(&hdev->ncmd_timer);
3882                         atomic_set(&hdev->cmd_cnt, 1);
3883                 } else {
3884                         if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3885                                 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3886                                                    HCI_NCMD_TIMEOUT);
3887                 }
3888         }
3889         rcu_read_unlock();
3890 }
3891
3892 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3893                                         struct sk_buff *skb)
3894 {
3895         struct hci_rp_le_read_buffer_size_v2 *rp = data;
3896
3897         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3898
3899         if (rp->status)
3900                 return rp->status;
3901
3902         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3903         hdev->le_pkts  = rp->acl_max_pkt;
3904         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3905         hdev->iso_pkts = rp->iso_max_pkt;
3906
3907         hdev->le_cnt  = hdev->le_pkts;
3908         hdev->iso_cnt = hdev->iso_pkts;
3909
3910         BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3911                hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3912
3913         return rp->status;
3914 }
3915
3916 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3917 {
3918         struct hci_conn *conn, *tmp;
3919
3920         lockdep_assert_held(&hdev->lock);
3921
3922         list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3923                 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3924                     conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3925                         continue;
3926
3927                 if (HCI_CONN_HANDLE_UNSET(conn->handle))
3928                         hci_conn_failed(conn, status);
3929         }
3930 }
3931
/* Handle Command Complete for HCI_OP_LE_SET_CIG_PARAMS.
 *
 * Validates the response against the command that was sent, assigns the
 * returned connection handles to the matching CIS connections, and kicks
 * off creation of any CIS that was waiting for its handle.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_cp_le_set_cig_params *cp;
	struct hci_conn *conn;
	u8 status = rp->status;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Cross-check the response against the sent command; a mismatch
	 * (or a missing command copy) is treated as a failure.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
			    rp->cig_id != cp->cig_id)) {
		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
		status = HCI_ERROR_UNSPECIFIED;
	}

	hci_dev_lock(hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
	 *
	 * If the Status return parameter is non-zero, then the state of the CIG
	 * and its CIS configurations shall not be changed by the command. If
	 * the CIG did not already exist, it shall not be created.
	 */
	if (status) {
		/* Keep current configuration, fail only the unbound CIS */
		hci_unbound_cis_failed(hdev, rp->cig_id, status);
		goto unlock;
	}

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
	 *
	 * If the Status return parameter is zero, then the Controller shall
	 * set the Connection_Handle arrayed return parameter to the connection
	 * handle(s) corresponding to the CIS configurations specified in
	 * the CIS_IDs command parameter, in the same order.
	 */
	for (i = 0; i < rp->num_handles; ++i) {
		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
						cp->cis[i].cis_id);
		/* Skip a CIS with no peer address bound */
		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
			continue;

		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
			continue;

		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
			continue;

		/* A CIS in BT_CONNECT is waiting for LE Create CIS */
		if (conn->state == BT_CONNECT)
			pending = true;
	}

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);

	return rp->status;
}
3996
/* Handle Command Complete for HCI_OP_LE_SETUP_ISO_PATH.
 *
 * On failure the ISO connection is torn down. On success the connection
 * is confirmed to upper layers once the relevant data path direction has
 * been configured.
 */
static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Nothing to match the response against without the sent command */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (rp->status) {
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm connection if output only */
		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm connection since conn->iso_qos is always configured
		 * last.
		 */
		hci_connect_cfm(conn, rp->status);
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
4042
/* Handle Command Status for HCI_OP_LE_CREATE_BIG.
 *
 * Trace-only stub; the actual outcome is presumably processed when the
 * corresponding BIG completion event arrives (handler not visible here).
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
4047
4048 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
4049                                    struct sk_buff *skb)
4050 {
4051         struct hci_ev_status *rp = data;
4052         struct hci_cp_le_set_per_adv_params *cp;
4053
4054         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4055
4056         if (rp->status)
4057                 return rp->status;
4058
4059         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
4060         if (!cp)
4061                 return rp->status;
4062
4063         /* TODO: set the conn state */
4064         return rp->status;
4065 }
4066
/* Handle Command Complete for HCI_OP_LE_SET_PER_ADV_ENABLE.
 *
 * Keeps the HCI_LE_PER_ADV device flag and the per-instance enabled
 * state in sync with the controller's periodic advertising state.
 */
static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_enable *cp;
	struct adv_info *adv = NULL, *n;
	u8 per_adv_cnt = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);

	if (cp->enable) {
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);

		if (adv)
			adv->enabled = true;
	} else {
		/* If just one instance was disabled check if there are
		 * any other instance enabled before clearing HCI_LE_PER_ADV.
		 * The current periodic adv instance will be marked as
		 * disabled once extended advertising is also disabled.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
					 list) {
			if (adv->periodic && adv->enabled)
				per_adv_cnt++;
		}

		/* More than one instance still enabled: keep the flag */
		if (per_adv_cnt > 1)
			goto unlock;

		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
	}

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
4116
/* Helpers to declare entries of the command-complete dispatch table:
 * HCI_CC_VL    - handler with a variable-length response (min/max bounds)
 * HCI_CC       - handler with a fixed-length response
 * HCI_CC_STATUS - handler whose response is just a status byte
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4130
/* Dispatch table mapping Command Complete opcodes to their handlers,
 * together with the expected response length bounds enforced by
 * hci_cc_func() before the handler runs.
 */
static const struct hci_cc {
	u16  op;
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
#ifdef TIZEN_BT
	HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
	       sizeof(struct hci_cc_rsp_enable_rssi)),
	HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
	       sizeof(struct hci_cc_rp_get_raw_rssi)),
#endif
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};
4311
4312 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4313                       struct sk_buff *skb)
4314 {
4315         void *data;
4316
4317         if (skb->len < cc->min_len) {
4318                 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4319                            cc->op, skb->len, cc->min_len);
4320                 return HCI_ERROR_UNSPECIFIED;
4321         }
4322
4323         /* Just warn if the length is over max_len size it still be possible to
4324          * partially parse the cc so leave to callback to decide if that is
4325          * acceptable.
4326          */
4327         if (skb->len > cc->max_len)
4328                 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4329                             cc->op, skb->len, cc->max_len);
4330
4331         data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4332         if (!data)
4333                 return HCI_ERROR_UNSPECIFIED;
4334
4335         return cc->func(hdev, data, skb);
4336 }
4337
/* Handle HCI Command Complete event.
 *
 * Dispatches the response to the matching handler in hci_cc_table,
 * extracts the command status, updates the command credit accounting and
 * wakes the command queue when more commands may be sent.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op == *opcode) {
			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
			break;
		}
	}

	if (i == ARRAY_SIZE(hci_cc_table)) {
		/* Unknown opcode, assume byte 0 contains the status, so
		 * that e.g. __hci_cmd_sync() properly returns errors
		 * for vendor specific commands send by HCI drivers.
		 * If a vendor doesn't actually follow this convention we may
		 * need to introduce a vendor CC table in order to properly set
		 * the status.
		 */
		*status = skb->data[0];
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* Bail out before waking the command queue if a command is still
	 * marked as pending.
	 */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Kick the command worker if credits and queued commands remain */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4382
/* Handle Command Status for HCI_OP_LE_CREATE_CIS.
 *
 * Only acts on failure: every CIS connection referenced by the failed
 * command is closed and deleted, and pending CIS creation is
 * re-triggered if any of them had HCI_CONN_CREATE_CIS set.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	/* Note: the loop consumes cp->num_cis (the stored copy of the sent
	 * command) while walking the cis array.
	 */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
					       &conn->flags))
				pending = true;
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
4423
/* HCI_CS() builds one entry of hci_cs_table, mapping a command opcode
 * to the handler run when a Command Status event arrives for it.
 */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table for Command Status events. Only commands that need
 * opcode-specific status handling (mostly connection setup commands,
 * which complete through later events rather than Command Complete)
 * are listed; all other opcodes fall through with no handler.
 */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4457
/* Handle HCI Command Status event.
 *
 * Runs the per-opcode handler from hci_cs_table, returns command
 * credits via handle_cmd_cnt_and_timer() and, where appropriate,
 * completes the pending request so the submitter is woken. The opcode
 * and status are reported back to the caller through the out pointers.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	/* Dispatch to the opcode-specific handler, if one is registered */
	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		/* A still-pending command at this point is unexpected;
		 * bail out without kicking cmd_work.
		 */
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	/* Send the next queued command if credits are available */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4499
/* Handle HCI Hardware Error event: record the controller's error code
 * and schedule the error_reset work to recover the device.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = data;

	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);

#ifdef TIZEN_BT
	/* Tizen-specific: also notify management user space of the error */
	hci_dev_lock(hdev);
	mgmt_hardware_error(hdev, ev->code);
	hci_dev_unlock(hdev);
#endif
	hdev->hw_error_code = ev->code;

	/* error_reset work performs the actual controller recovery */
	queue_work(hdev->req_workqueue, &hdev->error_reset);
}
4516
4517 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4518                                 struct sk_buff *skb)
4519 {
4520         struct hci_ev_role_change *ev = data;
4521         struct hci_conn *conn;
4522
4523         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4524
4525         hci_dev_lock(hdev);
4526
4527         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4528         if (conn) {
4529                 if (!ev->status)
4530                         conn->role = ev->role;
4531
4532                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4533
4534                 hci_role_switch_cfm(conn, ev->status, ev->role);
4535         }
4536
4537         hci_dev_unlock(hdev);
4538 }
4539
/* Handle HCI Number of Completed Packets event.
 *
 * The controller reports, per connection handle, how many queued
 * packets it has finished transmitting. Return those credits to the
 * matching per-link-type counters (clamped to the advertised buffer
 * count) and kick tx_work so pending traffic can be sent.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = data;
	int i;

	/* Verify the skb actually carries ev->num handle/count pairs */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
			     flex_array_size(ev, handles, ev->num)))
		return;

	/* This event is only valid with packet-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num %d", ev->num);

	for (i = 0; i < ev->num; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits instead.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		case ISO_LINK:
			/* ISO falls back to the LE pool, then the ACL
			 * pool, when no dedicated ISO buffers exist.
			 */
			if (hdev->iso_pkts) {
				hdev->iso_cnt += count;
				if (hdev->iso_cnt > hdev->iso_pkts)
					hdev->iso_cnt = hdev->iso_pkts;
			} else if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4621
4622 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4623                                                  __u16 handle)
4624 {
4625         struct hci_chan *chan;
4626
4627         switch (hdev->dev_type) {
4628         case HCI_PRIMARY:
4629                 return hci_conn_hash_lookup_handle(hdev, handle);
4630         case HCI_AMP:
4631                 chan = hci_chan_lookup_handle(hdev, handle);
4632                 if (chan)
4633                         return chan->conn;
4634                 break;
4635         default:
4636                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4637                 break;
4638         }
4639
4640         return NULL;
4641 }
4642
/* Handle HCI Number of Completed Data Blocks event.
 *
 * Block-based counterpart of Number of Completed Packets, used when the
 * controller advertises block-based flow control (AMP). Returns the
 * completed block credits to the shared block counter, clamped to the
 * advertised total, then kicks tx_work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = data;
	int i;

	/* Verify the skb actually carries ev->num_hndl entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
			     flex_array_size(ev, handles, ev->num_hndl)))
		return;

	/* This event is only valid with block-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d",
			   hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
		   ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may refer to an AMP logical channel */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4693
/* Handle HCI Mode Change event: track active/sniff transitions on a
 * connection and keep the power-save flag in sync.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only update the power-save flag for mode changes that
		 * were not initiated locally (no change pending).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* A SCO setup may have been deferred until the ACL link
		 * left sniff mode; run it now.
		 */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
4722
/* Handle HCI PIN Code Request event.
 *
 * Refuses pairing when the device is not bondable and the remote side
 * initiated authentication; otherwise forwards the request to user
 * space via mgmt so it can supply the PIN.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* hold/drop around the assignment extends the disconnect
		 * timeout to the pairing timeout without keeping a
		 * reference.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		/* Not bondable and the remote started it: reject */
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* Request a 16-digit PIN when high security is required */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
4761
4762 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4763 {
4764         if (key_type == HCI_LK_CHANGED_COMBINATION)
4765                 return;
4766
4767         conn->pin_length = pin_len;
4768         conn->key_type = key_type;
4769
4770         switch (key_type) {
4771         case HCI_LK_LOCAL_UNIT:
4772         case HCI_LK_REMOTE_UNIT:
4773         case HCI_LK_DEBUG_COMBINATION:
4774                 return;
4775         case HCI_LK_COMBINATION:
4776                 if (pin_len == 16)
4777                         conn->pending_sec_level = BT_SECURITY_HIGH;
4778                 else
4779                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4780                 break;
4781         case HCI_LK_UNAUTH_COMBINATION_P192:
4782         case HCI_LK_UNAUTH_COMBINATION_P256:
4783                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4784                 break;
4785         case HCI_LK_AUTH_COMBINATION_P192:
4786                 conn->pending_sec_level = BT_SECURITY_HIGH;
4787                 break;
4788         case HCI_LK_AUTH_COMBINATION_P256:
4789                 conn->pending_sec_level = BT_SECURITY_FIPS;
4790                 break;
4791         }
4792 }
4793
/* Handle HCI Link Key Request event.
 *
 * Looks up a stored link key for the remote address and replies with
 * it, unless the key's strength is insufficient for the authentication
 * requirements of the pending connection, in which case a negative
 * reply forces fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	/* Key storage is only maintained when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key cannot satisfy a request for
		 * MITM protection (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		/* A combination key from a short PIN is too weak for
		 * high/FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4851
/* Handle HCI Link Key Notification event.
 *
 * Stores the new link key for the connection and notifies user space.
 * All-zero keys are rejected and the link dropped (CVE-2020-26555
 * mitigation), and debug keys are only kept when explicitly allowed.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len stays 0 here even though conn->pin_length
	 * is available — presumably the stored key's pin_len is fixed up
	 * elsewhere; confirm against hci_add_link_key.
	 */
	u8 pin_len = 0;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Ignore NULL link key against CVE-2020-26555 */
	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
			   &ev->bdaddr);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* hold/drop around the assignment adjusts the disconnect timeout
	 * without keeping a reference.
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
				ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Mark non-persistent keys for flushing on disconnect */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4921
4922 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4923                                  struct sk_buff *skb)
4924 {
4925         struct hci_ev_clock_offset *ev = data;
4926         struct hci_conn *conn;
4927
4928         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4929
4930         hci_dev_lock(hdev);
4931
4932         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4933         if (conn && !ev->status) {
4934                 struct inquiry_entry *ie;
4935
4936                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4937                 if (ie) {
4938                         ie->data.clock_offset = ev->clock_offset;
4939                         ie->timestamp = jiffies;
4940                 }
4941         }
4942
4943         hci_dev_unlock(hdev);
4944 }
4945
4946 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4947                                     struct sk_buff *skb)
4948 {
4949         struct hci_ev_pkt_type_change *ev = data;
4950         struct hci_conn *conn;
4951
4952         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4953
4954         hci_dev_lock(hdev);
4955
4956         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4957         if (conn && !ev->status)
4958                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4959
4960         hci_dev_unlock(hdev);
4961 }
4962
4963 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4964                                    struct sk_buff *skb)
4965 {
4966         struct hci_ev_pscan_rep_mode *ev = data;
4967         struct inquiry_entry *ie;
4968
4969         bt_dev_dbg(hdev, "");
4970
4971         hci_dev_lock(hdev);
4972
4973         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4974         if (ie) {
4975                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4976                 ie->timestamp = jiffies;
4977         }
4978
4979         hci_dev_unlock(hdev);
4980 }
4981
/* Handle HCI Inquiry Result with RSSI event.
 *
 * Two wire formats exist for this event: one with a pscan_mode field
 * per response (inquiry_info_rssi_pscan) and one without
 * (inquiry_info_rssi). The total skb length decides which format the
 * controller used. Each response updates the inquiry cache and is
 * reported to user space via mgmt_device_found().
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	/* Periodic inquiry results are not reported */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Variant with the extra pscan_mode field per response */
	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	/* Variant without pscan_mode */
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		/* Length matches neither format: reject the event */
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}
5068
/* Handle HCI Read Remote Extended Features Complete event.
 *
 * Caches the remote feature page on the connection; for page 1 the
 * remote host's SSP/SC support flags are mirrored into the connection
 * flags and inquiry cache. If the connection is still being configured
 * this then drives the next setup step (remote name request or
 * connected notification) and finishes setup when no outgoing
 * authentication is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the feature page, guarding against out-of-range pages */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If mgmt has not been told about the connection yet, resolve
	 * the remote name first; otherwise announce the connection.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* Setup is complete unless outgoing authentication is pending */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5132
5133 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5134                                        struct sk_buff *skb)
5135 {
5136         struct hci_ev_sync_conn_complete *ev = data;
5137         struct hci_conn *conn;
5138         u8 status = ev->status;
5139
5140         switch (ev->link_type) {
5141         case SCO_LINK:
5142         case ESCO_LINK:
5143                 break;
5144         default:
5145                 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5146                  * for HCI_Synchronous_Connection_Complete is limited to
5147                  * either SCO or eSCO
5148                  */
5149                 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5150                 return;
5151         }
5152
5153         bt_dev_dbg(hdev, "status 0x%2.2x", status);
5154
5155         hci_dev_lock(hdev);
5156
5157         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5158         if (!conn) {
5159                 if (ev->link_type == ESCO_LINK)
5160                         goto unlock;
5161
5162                 /* When the link type in the event indicates SCO connection
5163                  * and lookup of the connection object fails, then check
5164                  * if an eSCO connection object exists.
5165                  *
5166                  * The core limits the synchronous connections to either
5167                  * SCO or eSCO. The eSCO connection is preferred and tried
5168                  * to be setup first and until successfully established,
5169                  * the link type will be hinted as eSCO.
5170                  */
5171                 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5172                 if (!conn)
5173                         goto unlock;
5174         }
5175
5176         /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5177          * Processing it more than once per connection can corrupt kernel memory.
5178          *
5179          * As the connection handle is set here for the first time, it indicates
5180          * whether the connection is already set up.
5181          */
5182         if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5183                 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5184                 goto unlock;
5185         }
5186
5187         switch (status) {
5188         case 0x00:
5189                 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
5190                 if (status) {
5191                         conn->state = BT_CLOSED;
5192                         break;
5193                 }
5194
5195                 conn->state  = BT_CONNECTED;
5196                 conn->type   = ev->link_type;
5197
5198                 hci_debugfs_create_conn(conn);
5199                 hci_conn_add_sysfs(conn);
5200                 break;
5201
5202         case 0x10:      /* Connection Accept Timeout */
5203         case 0x0d:      /* Connection Rejected due to Limited Resources */
5204         case 0x11:      /* Unsupported Feature or Parameter Value */
5205         case 0x1c:      /* SCO interval rejected */
5206         case 0x1a:      /* Unsupported Remote Feature */
5207         case 0x1e:      /* Invalid LMP Parameters */
5208         case 0x1f:      /* Unspecified error */
5209         case 0x20:      /* Unsupported LMP Parameter value */
5210                 if (conn->out) {
5211                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5212                                         (hdev->esco_type & EDR_ESCO_MASK);
5213                         if (hci_setup_sync(conn, conn->parent->handle))
5214                                 goto unlock;
5215                 }
5216                 fallthrough;
5217
5218         default:
5219                 conn->state = BT_CLOSED;
5220                 break;
5221         }
5222
5223         bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5224         /* Notify only in case of SCO over HCI transport data path which
5225          * is zero and non-zero value shall be non-HCI transport data path
5226          */
5227         if (conn->codec.data_path == 0 && hdev->notify) {
5228                 switch (ev->air_mode) {
5229                 case 0x02:
5230                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5231                         break;
5232                 case 0x03:
5233                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5234                         break;
5235                 }
5236         }
5237
5238         hci_connect_cfm(conn, status);
5239         if (status)
5240                 hci_conn_del(conn);
5241
5242 unlock:
5243         hci_dev_unlock(hdev);
5244 }
5245
5246 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5247 {
5248         size_t parsed = 0;
5249
5250         while (parsed < eir_len) {
5251                 u8 field_len = eir[0];
5252
5253                 if (field_len == 0)
5254                         return parsed;
5255
5256                 parsed += field_len + 1;
5257                 eir += field_len + 1;
5258         }
5259
5260         return eir_len;
5261 }
5262
/* HCI_Extended_Inquiry_Result event: one or more discovered devices,
 * each entry carrying RSSI and an Extended Inquiry Response (EIR)
 * payload. Each result updates the inquiry cache and is forwarded to
 * the management interface as a device-found event.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	/* Verify the skb really contains ev->num result entries before
	 * touching the flexible array.
	 */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Results from periodic inquiry are not reported */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		/* Extended inquiry results are only sent by SSP-capable
		 * remotes, so mark SSP as supported.
		 */
		data.ssp_mode		= 0x01;

		/* With mgmt in use, a remote name request can be skipped
		 * when the EIR data already carries the complete name.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		/* Trim the EIR buffer to its significant length */
		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
5317
/* HCI_Encryption_Key_Refresh_Complete event: the encryption key of an
 * existing connection has been refreshed. Only LE links are processed
 * here; for BR/EDR the equivalent steps happen via the auth_complete
 * event.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* On success the pending security level becomes effective */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on a live connection is treated as an
	 * authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5367
5368 static u8 hci_get_auth_req(struct hci_conn *conn)
5369 {
5370         /* If remote requests no-bonding follow that lead */
5371         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5372             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5373                 return conn->remote_auth | (conn->auth_type & 0x01);
5374
5375         /* If both remote and local have enough IO capabilities, require
5376          * MITM protection
5377          */
5378         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5379             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5380                 return conn->remote_auth | 0x01;
5381
5382         /* No MITM protection possible so ignore remote requirement */
5383         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5384 }
5385
/* Return the OOB_Data_Present value to use in an IO Capability Reply
 * for this connection: 0x00 = no usable OOB data, 0x01 = P-192 data
 * present, 0x02 = P-256 data present. The result depends on the
 * stored OOB data and on Secure Connections / SC Only mode.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
5427
/* HCI_IO_Capability_Request event: the controller needs our IO
 * capability, OOB data presence and authentication requirements to
 * continue Secure Simple Pairing. Reply with an IO Capability Reply,
 * or a Negative Reply when pairing is not currently allowed.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* Only react for known connections that have SSP enabled */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Keep the connection alive while pairing is in progress */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5497
5498 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5499                                   struct sk_buff *skb)
5500 {
5501         struct hci_ev_io_capa_reply *ev = data;
5502         struct hci_conn *conn;
5503
5504         bt_dev_dbg(hdev, "");
5505
5506         hci_dev_lock(hdev);
5507
5508         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5509         if (!conn)
5510                 goto unlock;
5511
5512         conn->remote_cap = ev->capability;
5513         conn->remote_auth = ev->authentication;
5514
5515 unlock:
5516         hci_dev_unlock(hdev);
5517 }
5518
/* HCI_User_Confirmation_Request event: decide whether to auto-accept
 * the numeric comparison, auto-reject it, or hand the decision to
 * user space via mgmt (optionally with confirm_hint set so user space
 * asks for authorization rather than showing the passkey).
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept via a delayed work item */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5603
5604 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5605                                          struct sk_buff *skb)
5606 {
5607         struct hci_ev_user_passkey_req *ev = data;
5608
5609         bt_dev_dbg(hdev, "");
5610
5611         if (hci_dev_test_flag(hdev, HCI_MGMT))
5612                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5613 }
5614
5615 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5616                                         struct sk_buff *skb)
5617 {
5618         struct hci_ev_user_passkey_notify *ev = data;
5619         struct hci_conn *conn;
5620
5621         bt_dev_dbg(hdev, "");
5622
5623         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5624         if (!conn)
5625                 return;
5626
5627         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5628         conn->passkey_entered = 0;
5629
5630         if (hci_dev_test_flag(hdev, HCI_MGMT))
5631                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5632                                          conn->dst_type, conn->passkey_notify,
5633                                          conn->passkey_entered);
5634 }
5635
5636 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5637                                     struct sk_buff *skb)
5638 {
5639         struct hci_ev_keypress_notify *ev = data;
5640         struct hci_conn *conn;
5641
5642         bt_dev_dbg(hdev, "");
5643
5644         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5645         if (!conn)
5646                 return;
5647
5648         switch (ev->type) {
5649         case HCI_KEYPRESS_STARTED:
5650                 conn->passkey_entered = 0;
5651                 return;
5652
5653         case HCI_KEYPRESS_ENTERED:
5654                 conn->passkey_entered++;
5655                 break;
5656
5657         case HCI_KEYPRESS_ERASED:
5658                 conn->passkey_entered--;
5659                 break;
5660
5661         case HCI_KEYPRESS_CLEARED:
5662                 conn->passkey_entered = 0;
5663                 break;
5664
5665         case HCI_KEYPRESS_COMPLETED:
5666                 return;
5667         }
5668
5669         if (hci_dev_test_flag(hdev, HCI_MGMT))
5670                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5671                                          conn->dst_type, conn->passkey_notify,
5672                                          conn->passkey_entered);
5673 }
5674
/* HCI_Simple_Pairing_Complete event: Secure Simple Pairing with the
 * remote device finished, successfully or with an error status.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Release the reference taken in hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5705
/* HCI_Remote_Host_Supported_Features_Notification event: cache the
 * remote host's feature page on the connection and update the cached
 * SSP support bit in the inquiry cache entry for this device.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* features[1] holds the extended (host) feature page */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
5727
/* HCI_Remote_OOB_Data_Request event: the controller asks for the OOB
 * data (hash/randomizer) previously received out-of-band from this
 * remote device. Reply with the stored P-192 and/or P-256 values, or
 * with a negative reply if no OOB data is known.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		/* No stored OOB data for this device: negative reply */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC Only mode only the P-256 values may be used, so
		 * the P-192 fields are zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Without Secure Connections only P-192 values apply */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5781
5782 #if IS_ENABLED(CONFIG_BT_HS)
5783 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5784                                   struct sk_buff *skb)
5785 {
5786         struct hci_ev_channel_selected *ev = data;
5787         struct hci_conn *hcon;
5788
5789         bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5790
5791         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5792         if (!hcon)
5793                 return;
5794
5795         amp_read_loc_assoc_final_data(hdev, hcon);
5796 }
5797
/* HCI_Physical_Link_Complete event (AMP): the AMP physical link setup
 * finished. On success the connection enters BT_CONNECTED and the
 * AMP manager is informed; on error the connection object is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = data;
	struct hci_conn *hcon, *bredr_hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
		   ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only links managed by an AMP manager are handled here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	/* The AMP link mirrors the peer of the underlying BR/EDR link */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5838
/* HCI_Logical_Link_Complete event (AMP): a logical link on top of an
 * AMP physical link has been established. Create an hci_chan for it
 * and, if an L2CAP channel is waiting on the AMP manager, confirm the
 * logical link to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5877
5878 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5879                                              struct sk_buff *skb)
5880 {
5881         struct hci_ev_disconn_logical_link_complete *ev = data;
5882         struct hci_chan *hchan;
5883
5884         bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5885                    le16_to_cpu(ev->handle), ev->status);
5886
5887         if (ev->status)
5888                 return;
5889
5890         hci_dev_lock(hdev);
5891
5892         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5893         if (!hchan || !hchan->amp)
5894                 goto unlock;
5895
5896         amp_destroy_logical_link(hchan, ev->reason);
5897
5898 unlock:
5899         hci_dev_unlock(hdev);
5900 }
5901
5902 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5903                                              struct sk_buff *skb)
5904 {
5905         struct hci_ev_disconn_phy_link_complete *ev = data;
5906         struct hci_conn *hcon;
5907
5908         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5909
5910         if (ev->status)
5911                 return;
5912
5913         hci_dev_lock(hdev);
5914
5915         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5916         if (hcon && hcon->type == AMP_LINK) {
5917                 hcon->state = BT_CLOSED;
5918                 hci_disconn_cfm(hcon, ev->reason);
5919                 hci_conn_del(hcon);
5920         }
5921
5922         hci_dev_unlock(hdev);
5923 }
5924 #endif
5925
/* Fill in the initiator/responder address information of an LE
 * connection from the peer address in the connection-complete event,
 * the controller-reported local RPA (if any) and the local identity
 * or advertising address. The roles differ depending on whether we
 * initiated (conn->out) or accepted the connection.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		/* We initiated: the peer is the responder */
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		/* We accepted: our advertising address is the responder */
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
5978
/* Common handler for the legacy HCI_EV_LE_CONN_COMPLETE and the
 * HCI_EV_LE_ENH_CONN_COMPLETE events.
 *
 * Creates the hci_conn object if this is an incoming/accept-list based
 * connection, records the addresses used during establishment, resolves
 * the peer RPA back to its identity address when an IRK is known,
 * assigns the connection handle and notifies mgmt and the upper layers.
 *
 * @local_rpa: local resolvable private address from the enhanced event,
 *             or NULL when called for the legacy event.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Connection attempt succeeded (or failed) before the
		 * timeout fired; stop the pending timeout work.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status || hci_conn_set_handle(conn, handle))
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Only notify mgmt once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when sotfware rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Hold the connection until the remote features procedure
		 * completes.
		 */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending action for this peer is now consumed; drop the
	 * params' reference to any connection object it triggered.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		hci_pend_le_list_del_init(params);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	/* Re-evaluate passive scanning now that the pending lists changed */
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
6150
6151 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6152                                      struct sk_buff *skb)
6153 {
6154         struct hci_ev_le_conn_complete *ev = data;
6155
6156         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6157
6158         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6159                              NULL, ev->role, le16_to_cpu(ev->handle),
6160                              le16_to_cpu(ev->interval),
6161                              le16_to_cpu(ev->latency),
6162                              le16_to_cpu(ev->supervision_timeout));
6163 }
6164
6165 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6166                                          struct sk_buff *skb)
6167 {
6168         struct hci_ev_le_enh_conn_complete *ev = data;
6169
6170         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6171
6172         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6173                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6174                              le16_to_cpu(ev->interval),
6175                              le16_to_cpu(ev->latency),
6176                              le16_to_cpu(ev->supervision_timeout));
6177 }
6178
/* Handle the HCI_EVT_LE_EXT_ADV_SET_TERM event.
 *
 * On error status the advertising instance is removed; on success the
 * instance is marked disabled and, when the set terminated because a
 * connection was created, the connection's advertising instance and
 * response address are recorded.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* Keep HCI_LE_ADV set while any other instance is enabled */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* resp_addr only needs fixing up when a random own address
		 * was used and it has not been set yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 is the default instance using hdev->random_addr */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		/* Other instances use their per-instance random address */
		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6248
6249 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6250                                             struct sk_buff *skb)
6251 {
6252         struct hci_ev_le_conn_update_complete *ev = data;
6253         struct hci_conn *conn;
6254
6255         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6256
6257         if (ev->status)
6258                 return;
6259
6260         hci_dev_lock(hdev);
6261
6262         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6263         if (conn) {
6264 #ifdef TIZEN_BT
6265                 if (ev->status) {
6266                         hci_dev_unlock(hdev);
6267                         mgmt_le_conn_update_failed(hdev, &conn->dst,
6268                                 conn->type, conn->dst_type, ev->status);
6269                         return;
6270                 }
6271 #endif
6272                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6273                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6274                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6275         }
6276
6277         hci_dev_unlock(hdev);
6278
6279 #ifdef TIZEN_BT
6280         mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
6281                                 conn->dst_type, conn->le_conn_interval,
6282                                 conn->le_conn_latency, conn->le_supv_timeout);
6283 #endif
6284 }
6285
/* This function requires the caller holds hdev->lock.
 *
 * Decide whether an advertising report should trigger an outgoing LE
 * connection attempt and, if so, initiate it via hci_connect_le().
 *
 * Returns the hci_conn for the initiated attempt, or NULL when no
 * connection should be (or could be) created.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* Explicit connects bypass the auto_connect policy check */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6377
/* Process a single (legacy or extended) advertising report.
 *
 * Validates the report type and length, resolves the advertiser's
 * identity address, triggers pending connection attempts, and forwards
 * device-found events to mgmt — merging ADV_IND/ADV_SCAN_IND reports
 * with their subsequent SCAN_RSP where possible.
 *
 * @direct_addr: non-NULL only for LE Direct Advertising Reports; must
 *               then be our own RPA or the report is dropped.
 * @ext_adv:     report came from the extended advertising event, so no
 *               SCAN_RSP merging is performed.
 * @ctl_time:    unused here — NOTE(review): kept for interface
 *               compatibility with callers; confirm before removing.
 * @instant:     timestamp forwarded to mgmt for Mesh scan results.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv, bool ctl_time, u64 instant)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	/* Only the five legacy PDU types are valid here */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (len > max_adv_len(hdev)) {
		bt_dev_err_ratelimited(hdev,
				       "adv larger than maximum supported");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type);
	if (!ext_adv && conn && type == LE_ADV_IND &&
	    len <= max_adv_len(hdev)) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the unlikely case that a controller just sends a scan
	 * response event that doesn't match the pending report, then
	 * it is marked as a standalone SCAN_RSP.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_SCAN_RSP;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
}
6586
6587 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6588                                   struct sk_buff *skb)
6589 {
6590         struct hci_ev_le_advertising_report *ev = data;
6591         u64 instant = jiffies;
6592
6593         if (!ev->num)
6594                 return;
6595
6596         hci_dev_lock(hdev);
6597
6598         while (ev->num--) {
6599                 struct hci_ev_le_advertising_info *info;
6600                 s8 rssi;
6601
6602                 info = hci_le_ev_skb_pull(hdev, skb,
6603                                           HCI_EV_LE_ADVERTISING_REPORT,
6604                                           sizeof(*info));
6605                 if (!info)
6606                         break;
6607
6608                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6609                                         info->length + 1))
6610                         break;
6611
6612                 if (info->length <= max_adv_len(hdev)) {
6613                         rssi = info->data[info->length];
6614                         process_adv_report(hdev, info->type, &info->bdaddr,
6615                                            info->bdaddr_type, NULL, 0, rssi,
6616                                            info->data, info->length, false,
6617                                            false, instant);
6618                 } else {
6619                         bt_dev_err(hdev, "Dropping invalid advertising data");
6620                 }
6621         }
6622
6623         hci_dev_unlock(hdev);
6624 }
6625
6626 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6627 {
6628         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6629                 switch (evt_type) {
6630                 case LE_LEGACY_ADV_IND:
6631                         return LE_ADV_IND;
6632                 case LE_LEGACY_ADV_DIRECT_IND:
6633                         return LE_ADV_DIRECT_IND;
6634                 case LE_LEGACY_ADV_SCAN_IND:
6635                         return LE_ADV_SCAN_IND;
6636                 case LE_LEGACY_NONCONN_IND:
6637                         return LE_ADV_NONCONN_IND;
6638                 case LE_LEGACY_SCAN_RSP_ADV:
6639                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6640                         return LE_ADV_SCAN_RSP;
6641                 }
6642
6643                 goto invalid;
6644         }
6645
6646         if (evt_type & LE_EXT_ADV_CONN_IND) {
6647                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6648                         return LE_ADV_DIRECT_IND;
6649
6650                 return LE_ADV_IND;
6651         }
6652
6653         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6654                 return LE_ADV_SCAN_RSP;
6655
6656         if (evt_type & LE_EXT_ADV_SCAN_IND)
6657                 return LE_ADV_SCAN_IND;
6658
6659         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6660             evt_type & LE_EXT_ADV_DIRECT_IND)
6661                 return LE_ADV_NONCONN_IND;
6662
6663 invalid:
6664         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6665                                evt_type);
6666
6667         return LE_ADV_INVALID;
6668 }
6669
6670 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6671                                       struct sk_buff *skb)
6672 {
6673         struct hci_ev_le_ext_adv_report *ev = data;
6674         u64 instant = jiffies;
6675
6676         if (!ev->num)
6677                 return;
6678
6679         hci_dev_lock(hdev);
6680
6681         while (ev->num--) {
6682                 struct hci_ev_le_ext_adv_info *info;
6683                 u8 legacy_evt_type;
6684                 u16 evt_type;
6685
6686                 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6687                                           sizeof(*info));
6688                 if (!info)
6689                         break;
6690
6691                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6692                                         info->length))
6693                         break;
6694
6695                 evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6696                 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6697                 if (legacy_evt_type != LE_ADV_INVALID) {
6698                         process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6699                                            info->bdaddr_type, NULL, 0,
6700                                            info->rssi, info->data, info->length,
6701                                            !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6702                                            false, instant);
6703                 }
6704         }
6705
6706         hci_dev_unlock(hdev);
6707 }
6708
6709 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6710 {
6711         struct hci_cp_le_pa_term_sync cp;
6712
6713         memset(&cp, 0, sizeof(cp));
6714         cp.handle = handle;
6715
6716         return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6717 }
6718
/* Handle the LE Periodic Advertising Sync Established event.
 *
 * Terminates the sync if no protocol accepts it; otherwise, when the
 * protocol requested deferred setup and the sync failed, a placeholder
 * connection is added so the ISO layer can be notified of the failure.
 *
 * NOTE(review): "estabilished" is a misspelling of "established" in the
 * function name — kept as-is since renaming would require updating the
 * event dispatch table elsewhere in this file.
 */
static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	/* The PA sync procedure has completed (successfully or not) */
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	/* Ask the ISO protocol whether this sync should be accepted */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	if (ev->status) {
		/* Add connection to indicate the failed PA sync event */
		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
					     HCI_ROLE_SLAVE);

		if (!pa_sync)
			goto unlock;

		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}
6759
6760 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6761                                       struct sk_buff *skb)
6762 {
6763         struct hci_ev_le_per_adv_report *ev = data;
6764         int mask = hdev->link_mode;
6765         __u8 flags = 0;
6766
6767         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6768
6769         hci_dev_lock(hdev);
6770
6771         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6772         if (!(mask & HCI_LM_ACCEPT))
6773                 hci_le_pa_term_sync(hdev, ev->sync_handle);
6774
6775         hci_dev_unlock(hdev);
6776 }
6777
6778 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6779                                             struct sk_buff *skb)
6780 {
6781         struct hci_ev_le_remote_feat_complete *ev = data;
6782         struct hci_conn *conn;
6783
6784         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6785
6786         hci_dev_lock(hdev);
6787
6788         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6789         if (conn) {
6790                 if (!ev->status)
6791                         memcpy(conn->features[0], ev->features, 8);
6792
6793                 if (conn->state == BT_CONFIG) {
6794                         __u8 status;
6795
6796                         /* If the local controller supports peripheral-initiated
6797                          * features exchange, but the remote controller does
6798                          * not, then it is possible that the error code 0x1a
6799                          * for unsupported remote feature gets returned.
6800                          *
6801                          * In this specific case, allow the connection to
6802                          * transition into connected state and mark it as
6803                          * successful.
6804                          */
6805                         if (!conn->out && ev->status == 0x1a &&
6806                             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6807                                 status = 0x00;
6808                         else
6809                                 status = ev->status;
6810
6811                         conn->state = BT_CONNECTED;
6812                         hci_connect_cfm(conn, status);
6813                         hci_conn_drop(conn);
6814                 }
6815         }
6816
6817         hci_dev_unlock(hdev);
6818 }
6819
/* LE Long Term Key Request event.
 *
 * Looks up an LTK matching the connection and the EDiv/Rand values in
 * the event and replies with it (padded to 16 bytes); if no suitable
 * key is found a negative reply is sent instead.  STKs are single-use
 * and are removed once handed back to the controller.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad the remainder of the 16-byte value */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6884
6885 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6886                                       u8 reason)
6887 {
6888         struct hci_cp_le_conn_param_req_neg_reply cp;
6889
6890         cp.handle = cpu_to_le16(handle);
6891         cp.reason = reason;
6892
6893         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6894                      &cp);
6895 }
6896
/* LE Remote Connection Parameter Request event.
 *
 * Validates the requested parameters and either rejects them or echoes
 * them back in a positive reply.  When acting as Central, the stored
 * connection parameters are also updated and userspace is notified via
 * mgmt_new_conn_param().
 *
 * NOTE(review): the hcon lookup below runs without hci_dev_lock held,
 * unlike most handlers in this file — confirm that is intentional.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject parameters outside the ranges allowed by the spec */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request with the parameters as received */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
6956
6957 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6958                                          struct sk_buff *skb)
6959 {
6960         struct hci_ev_le_direct_adv_report *ev = data;
6961         u64 instant = jiffies;
6962         int i;
6963
6964         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6965                                 flex_array_size(ev, info, ev->num)))
6966                 return;
6967
6968         if (!ev->num)
6969                 return;
6970
6971         hci_dev_lock(hdev);
6972
6973         for (i = 0; i < ev->num; i++) {
6974                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6975
6976                 process_adv_report(hdev, info->type, &info->bdaddr,
6977                                    info->bdaddr_type, &info->direct_addr,
6978                                    info->direct_addr_type, info->rssi, NULL, 0,
6979                                    false, false, instant);
6980         }
6981
6982         hci_dev_unlock(hdev);
6983 }
6984
6985 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6986                                   struct sk_buff *skb)
6987 {
6988         struct hci_ev_le_phy_update_complete *ev = data;
6989         struct hci_conn *conn;
6990
6991         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6992
6993         if (ev->status)
6994                 return;
6995
6996         hci_dev_lock(hdev);
6997
6998         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6999         if (!conn)
7000                 goto unlock;
7001
7002         conn->le_tx_phy = ev->tx_phy;
7003         conn->le_rx_phy = ev->rx_phy;
7004
7005 unlock:
7006         hci_dev_unlock(hdev);
7007 }
7008
/* LE CIS Established event.
 *
 * Fills in the connection's unicast QoS parameters from the event —
 * converting ISO interval slots to SDU interval (us) and transport
 * latency (us) to latency (ms) — with the central/peripheral direction
 * mapping depending on our role.  On success the connection is moved to
 * BT_CONNECTED and the ISO data path is set up; on failure the hcon is
 * torn down.  If this CIS was pending creation, queued CIS creation is
 * resumed afterwards.
 */
static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	bool pending = false;
	u16 handle = __le16_to_cpu(ev->handle);

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != ISO_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	qos = &conn->iso_qos;

	/* Remember whether a CIS creation was pending on this conn so any
	 * queued creation can be resumed on the way out.
	 */
	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
	qos->ucast.out.interval = qos->ucast.in.interval;

	/* The event reports Central-to-Peripheral (c_*) and
	 * Peripheral-to-Central (p_*) values; map them onto in/out
	 * according to our role.
	 */
	switch (conn->role) {
	case HCI_ROLE_SLAVE:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.in.phy = ev->c_phy;
		qos->ucast.out.phy = ev->p_phy;
		break;
	case HCI_ROLE_MASTER:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.out.phy = ev->c_phy;
		qos->ucast.in.phy = ev->p_phy;
		break;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
7092
7093 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
7094 {
7095         struct hci_cp_le_reject_cis cp;
7096
7097         memset(&cp, 0, sizeof(cp));
7098         cp.handle = handle;
7099         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
7100         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
7101 }
7102
7103 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
7104 {
7105         struct hci_cp_le_accept_cis cp;
7106
7107         memset(&cp, 0, sizeof(cp));
7108         cp.handle = handle;
7109         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
7110 }
7111
/* LE CIS Request event: the remote Central is asking to create a CIS.
 *
 * If no listener accepts ISO connections from the peer, the CIS is
 * rejected.  Otherwise an hcon is created (if one does not exist yet)
 * and the CIS is either accepted immediately or, with HCI_PROTO_DEFER,
 * left in BT_CONNECT2 so the ISO layer can decide via hci_connect_cfm().
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	/* The CIS request must belong to an existing ACL connection */
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
				   cis_handle);
		if (!cis) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
	}

	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;

	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
7162
7163 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
7164 {
7165         u8 handle = PTR_UINT(data);
7166
7167         return hci_le_terminate_big_sync(hdev, handle,
7168                                          HCI_ERROR_LOCAL_HOST_TERM);
7169 }
7170
/* LE Create BIG Complete event.
 *
 * Walks all hcons bound to the BIG (ISO links with BDADDR_ANY and a
 * matching big value), assigns each the next BIS handle from the event
 * and either completes the connection setup or, on failure, notifies
 * and deletes it.  If the BIG was created successfully but no bound
 * connection was left, the BIG is terminated again.
 *
 * NOTE(review): ev->bis_handle[] is indexed with i++ for every matching
 * conn; this assumes the number of bound conns never exceeds
 * ev->num_bis — TODO confirm.
 */
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	__u8 i = 0;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	/* Make sure the full BIS handle array is present in the skb */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);
	rcu_read_lock();

	/* Connect all BISes that are bound to the BIG */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (bacmp(&conn->dst, BDADDR_ANY) ||
		    conn->type != ISO_LINK ||
		    conn->iso_qos.bcast.big != ev->handle)
			continue;

		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
			continue;

		if (!ev->status) {
			conn->state = BT_CONNECTED;
			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
			/* RCU read lock is dropped around the sleepable
			 * setup calls and re-acquired afterwards.
			 */
			rcu_read_unlock();
			hci_debugfs_create_conn(conn);
			hci_conn_add_sysfs(conn);
			hci_iso_setup_path(conn);
			rcu_read_lock();
			continue;
		}

		hci_connect_cfm(conn, ev->status);
		rcu_read_unlock();
		hci_conn_del(conn);
		rcu_read_lock();
	}

	rcu_read_unlock();

	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * has completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}
7228
7229 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7230                                             struct sk_buff *skb)
7231 {
7232         struct hci_evt_le_big_sync_estabilished *ev = data;
7233         struct hci_conn *bis;
7234         struct hci_conn *pa_sync;
7235         int i;
7236
7237         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7238
7239         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7240                                 flex_array_size(ev, bis, ev->num_bis)))
7241                 return;
7242
7243         hci_dev_lock(hdev);
7244
7245         if (!ev->status) {
7246                 pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
7247                 if (pa_sync)
7248                         /* Also mark the BIG sync established event on the
7249                          * associated PA sync hcon
7250                          */
7251                         set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
7252         }
7253
7254         for (i = 0; i < ev->num_bis; i++) {
7255                 u16 handle = le16_to_cpu(ev->bis[i]);
7256                 __le32 interval;
7257
7258                 bis = hci_conn_hash_lookup_handle(hdev, handle);
7259                 if (!bis) {
7260                         bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7261                                            HCI_ROLE_SLAVE, handle);
7262                         if (!bis)
7263                                 continue;
7264                 }
7265
7266                 if (ev->status != 0x42)
7267                         /* Mark PA sync as established */
7268                         set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7269
7270                 bis->iso_qos.bcast.big = ev->handle;
7271                 memset(&interval, 0, sizeof(interval));
7272                 memcpy(&interval, ev->latency, sizeof(ev->latency));
7273                 bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7274                 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7275                 bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7276                 bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7277
7278                 if (!ev->status) {
7279                         set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7280                         hci_iso_setup_path(bis);
7281                 }
7282         }
7283
7284         /* In case BIG sync failed, notify each failed connection to
7285          * the user after all hci connections have been added
7286          */
7287         if (ev->status)
7288                 for (i = 0; i < ev->num_bis; i++) {
7289                         u16 handle = le16_to_cpu(ev->bis[i]);
7290
7291                         bis = hci_conn_hash_lookup_handle(hdev, handle);
7292
7293                         set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7294                         hci_connect_cfm(bis, ev->status);
7295                 }
7296
7297         hci_dev_unlock(hdev);
7298 }
7299
/* LE BIGInfo Advertising Report event.
 *
 * If no listener accepts the report the PA sync is terminated.  For a
 * deferred setup, an hcon representing the PA sync is added (once per
 * sync handle) and the ISO layer is notified via hci_connect_cfm().
 */
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	/* Check whether any listener accepts this report */
	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->sync_handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Only one hcon per sync handle; bail out if it already exists */
	pa_sync = hci_conn_hash_lookup_pa_sync_handle
			(hdev,
			le16_to_cpu(ev->sync_handle));

	if (pa_sync)
		goto unlock;

	/* Add connection to indicate the PA sync event */
	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (!pa_sync)
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0x00);

unlock:
	hci_dev_unlock(hdev);
}
7344
/* Declare a variable-length LE subevent handler entry. */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare a fixed-length LE subevent handler entry. */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Declare an LE subevent handler entry whose payload is just a status. */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entries in this table shall have their position according to the subevent
 * opcode they handle, so use of the macros above is recommended since they
 * initialize at the proper index using designated initializers; that way
 * events without a callback function can be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
				 hci_le_per_adv_report_evt,
				 sizeof(struct hci_ev_le_per_adv_report),
				 HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};
7439
/* LE Meta event dispatcher.
 *
 * Completes a pending LE command that was waiting for this subevent,
 * then validates the payload length against hci_le_ev_table[] and
 * invokes the registered subevent handler, if any.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len: it may still be
	 * possible to partially parse the event, so leave it to the
	 * callback to decide whether that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}
7482
7483 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7484                                  u8 event, struct sk_buff *skb)
7485 {
7486         struct hci_ev_cmd_complete *ev;
7487         struct hci_event_hdr *hdr;
7488
7489         if (!skb)
7490                 return false;
7491
7492         hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7493         if (!hdr)
7494                 return false;
7495
7496         if (event) {
7497                 if (hdr->evt != event)
7498                         return false;
7499                 return true;
7500         }
7501
7502         /* Check if request ended in Command Status - no way to retrieve
7503          * any extra parameters in this case.
7504          */
7505         if (hdr->evt == HCI_EV_CMD_STATUS)
7506                 return false;
7507
7508         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7509                 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7510                            hdr->evt);
7511                 return false;
7512         }
7513
7514         ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7515         if (!ev)
7516                 return false;
7517
7518         if (opcode != __le16_to_cpu(ev->opcode)) {
7519                 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7520                        __le16_to_cpu(ev->opcode));
7521                 return false;
7522         }
7523
7524         return true;
7525 }
7526
7527 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7528                                   struct sk_buff *skb)
7529 {
7530         struct hci_ev_le_advertising_info *adv;
7531         struct hci_ev_le_direct_adv_info *direct_adv;
7532         struct hci_ev_le_ext_adv_info *ext_adv;
7533         const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7534         const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7535
7536         hci_dev_lock(hdev);
7537
7538         /* If we are currently suspended and this is the first BT event seen,
7539          * save the wake reason associated with the event.
7540          */
7541         if (!hdev->suspended || hdev->wake_reason)
7542                 goto unlock;
7543
7544         /* Default to remote wake. Values for wake_reason are documented in the
7545          * Bluez mgmt api docs.
7546          */
7547         hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7548
7549         /* Once configured for remote wakeup, we should only wake up for
7550          * reconnections. It's useful to see which device is waking us up so
7551          * keep track of the bdaddr of the connection event that woke us up.
7552          */
7553         if (event == HCI_EV_CONN_REQUEST) {
7554                 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7555                 hdev->wake_addr_type = BDADDR_BREDR;
7556         } else if (event == HCI_EV_CONN_COMPLETE) {
7557                 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7558                 hdev->wake_addr_type = BDADDR_BREDR;
7559         } else if (event == HCI_EV_LE_META) {
7560                 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7561                 u8 subevent = le_ev->subevent;
7562                 u8 *ptr = &skb->data[sizeof(*le_ev)];
7563                 u8 num_reports = *ptr;
7564
7565                 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7566                      subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7567                      subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7568                     num_reports) {
7569                         adv = (void *)(ptr + 1);
7570                         direct_adv = (void *)(ptr + 1);
7571                         ext_adv = (void *)(ptr + 1);
7572
7573                         switch (subevent) {
7574                         case HCI_EV_LE_ADVERTISING_REPORT:
7575                                 bacpy(&hdev->wake_addr, &adv->bdaddr);
7576                                 hdev->wake_addr_type = adv->bdaddr_type;
7577                                 break;
7578                         case HCI_EV_LE_DIRECT_ADV_REPORT:
7579                                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7580                                 hdev->wake_addr_type = direct_adv->bdaddr_type;
7581                                 break;
7582                         case HCI_EV_LE_EXT_ADV_REPORT:
7583                                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7584                                 hdev->wake_addr_type = ext_adv->bdaddr_type;
7585                                 break;
7586                         }
7587                 }
7588         } else {
7589                 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7590         }
7591
7592 unlock:
7593         hci_dev_unlock(hdev);
7594 }
7595
/* Initialize a table entry for an event whose parameter length may vary
 * between _min_len and _max_len bytes.
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
        .req = false, \
        .func = _func, \
        .min_len = _min_len, \
        .max_len = _max_len, \
}

/* Initialize a table entry for a fixed-length event. */
#define HCI_EV(_op, _func, _len) \
        HCI_EV_VL(_op, _func, _len, _len)

/* Initialize a table entry for an event carrying only a status byte. */
#define HCI_EV_STATUS(_op, _func) \
        HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Like HCI_EV_VL, but for events that may complete a pending request and
 * therefore use the extended func_req callback signature.
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
        .req = true, \
        .func_req = _func, \
        .min_len = _min_len, \
        .max_len = _max_len, \
}

/* Like HCI_EV, but using the request-aware callback signature. */
#define HCI_EV_REQ(_op, _func, _len) \
        HCI_EV_REQ_VL(_op, _func, _len, _len)
7620
/* Entries in this table shall have their position according to the event
 * opcode they handle, so use of the macros above is recommended since they
 * initialize each entry at its proper index using designated initializers;
 * that way, events without a callback function don't get an entry.
 */
static const struct hci_ev {
        bool req;               /* true: use func_req; false: use func */
        union {
                void (*func)(struct hci_dev *hdev, void *data,
                             struct sk_buff *skb);
                void (*func_req)(struct hci_dev *hdev, void *data,
                                 struct sk_buff *skb, u16 *opcode, u8 *status,
                                 hci_req_complete_t *req_complete,
                                 hci_req_complete_skb_t *req_complete_skb);
        };
        u16  min_len;           /* reject events shorter than this */
        u16  max_len;           /* warn (but still dispatch) if longer */
} hci_ev_table[U8_MAX + 1] = {
        /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
        HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
        /* [0x02 = HCI_EV_INQUIRY_RESULT] */
        HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
                  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
        /* [0x03 = HCI_EV_CONN_COMPLETE] */
        HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
               sizeof(struct hci_ev_conn_complete)),
        /* [0x04 = HCI_EV_CONN_REQUEST] */
        HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
               sizeof(struct hci_ev_conn_request)),
        /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
        HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
               sizeof(struct hci_ev_disconn_complete)),
        /* [0x06 = HCI_EV_AUTH_COMPLETE] */
        HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
               sizeof(struct hci_ev_auth_complete)),
        /* [0x07 = HCI_EV_REMOTE_NAME] */
        HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
               sizeof(struct hci_ev_remote_name)),
        /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
        HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
               sizeof(struct hci_ev_encrypt_change)),
        /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
        HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
               hci_change_link_key_complete_evt,
               sizeof(struct hci_ev_change_link_key_complete)),
        /* [0x0b = HCI_EV_REMOTE_FEATURES] */
        HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
               sizeof(struct hci_ev_remote_features)),
        /* [0x0e = HCI_EV_CMD_COMPLETE] */
        HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
                      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
        /* [0x0f = HCI_EV_CMD_STATUS] */
        HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
                   sizeof(struct hci_ev_cmd_status)),
        /* [0x10 = HCI_EV_HARDWARE_ERROR] */
        HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
               sizeof(struct hci_ev_hardware_error)),
        /* [0x12 = HCI_EV_ROLE_CHANGE] */
        HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
               sizeof(struct hci_ev_role_change)),
        /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
        HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
                  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
        /* [0x14 = HCI_EV_MODE_CHANGE] */
        HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
               sizeof(struct hci_ev_mode_change)),
        /* [0x16 = HCI_EV_PIN_CODE_REQ] */
        HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
               sizeof(struct hci_ev_pin_code_req)),
        /* [0x17 = HCI_EV_LINK_KEY_REQ] */
        HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
               sizeof(struct hci_ev_link_key_req)),
        /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
        HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
               sizeof(struct hci_ev_link_key_notify)),
        /* [0x1c = HCI_EV_CLOCK_OFFSET] */
        HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
               sizeof(struct hci_ev_clock_offset)),
        /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
        HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
               sizeof(struct hci_ev_pkt_type_change)),
        /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
        HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
               sizeof(struct hci_ev_pscan_rep_mode)),
        /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
        HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
                  hci_inquiry_result_with_rssi_evt,
                  sizeof(struct hci_ev_inquiry_result_rssi),
                  HCI_MAX_EVENT_SIZE),
        /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
        HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
               sizeof(struct hci_ev_remote_ext_features)),
        /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
        HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
               sizeof(struct hci_ev_sync_conn_complete)),
        /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
        HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
                  hci_extended_inquiry_result_evt,
                  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
        /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
        HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
               sizeof(struct hci_ev_key_refresh_complete)),
        /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
        HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
               sizeof(struct hci_ev_io_capa_request)),
        /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
        HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
               sizeof(struct hci_ev_io_capa_reply)),
        /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
        HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
               sizeof(struct hci_ev_user_confirm_req)),
        /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
        HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
               sizeof(struct hci_ev_user_passkey_req)),
        /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
        HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
               sizeof(struct hci_ev_remote_oob_data_request)),
        /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
        HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
               sizeof(struct hci_ev_simple_pair_complete)),
        /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
        HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
               sizeof(struct hci_ev_user_passkey_notify)),
        /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
        HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
               sizeof(struct hci_ev_keypress_notify)),
        /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
        HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
               sizeof(struct hci_ev_remote_host_features)),
        /* [0x3e = HCI_EV_LE_META] */
        HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
                      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
        /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
        HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
               sizeof(struct hci_ev_phy_link_complete)),
        /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
        HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
               sizeof(struct hci_ev_channel_selected)),
        /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
        HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
               hci_disconn_loglink_complete_evt,
               sizeof(struct hci_ev_disconn_logical_link_complete)),
        /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
        HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
               sizeof(struct hci_ev_logical_link_complete)),
        /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
        HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
               hci_disconn_phylink_complete_evt,
               sizeof(struct hci_ev_disconn_phy_link_complete)),
#endif
        /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
        HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
               sizeof(struct hci_ev_num_comp_blocks)),
#ifdef TIZEN_BT
        /* [0xFF = HCI_EV_VENDOR_SPECIFIC] */
        HCI_EV(HCI_EV_VENDOR_SPECIFIC, hci_vendor_specific_evt,
               sizeof(struct hci_ev_vendor_specific)),
#else
        /* [0xff = HCI_EV_VENDOR] */
        HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
#endif
};
7784
7785 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7786                            u16 *opcode, u8 *status,
7787                            hci_req_complete_t *req_complete,
7788                            hci_req_complete_skb_t *req_complete_skb)
7789 {
7790         const struct hci_ev *ev = &hci_ev_table[event];
7791         void *data;
7792
7793         if (!ev->func)
7794                 return;
7795
7796         if (skb->len < ev->min_len) {
7797                 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7798                            event, skb->len, ev->min_len);
7799                 return;
7800         }
7801
7802         /* Just warn if the length is over max_len size it still be
7803          * possible to partially parse the event so leave to callback to
7804          * decide if that is acceptable.
7805          */
7806         if (skb->len > ev->max_len)
7807                 bt_dev_warn_ratelimited(hdev,
7808                                         "unexpected event 0x%2.2x length: %u > %u",
7809                                         event, skb->len, ev->max_len);
7810
7811         data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7812         if (!data)
7813                 return;
7814
7815         if (ev->req)
7816                 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7817                              req_complete_skb);
7818         else
7819                 ev->func(hdev, data, skb);
7820 }
7821
/* Entry point for a received HCI event packet: validates the header,
 * matches the event against any pending synchronous command, dispatches
 * via the handler table and finally runs any request-completion callback.
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_event_hdr *hdr = (void *) skb->data;
        hci_req_complete_t req_complete = NULL;
        hci_req_complete_skb_t req_complete_skb = NULL;
        struct sk_buff *orig_skb = NULL;
        u8 status = 0, event, req_evt = 0;
        u16 opcode = HCI_OP_NOP;

        if (skb->len < sizeof(*hdr)) {
                bt_dev_err(hdev, "Malformed HCI Event");
                goto done;
        }

        /* Keep a clone of the most recently received event, replacing any
         * previously stored one.
         */
        kfree_skb(hdev->recv_event);
        hdev->recv_event = skb_clone(skb, GFP_KERNEL);

        /* Event code 0x00 is not a valid event */
        event = hdr->evt;
        if (!event) {
                bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
                            event);
                goto done;
        }

        /* Only match event if command OGF is not for LE */
        if (hdev->sent_cmd &&
            hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
            hci_skb_event(hdev->sent_cmd) == event) {
                hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
                                     status, &req_complete, &req_complete_skb);
                /* Remember which event completed the pending command */
                req_evt = event;
        }

        /* If it looks like we might end up having to call
         * req_complete_skb, store a pristine copy of the skb since the
         * various handlers may modify the original one through
         * skb_pull() calls, etc.
         */
        if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
            event == HCI_EV_CMD_COMPLETE)
                orig_skb = skb_clone(skb, GFP_KERNEL);

        /* Strip the event header; handlers see only the parameters */
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        /* Store wake reason if we're suspended */
        hci_store_wake_reason(hdev, event, skb);

        bt_dev_dbg(hdev, "event 0x%2.2x", event);

        hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
                       &req_complete_skb);

        if (req_complete) {
                req_complete(hdev, status, opcode);
        } else if (req_complete_skb) {
                /* Only hand the clone over if it really is the matching
                 * Command Complete; otherwise free it and pass NULL.
                 */
                if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
                        kfree_skb(orig_skb);
                        orig_skb = NULL;
                }
                req_complete_skb(hdev, status, opcode, orig_skb);
        }

done:
        /* kfree_skb(NULL) is a no-op, so this is safe on all paths */
        kfree_skb(orig_skb);
        kfree_skb(skb);
        hdev->stat.evt_rx++;
}