Bluetooth: Read LE Max data length command
[platform/kernel/linux-rpi.git] / net / bluetooth / hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI event handling. */
27
28 #include <asm/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "hci_debugfs.h"
38 #include "hci_codec.h"
39 #include "a2mp.h"
40 #include "amp.h"
41 #include "smp.h"
42 #include "msft.h"
43 #include "eir.h"
44
/* 16 octets of zeroes, used to detect an all-zero (invalid) link key. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Convert whole seconds to jiffies via the msecs helper. */
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */
50 /* Handle HCI Event packets */
51
52 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
53                              u8 ev, size_t len)
54 {
55         void *data;
56
57         data = skb_pull_data(skb, len);
58         if (!data)
59                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
60
61         return data;
62 }
63
64 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
65                              u16 op, size_t len)
66 {
67         void *data;
68
69         data = skb_pull_data(skb, len);
70         if (!data)
71                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
72
73         return data;
74 }
75
76 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
77                                 u8 ev, size_t len)
78 {
79         void *data;
80
81         data = skb_pull_data(skb, len);
82         if (!data)
83                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
84
85         return data;
86 }
87
/* Handle the Command Complete for HCI Inquiry Cancel: clear the inquiry
 * state, wake any waiters, and update the discovery state machine.
 * Returns the (possibly rewritten) command status.
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	/* The barrier pairs with wake_up_bit(): the flag clear must be
	 * visible before waiters are woken.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* A connection attempt may have been deferred while inquiring. */
	hci_conn_check_pending(hdev);

	return rp->status;
}
127
128 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
129                               struct sk_buff *skb)
130 {
131         struct hci_ev_status *rp = data;
132
133         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
134
135         if (rp->status)
136                 return rp->status;
137
138         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
139
140         return rp->status;
141 }
142
143 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
144                                    struct sk_buff *skb)
145 {
146         struct hci_ev_status *rp = data;
147
148         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
149
150         if (rp->status)
151                 return rp->status;
152
153         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
154
155         hci_conn_check_pending(hdev);
156
157         return rp->status;
158 }
159
160 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
161                                         struct sk_buff *skb)
162 {
163         struct hci_ev_status *rp = data;
164
165         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
166
167         return rp->status;
168 }
169
170 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
171                                 struct sk_buff *skb)
172 {
173         struct hci_rp_role_discovery *rp = data;
174         struct hci_conn *conn;
175
176         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
177
178         if (rp->status)
179                 return rp->status;
180
181         hci_dev_lock(hdev);
182
183         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
184         if (conn)
185                 conn->role = rp->role;
186
187         hci_dev_unlock(hdev);
188
189         return rp->status;
190 }
191
192 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
193                                   struct sk_buff *skb)
194 {
195         struct hci_rp_read_link_policy *rp = data;
196         struct hci_conn *conn;
197
198         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
199
200         if (rp->status)
201                 return rp->status;
202
203         hci_dev_lock(hdev);
204
205         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
206         if (conn)
207                 conn->link_policy = __le16_to_cpu(rp->policy);
208
209         hci_dev_unlock(hdev);
210
211         return rp->status;
212 }
213
214 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
215                                    struct sk_buff *skb)
216 {
217         struct hci_rp_write_link_policy *rp = data;
218         struct hci_conn *conn;
219         void *sent;
220 #ifdef TIZEN_BT
221         struct hci_cp_write_link_policy cp;
222         struct hci_conn *sco_conn;
223 #endif
224
225         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
226
227         if (rp->status)
228                 return rp->status;
229
230         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
231         if (!sent)
232                 return rp->status;
233
234         hci_dev_lock(hdev);
235
236         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
237         if (conn)
238                 conn->link_policy = get_unaligned_le16(sent + 2);
239
240 #ifdef TIZEN_BT
241         sco_conn = hci_conn_hash_lookup_sco(hdev);
242         if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
243             conn->link_policy & HCI_LP_SNIFF) {
244                 BT_ERR("SNIFF is not allowed during sco connection");
245                 cp.handle = __cpu_to_le16(conn->handle);
246                 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
247                 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
248         }
249 #endif
250
251         hci_dev_unlock(hdev);
252
253         return rp->status;
254 }
255
256 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
257                                       struct sk_buff *skb)
258 {
259         struct hci_rp_read_def_link_policy *rp = data;
260
261         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
262
263         if (rp->status)
264                 return rp->status;
265
266         hdev->link_policy = __le16_to_cpu(rp->policy);
267
268         return rp->status;
269 }
270
271 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
272                                        struct sk_buff *skb)
273 {
274         struct hci_ev_status *rp = data;
275         void *sent;
276
277         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
278
279         if (rp->status)
280                 return rp->status;
281
282         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
283         if (!sent)
284                 return rp->status;
285
286         hdev->link_policy = get_unaligned_le16(sent);
287
288         return rp->status;
289 }
290
/* Command Complete for HCI Reset: drop all volatile controller state so
 * the stack's view matches the freshly reset controller.
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The reset attempt is over regardless of outcome. */
	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising and scan-response data were wiped by the reset. */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* Controller-side LE lists are empty after reset. */
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}
325
326 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
327                                       struct sk_buff *skb)
328 {
329         struct hci_rp_read_stored_link_key *rp = data;
330         struct hci_cp_read_stored_link_key *sent;
331
332         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
333
334         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
335         if (!sent)
336                 return rp->status;
337
338         if (!rp->status && sent->read_all == 0x01) {
339                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
340                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
341         }
342
343         return rp->status;
344 }
345
346 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
347                                         struct sk_buff *skb)
348 {
349         struct hci_rp_delete_stored_link_key *rp = data;
350         u16 num_keys;
351
352         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
353
354         if (rp->status)
355                 return rp->status;
356
357         num_keys = le16_to_cpu(rp->num_keys);
358
359         if (num_keys <= hdev->stored_num_keys)
360                 hdev->stored_num_keys -= num_keys;
361         else
362                 hdev->stored_num_keys = 0;
363
364         return rp->status;
365 }
366
367 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
368                                   struct sk_buff *skb)
369 {
370         struct hci_ev_status *rp = data;
371         void *sent;
372
373         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
374
375         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
376         if (!sent)
377                 return rp->status;
378
379         hci_dev_lock(hdev);
380
381         if (hci_dev_test_flag(hdev, HCI_MGMT))
382                 mgmt_set_local_name_complete(hdev, sent, rp->status);
383         else if (!rp->status)
384                 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
385
386         hci_dev_unlock(hdev);
387
388         return rp->status;
389 }
390
391 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
392                                  struct sk_buff *skb)
393 {
394         struct hci_rp_read_local_name *rp = data;
395
396         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
397
398         if (rp->status)
399                 return rp->status;
400
401         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
402             hci_dev_test_flag(hdev, HCI_CONFIG))
403                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
404
405         return rp->status;
406 }
407
408 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
409                                    struct sk_buff *skb)
410 {
411         struct hci_ev_status *rp = data;
412         void *sent;
413
414         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
415
416         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
417         if (!sent)
418                 return rp->status;
419
420         hci_dev_lock(hdev);
421
422         if (!rp->status) {
423                 __u8 param = *((__u8 *) sent);
424
425                 if (param == AUTH_ENABLED)
426                         set_bit(HCI_AUTH, &hdev->flags);
427                 else
428                         clear_bit(HCI_AUTH, &hdev->flags);
429         }
430
431         if (hci_dev_test_flag(hdev, HCI_MGMT))
432                 mgmt_auth_enable_complete(hdev, rp->status);
433
434         hci_dev_unlock(hdev);
435
436         return rp->status;
437 }
438
439 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
440                                     struct sk_buff *skb)
441 {
442         struct hci_ev_status *rp = data;
443         __u8 param;
444         void *sent;
445
446         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
447
448         if (rp->status)
449                 return rp->status;
450
451         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
452         if (!sent)
453                 return rp->status;
454
455         param = *((__u8 *) sent);
456
457         if (param)
458                 set_bit(HCI_ENCRYPT, &hdev->flags);
459         else
460                 clear_bit(HCI_ENCRYPT, &hdev->flags);
461
462         return rp->status;
463 }
464
465 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
466                                    struct sk_buff *skb)
467 {
468         struct hci_ev_status *rp = data;
469         __u8 param;
470         void *sent;
471
472         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
473
474         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
475         if (!sent)
476                 return rp->status;
477
478         param = *((__u8 *) sent);
479
480         hci_dev_lock(hdev);
481
482         if (rp->status) {
483                 hdev->discov_timeout = 0;
484                 goto done;
485         }
486
487         if (param & SCAN_INQUIRY)
488                 set_bit(HCI_ISCAN, &hdev->flags);
489         else
490                 clear_bit(HCI_ISCAN, &hdev->flags);
491
492         if (param & SCAN_PAGE)
493                 set_bit(HCI_PSCAN, &hdev->flags);
494         else
495                 clear_bit(HCI_PSCAN, &hdev->flags);
496
497 done:
498         hci_dev_unlock(hdev);
499
500         return rp->status;
501 }
502
503 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
504                                   struct sk_buff *skb)
505 {
506         struct hci_ev_status *rp = data;
507         struct hci_cp_set_event_filter *cp;
508         void *sent;
509
510         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
511
512         if (rp->status)
513                 return rp->status;
514
515         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
516         if (!sent)
517                 return rp->status;
518
519         cp = (struct hci_cp_set_event_filter *)sent;
520
521         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
522                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
523         else
524                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
525
526         return rp->status;
527 }
528
529 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
530                                    struct sk_buff *skb)
531 {
532         struct hci_rp_read_class_of_dev *rp = data;
533
534         if (WARN_ON(!hdev))
535                 return HCI_ERROR_UNSPECIFIED;
536
537         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
538
539         if (rp->status)
540                 return rp->status;
541
542         memcpy(hdev->dev_class, rp->dev_class, 3);
543
544         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
545                    hdev->dev_class[1], hdev->dev_class[0]);
546
547         return rp->status;
548 }
549
550 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
551                                     struct sk_buff *skb)
552 {
553         struct hci_ev_status *rp = data;
554         void *sent;
555
556         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
557
558         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
559         if (!sent)
560                 return rp->status;
561
562         hci_dev_lock(hdev);
563
564         if (!rp->status)
565                 memcpy(hdev->dev_class, sent, 3);
566
567         if (hci_dev_test_flag(hdev, HCI_MGMT))
568                 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
569
570         hci_dev_unlock(hdev);
571
572         return rp->status;
573 }
574
575 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
576                                     struct sk_buff *skb)
577 {
578         struct hci_rp_read_voice_setting *rp = data;
579         __u16 setting;
580
581         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
582
583         if (rp->status)
584                 return rp->status;
585
586         setting = __le16_to_cpu(rp->voice_setting);
587
588         if (hdev->voice_setting == setting)
589                 return rp->status;
590
591         hdev->voice_setting = setting;
592
593         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
594
595         if (hdev->notify)
596                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
597
598         return rp->status;
599 }
600
601 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
602                                      struct sk_buff *skb)
603 {
604         struct hci_ev_status *rp = data;
605         __u16 setting;
606         void *sent;
607
608         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
609
610         if (rp->status)
611                 return rp->status;
612
613         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
614         if (!sent)
615                 return rp->status;
616
617         setting = get_unaligned_le16(sent);
618
619         if (hdev->voice_setting == setting)
620                 return rp->status;
621
622         hdev->voice_setting = setting;
623
624         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
625
626         if (hdev->notify)
627                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
628
629         return rp->status;
630 }
631
632 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
633                                         struct sk_buff *skb)
634 {
635         struct hci_rp_read_num_supported_iac *rp = data;
636
637         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
638
639         if (rp->status)
640                 return rp->status;
641
642         hdev->num_iac = rp->num_iac;
643
644         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
645
646         return rp->status;
647 }
648
649 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
650                                 struct sk_buff *skb)
651 {
652         struct hci_ev_status *rp = data;
653         struct hci_cp_write_ssp_mode *sent;
654
655         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
656
657         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
658         if (!sent)
659                 return rp->status;
660
661         hci_dev_lock(hdev);
662
663         if (!rp->status) {
664                 if (sent->mode)
665                         hdev->features[1][0] |= LMP_HOST_SSP;
666                 else
667                         hdev->features[1][0] &= ~LMP_HOST_SSP;
668         }
669
670         if (!rp->status) {
671                 if (sent->mode)
672                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
673                 else
674                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
675         }
676
677         hci_dev_unlock(hdev);
678
679         return rp->status;
680 }
681
682 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
683                                   struct sk_buff *skb)
684 {
685         struct hci_ev_status *rp = data;
686         struct hci_cp_write_sc_support *sent;
687
688         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
689
690         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
691         if (!sent)
692                 return rp->status;
693
694         hci_dev_lock(hdev);
695
696         if (!rp->status) {
697                 if (sent->support)
698                         hdev->features[1][0] |= LMP_HOST_SC;
699                 else
700                         hdev->features[1][0] &= ~LMP_HOST_SC;
701         }
702
703         if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
704                 if (sent->support)
705                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
706                 else
707                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
708         }
709
710         hci_dev_unlock(hdev);
711
712         return rp->status;
713 }
714
715 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
716                                     struct sk_buff *skb)
717 {
718         struct hci_rp_read_local_version *rp = data;
719
720         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
721
722         if (rp->status)
723                 return rp->status;
724
725         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
726             hci_dev_test_flag(hdev, HCI_CONFIG)) {
727                 hdev->hci_ver = rp->hci_ver;
728                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
729                 hdev->lmp_ver = rp->lmp_ver;
730                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
731                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
732         }
733
734         return rp->status;
735 }
736
/* Command Complete for HCI Read Encryption Key Size: record the key size
 * on the connection and feed the result into the encryption-change
 * confirmation path. A failed read or a too-short key is treated as an
 * authentication failure so the link gets torn down / re-secured.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		/* Connection vanished; nothing to confirm. */
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;

		if (conn->enc_key_size < hdev->min_enc_key_size) {
			/* As slave role, the conn->state has been set to
			 * BT_CONNECTED and l2cap conn req might not be received
			 * yet, at this moment the l2cap layer almost does
			 * nothing with the non-zero status.
			 * So we also clear encrypt related bits, and then the
			 * handler of l2cap conn req will get the right secure
			 * state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* Propagate success or failure to waiters on the encrypt change. */
	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}
791
792 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
793                                      struct sk_buff *skb)
794 {
795         struct hci_rp_read_local_commands *rp = data;
796
797         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
798
799         if (rp->status)
800                 return rp->status;
801
802         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
803             hci_dev_test_flag(hdev, HCI_CONFIG))
804                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
805
806         return rp->status;
807 }
808
809 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
810                                            struct sk_buff *skb)
811 {
812         struct hci_rp_read_auth_payload_to *rp = data;
813         struct hci_conn *conn;
814
815         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
816
817         if (rp->status)
818                 return rp->status;
819
820         hci_dev_lock(hdev);
821
822         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
823         if (conn)
824                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
825
826         hci_dev_unlock(hdev);
827
828         return rp->status;
829 }
830
831 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
832                                             struct sk_buff *skb)
833 {
834         struct hci_rp_write_auth_payload_to *rp = data;
835         struct hci_conn *conn;
836         void *sent;
837
838         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
839
840         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
841         if (!sent)
842                 return rp->status;
843
844         hci_dev_lock(hdev);
845
846         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
847         if (!conn) {
848                 rp->status = 0xff;
849                 goto unlock;
850         }
851
852         if (!rp->status)
853                 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
854
855 unlock:
856         hci_dev_unlock(hdev);
857
858         return rp->status;
859 }
860
/* Command Complete for HCI Read Local Supported Features: cache the LMP
 * feature page 0 and derive the supported ACL packet types and
 * (e)SCO packet types from the feature bits.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets. */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types (HV2/HV3). */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types. */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}
912
913 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
914                                          struct sk_buff *skb)
915 {
916         struct hci_rp_read_local_ext_features *rp = data;
917
918         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
919
920         if (rp->status)
921                 return rp->status;
922
923         if (hdev->max_page < rp->max_page) {
924                 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
925                              &hdev->quirks))
926                         bt_dev_warn(hdev, "broken local ext features page 2");
927                 else
928                         hdev->max_page = rp->max_page;
929         }
930
931         if (rp->page < HCI_MAX_PAGES)
932                 memcpy(hdev->features[rp->page], rp->features, 8);
933
934         return rp->status;
935 }
936
937 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
938                                         struct sk_buff *skb)
939 {
940         struct hci_rp_read_flow_control_mode *rp = data;
941
942         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
943
944         if (rp->status)
945                 return rp->status;
946
947         hdev->flow_ctl_mode = rp->mode;
948
949         return rp->status;
950 }
951
952 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
953                                   struct sk_buff *skb)
954 {
955         struct hci_rp_read_buffer_size *rp = data;
956
957         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
958
959         if (rp->status)
960                 return rp->status;
961
962         hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
963         hdev->sco_mtu  = rp->sco_mtu;
964         hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
965         hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
966
967         if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
968                 hdev->sco_mtu  = 64;
969                 hdev->sco_pkts = 8;
970         }
971
972         hdev->acl_cnt = hdev->acl_pkts;
973         hdev->sco_cnt = hdev->sco_pkts;
974
975         BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
976                hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
977
978         return rp->status;
979 }
980
981 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
982                               struct sk_buff *skb)
983 {
984         struct hci_rp_read_bd_addr *rp = data;
985
986         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
987
988         if (rp->status)
989                 return rp->status;
990
991         if (test_bit(HCI_INIT, &hdev->flags))
992                 bacpy(&hdev->bdaddr, &rp->bdaddr);
993
994         if (hci_dev_test_flag(hdev, HCI_SETUP))
995                 bacpy(&hdev->setup_addr, &rp->bdaddr);
996
997         return rp->status;
998 }
999
1000 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
1001                                          struct sk_buff *skb)
1002 {
1003         struct hci_rp_read_local_pairing_opts *rp = data;
1004
1005         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1006
1007         if (rp->status)
1008                 return rp->status;
1009
1010         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1011             hci_dev_test_flag(hdev, HCI_CONFIG)) {
1012                 hdev->pairing_opts = rp->pairing_opts;
1013                 hdev->max_enc_key_size = rp->max_key_size;
1014         }
1015
1016         return rp->status;
1017 }
1018
1019 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
1020                                          struct sk_buff *skb)
1021 {
1022         struct hci_rp_read_page_scan_activity *rp = data;
1023
1024         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1025
1026         if (rp->status)
1027                 return rp->status;
1028
1029         if (test_bit(HCI_INIT, &hdev->flags)) {
1030                 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1031                 hdev->page_scan_window = __le16_to_cpu(rp->window);
1032         }
1033
1034         return rp->status;
1035 }
1036
1037 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1038                                           struct sk_buff *skb)
1039 {
1040         struct hci_ev_status *rp = data;
1041         struct hci_cp_write_page_scan_activity *sent;
1042
1043         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1044
1045         if (rp->status)
1046                 return rp->status;
1047
1048         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1049         if (!sent)
1050                 return rp->status;
1051
1052         hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1053         hdev->page_scan_window = __le16_to_cpu(sent->window);
1054
1055         return rp->status;
1056 }
1057
1058 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1059                                      struct sk_buff *skb)
1060 {
1061         struct hci_rp_read_page_scan_type *rp = data;
1062
1063         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1064
1065         if (rp->status)
1066                 return rp->status;
1067
1068         if (test_bit(HCI_INIT, &hdev->flags))
1069                 hdev->page_scan_type = rp->type;
1070
1071         return rp->status;
1072 }
1073
1074 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1075                                       struct sk_buff *skb)
1076 {
1077         struct hci_ev_status *rp = data;
1078         u8 *type;
1079
1080         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1081
1082         if (rp->status)
1083                 return rp->status;
1084
1085         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1086         if (type)
1087                 hdev->page_scan_type = *type;
1088
1089         return rp->status;
1090 }
1091
/* Handle the Read_Data_Block_Size command complete event: cache the
 * block-based flow control parameters (block-mode ACL MTU, block length
 * and number of blocks) reported by the controller.
 */
static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
                                      struct sk_buff *skb)
{
        struct hci_rp_read_data_block_size *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
        hdev->block_len = __le16_to_cpu(rp->block_len);
        hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

        /* Start with all data blocks available */
        hdev->block_cnt = hdev->num_blocks;

        BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
               hdev->block_cnt, hdev->block_len);

        return rp->status;
}
1113
/* Handle the Read_Clock command complete event.
 *
 * Depending on the "which" parameter of the command we sent, the reply
 * carries either the local Bluetooth clock or the piconet clock of a
 * specific connection.
 */
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
                            struct sk_buff *skb)
{
        struct hci_rp_read_clock *rp = data;
        struct hci_cp_read_clock *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hci_dev_lock(hdev);

        /* The reply does not echo "which"; recover it from the command */
        cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
        if (!cp)
                goto unlock;

        if (cp->which == 0x00) {
                /* 0x00 = local clock, not tied to any connection */
                hdev->clock = le32_to_cpu(rp->clock);
                goto unlock;
        }

        /* Otherwise store the piconet clock on the matching connection */
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (conn) {
                conn->clock = le32_to_cpu(rp->clock);
                conn->clock_accuracy = le16_to_cpu(rp->accuracy);
        }

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
1147
/* Handle the Read_Local_AMP_Info command complete event: cache the AMP
 * controller capabilities (status, bandwidth limits, latency, PDU size,
 * PAL capabilities and flush timeouts) reported by the controller.
 */
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_rp_read_local_amp_info *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hdev->amp_status = rp->amp_status;
        hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
        hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
        hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
        hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
        hdev->amp_type = rp->amp_type;
        hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
        hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
        hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
        hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

        return rp->status;
}
1171
1172 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1173                                        struct sk_buff *skb)
1174 {
1175         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1176
1177         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1178
1179         if (rp->status)
1180                 return rp->status;
1181
1182         hdev->inq_tx_power = rp->tx_power;
1183
1184         return rp->status;
1185 }
1186
1187 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1188                                              struct sk_buff *skb)
1189 {
1190         struct hci_rp_read_def_err_data_reporting *rp = data;
1191
1192         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1193
1194         if (rp->status)
1195                 return rp->status;
1196
1197         hdev->err_data_reporting = rp->err_data_reporting;
1198
1199         return rp->status;
1200 }
1201
1202 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1203                                               struct sk_buff *skb)
1204 {
1205         struct hci_ev_status *rp = data;
1206         struct hci_cp_write_def_err_data_reporting *cp;
1207
1208         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1209
1210         if (rp->status)
1211                 return rp->status;
1212
1213         cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1214         if (!cp)
1215                 return rp->status;
1216
1217         hdev->err_data_reporting = cp->err_data_reporting;
1218
1219         return rp->status;
1220 }
1221
/* Handle the PIN_Code_Request_Reply command complete event. */
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
                                struct sk_buff *skb)
{
        struct hci_rp_pin_code_reply *rp = data;
        struct hci_cp_pin_code_reply *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        /* Notify the management interface for both success and failure */
        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

        if (rp->status)
                goto unlock;

        /* On success, remember the PIN length on the matching ACL link;
         * the length is taken from the command we sent since the event
         * does not echo it.
         */
        cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
        if (!cp)
                goto unlock;

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
        if (conn)
                conn->pin_length = cp->pin_len;

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
1251
1252 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1253                                     struct sk_buff *skb)
1254 {
1255         struct hci_rp_pin_code_neg_reply *rp = data;
1256
1257         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1258
1259         hci_dev_lock(hdev);
1260
1261         if (hci_dev_test_flag(hdev, HCI_MGMT))
1262                 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1263                                                  rp->status);
1264
1265         hci_dev_unlock(hdev);
1266
1267         return rp->status;
1268 }
1269
/* Handle the LE_Read_Buffer_Size command complete event: cache the LE
 * ACL data MTU and packet quota used for LE flow control.
 */
static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_rp_le_read_buffer_size *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
        hdev->le_pkts = rp->le_max_pkt;

        /* Start with the full LE packet quota available */
        hdev->le_cnt = hdev->le_pkts;

        BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

        return rp->status;
}
1289
1290 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1291                                         struct sk_buff *skb)
1292 {
1293         struct hci_rp_le_read_local_features *rp = data;
1294
1295         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1296
1297         if (rp->status)
1298                 return rp->status;
1299
1300         memcpy(hdev->le_features, rp->features, 8);
1301
1302         return rp->status;
1303 }
1304
1305 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1306                                       struct sk_buff *skb)
1307 {
1308         struct hci_rp_le_read_adv_tx_power *rp = data;
1309
1310         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1311
1312         if (rp->status)
1313                 return rp->status;
1314
1315         hdev->adv_tx_power = rp->tx_power;
1316
1317         return rp->status;
1318 }
1319
1320 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1321                                     struct sk_buff *skb)
1322 {
1323         struct hci_rp_user_confirm_reply *rp = data;
1324
1325         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1326
1327         hci_dev_lock(hdev);
1328
1329         if (hci_dev_test_flag(hdev, HCI_MGMT))
1330                 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1331                                                  rp->status);
1332
1333         hci_dev_unlock(hdev);
1334
1335         return rp->status;
1336 }
1337
1338 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1339                                         struct sk_buff *skb)
1340 {
1341         struct hci_rp_user_confirm_reply *rp = data;
1342
1343         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1344
1345         hci_dev_lock(hdev);
1346
1347         if (hci_dev_test_flag(hdev, HCI_MGMT))
1348                 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1349                                                      ACL_LINK, 0, rp->status);
1350
1351         hci_dev_unlock(hdev);
1352
1353         return rp->status;
1354 }
1355
1356 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1357                                     struct sk_buff *skb)
1358 {
1359         struct hci_rp_user_confirm_reply *rp = data;
1360
1361         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1362
1363         hci_dev_lock(hdev);
1364
1365         if (hci_dev_test_flag(hdev, HCI_MGMT))
1366                 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1367                                                  0, rp->status);
1368
1369         hci_dev_unlock(hdev);
1370
1371         return rp->status;
1372 }
1373
1374 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1375                                         struct sk_buff *skb)
1376 {
1377         struct hci_rp_user_confirm_reply *rp = data;
1378
1379         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1380
1381         hci_dev_lock(hdev);
1382
1383         if (hci_dev_test_flag(hdev, HCI_MGMT))
1384                 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1385                                                      ACL_LINK, 0, rp->status);
1386
1387         hci_dev_unlock(hdev);
1388
1389         return rp->status;
1390 }
1391
/* Handle the Read_Local_OOB_Data command complete event. Only the status
 * is logged and propagated here; NOTE(review): the OOB payload itself
 * appears to be consumed elsewhere (e.g. by the request completion
 * path) — confirm against the callers.
 */
static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_rp_read_local_oob_data *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        return rp->status;
}
1401
/* Handle the Read_Local_OOB_Extended_Data command complete event. Only
 * the status is logged and propagated here; NOTE(review): the extended
 * OOB payload appears to be consumed elsewhere — confirm against the
 * callers.
 */
static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
                                         struct sk_buff *skb)
{
        struct hci_rp_read_local_oob_ext_data *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        return rp->status;
}
1411
/* Handle the LE_Set_Random_Address command complete event: record the
 * new random address and, if it is the current resolvable private
 * address (RPA), re-arm the RPA expiry timer.
 */
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        bdaddr_t *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* The event does not echo the address; take it from the command */
        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        bacpy(&hdev->random_addr, sent);

        /* If the controller now uses the current RPA, clear the expired
         * flag and schedule the next rotation after rpa_timeout seconds.
         */
        if (!bacmp(&hdev->rpa, sent)) {
                hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
                                   secs_to_jiffies(hdev->rpa_timeout));
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1441
1442 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1443                                     struct sk_buff *skb)
1444 {
1445         struct hci_ev_status *rp = data;
1446         struct hci_cp_le_set_default_phy *cp;
1447
1448         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1449
1450         if (rp->status)
1451                 return rp->status;
1452
1453         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1454         if (!cp)
1455                 return rp->status;
1456
1457         hci_dev_lock(hdev);
1458
1459         hdev->le_tx_def_phys = cp->tx_phys;
1460         hdev->le_rx_def_phys = cp->rx_phys;
1461
1462         hci_dev_unlock(hdev);
1463
1464         return rp->status;
1465 }
1466
/* Handle the LE_Set_Advertising_Set_Random_Address command complete
 * event: mirror the address into the matching advertising instance and
 * re-arm its RPA expiry work if the address is the current RPA.
 */
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
                                            struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_le_set_adv_set_rand_addr *cp;
        struct adv_info *adv;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
        /* Update only in case the adv instance since handle 0x00 shall be using
         * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
         * non-extended adverting.
         */
        if (!cp || !cp->handle)
                return rp->status;

        hci_dev_lock(hdev);

        adv = hci_find_adv_instance(hdev, cp->handle);
        if (adv) {
                bacpy(&adv->random_addr, &cp->bdaddr);
                /* If this instance now uses the current RPA, schedule its
                 * per-instance rotation after rpa_timeout seconds.
                 */
                if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
                        adv->rpa_expired = false;
                        queue_delayed_work(hdev->workqueue,
                                           &adv->rpa_expired_cb,
                                           secs_to_jiffies(hdev->rpa_timeout));
                }
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1504
/* Handle the LE_Remove_Advertising_Set command complete event: drop the
 * corresponding advertising instance and notify the management layer.
 */
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        u8 *instance;
        int err;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* The event does not carry the handle; take it from the command */
        instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
        if (!instance)
                return rp->status;

        hci_dev_lock(hdev);

        err = hci_remove_adv_instance(hdev, *instance);
        if (!err)
                mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
                                         *instance);

        hci_dev_unlock(hdev);

        return rp->status;
}
1532
/* Handle the LE_Clear_Advertising_Sets command complete event: remove
 * every advertising instance and notify the management layer for each.
 */
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct adv_info *adv, *n;
        int err;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* Only act if this event matches a Clear_Adv_Sets we sent */
        if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
                return rp->status;

        hci_dev_lock(hdev);

        /* _safe variant: hci_remove_adv_instance() unlinks the entry */
        list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
                u8 instance = adv->instance;

                err = hci_remove_adv_instance(hdev, instance);
                if (!err)
                        mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
                                                 hdev, instance);
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1563
1564 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1565                                         struct sk_buff *skb)
1566 {
1567         struct hci_rp_le_read_transmit_power *rp = data;
1568
1569         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1570
1571         if (rp->status)
1572                 return rp->status;
1573
1574         hdev->min_le_tx_power = rp->min_le_tx_power;
1575         hdev->max_le_tx_power = rp->max_le_tx_power;
1576
1577         return rp->status;
1578 }
1579
/* Handle the LE_Set_Privacy_Mode command complete event: record the new
 * privacy mode on the matching connection parameters entry.
 */
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_le_set_privacy_mode *cp;
        struct hci_conn_params *params;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* The event carries no parameters; recover them from the command */
        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
        if (!cp)
                return rp->status;

        hci_dev_lock(hdev);

        params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
        /* NOTE(review): WRITE_ONCE suggests privacy_mode has lockless
         * readers elsewhere — confirm they use READ_ONCE.
         */
        if (params)
                WRITE_ONCE(params->privacy_mode, cp->mode);

        hci_dev_unlock(hdev);

        return rp->status;
}
1606
/* Handle the LE_Set_Advertising_Enable command complete event: track the
 * HCI_LE_ADV device flag and, when enabling while a peripheral
 * connection attempt is pending, arm the connection timeout.
 */
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        __u8 *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* The event carries no parameters; recover the enable value */
        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        /* If we're doing connection initiation as peripheral. Set a
         * timeout in case something goes wrong.
         */
        if (*sent) {
                struct hci_conn *conn;

                hci_dev_set_flag(hdev, HCI_LE_ADV);

                conn = hci_lookup_le_connect(hdev);
                if (conn)
                        queue_delayed_work(hdev->workqueue,
                                           &conn->le_conn_timeout,
                                           conn->conn_timeout);
        } else {
                hci_dev_clear_flag(hdev, HCI_LE_ADV);
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1645
/* Handle the LE_Set_Extended_Advertising_Enable command complete event:
 * track the enabled state of the affected advertising instance(s) and
 * the global HCI_LE_ADV flag, and arm the connection timeout when
 * enabling while a peripheral connection attempt is pending.
 */
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
                                       struct sk_buff *skb)
{
        struct hci_cp_le_set_ext_adv_enable *cp;
        struct hci_cp_ext_adv_set *set;
        struct adv_info *adv = NULL, *n;
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* The event carries no parameters; recover them from the command */
        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
        if (!cp)
                return rp->status;

        /* Only the first advertising set of the command is inspected */
        set = (void *)cp->data;

        hci_dev_lock(hdev);

        if (cp->num_of_sets)
                adv = hci_find_adv_instance(hdev, set->handle);

        if (cp->enable) {
                struct hci_conn *conn;

                hci_dev_set_flag(hdev, HCI_LE_ADV);

                if (adv && !adv->periodic)
                        adv->enabled = true;

                /* If a peripheral connection attempt is in progress, arm
                 * its timeout in case something goes wrong.
                 */
                conn = hci_lookup_le_connect(hdev);
                if (conn)
                        queue_delayed_work(hdev->workqueue,
                                           &conn->le_conn_timeout,
                                           conn->conn_timeout);
        } else {
                if (cp->num_of_sets) {
                        if (adv)
                                adv->enabled = false;

                        /* If just one instance was disabled check if there are
                         * any other instance enabled before clearing HCI_LE_ADV
                         */
                        list_for_each_entry_safe(adv, n, &hdev->adv_instances,
                                                 list) {
                                if (adv->enabled)
                                        goto unlock;
                        }
                } else {
                        /* All instances shall be considered disabled */
                        list_for_each_entry_safe(adv, n, &hdev->adv_instances,
                                                 list)
                                adv->enabled = false;
                }

                /* Reached only when no instance remains enabled */
                hci_dev_clear_flag(hdev, HCI_LE_ADV);
        }

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
1710
1711 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1712                                    struct sk_buff *skb)
1713 {
1714         struct hci_cp_le_set_scan_param *cp;
1715         struct hci_ev_status *rp = data;
1716
1717         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1718
1719         if (rp->status)
1720                 return rp->status;
1721
1722         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1723         if (!cp)
1724                 return rp->status;
1725
1726         hci_dev_lock(hdev);
1727
1728         hdev->le_scan_type = cp->type;
1729
1730         hci_dev_unlock(hdev);
1731
1732         return rp->status;
1733 }
1734
1735 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1736                                        struct sk_buff *skb)
1737 {
1738         struct hci_cp_le_set_ext_scan_params *cp;
1739         struct hci_ev_status *rp = data;
1740         struct hci_cp_le_scan_phy_params *phy_param;
1741
1742         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1743
1744         if (rp->status)
1745                 return rp->status;
1746
1747         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1748         if (!cp)
1749                 return rp->status;
1750
1751         phy_param = (void *)cp->data;
1752
1753         hci_dev_lock(hdev);
1754
1755         hdev->le_scan_type = phy_param->type;
1756
1757         hci_dev_unlock(hdev);
1758
1759         return rp->status;
1760 }
1761
1762 static bool has_pending_adv_report(struct hci_dev *hdev)
1763 {
1764         struct discovery_state *d = &hdev->discovery;
1765
1766         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1767 }
1768
1769 static void clear_pending_adv_report(struct hci_dev *hdev)
1770 {
1771         struct discovery_state *d = &hdev->discovery;
1772
1773         bacpy(&d->last_adv_addr, BDADDR_ANY);
1774         d->last_adv_data_len = 0;
1775 }
1776
#ifndef TIZEN_BT
/* Stash the most recent advertising report in the discovery state so
 * that it can still be reported when scanning is disabled (see
 * le_set_scan_enable_complete()).
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 bdaddr_type, s8 rssi, u32 flags,
                                     u8 *data, u8 len)
{
        struct discovery_state *d = &hdev->discovery;

        /* Drop reports longer than the controller's maximum adv data size */
        if (len > max_adv_len(hdev))
                return;

        bacpy(&d->last_adv_addr, bdaddr);
        d->last_adv_addr_type = bdaddr_type;
        d->last_adv_rssi = rssi;
        d->last_adv_flags = flags;
        memcpy(d->last_adv_data, data, len);
        d->last_adv_data_len = len;
}
#endif
1795
/* Common completion logic for the legacy and extended LE scan enable
 * commands: update the HCI_LE_SCAN flag, flush or clear any pending
 * advertising report, and keep the discovery state machine in sync.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
        hci_dev_lock(hdev);

        switch (enable) {
        case LE_SCAN_ENABLE:
                hci_dev_set_flag(hdev, HCI_LE_SCAN);
                /* Active scanning merges adv reports with scan responses,
                 * so start from a clean slate.
                 */
                if (hdev->le_scan_type == LE_SCAN_ACTIVE)
                        clear_pending_adv_report(hdev);
                if (hci_dev_test_flag(hdev, HCI_MESH))
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;

        case LE_SCAN_DISABLE:
                /* We do this here instead of when setting DISCOVERY_STOPPED
                 * since the latter would potentially require waiting for
                 * inquiry to stop too.
                 */
                if (has_pending_adv_report(hdev)) {
                        struct discovery_state *d = &hdev->discovery;

                        mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
                                          d->last_adv_addr_type, NULL,
                                          d->last_adv_rssi, d->last_adv_flags,
                                          d->last_adv_data,
                                          d->last_adv_data_len, NULL, 0, 0);
                }

                /* Cancel this timer so that we don't try to disable scanning
                 * when it's already disabled.
                 */
                cancel_delayed_work(&hdev->le_scan_disable);

                hci_dev_clear_flag(hdev, HCI_LE_SCAN);

                /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
                 * interrupted scanning due to a connect request. Mark
                 * therefore discovery as stopped.
                 */
                if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
#ifndef TIZEN_BT /* The below line is kernel bug. */
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
#else
                        hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
#endif
                else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
                         hdev->discovery.state == DISCOVERY_FINDING)
                        queue_work(hdev->workqueue, &hdev->reenable_adv_work);

                break;

        default:
                bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
                           enable);
                break;
        }

        hci_dev_unlock(hdev);
}
1855
1856 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1857                                     struct sk_buff *skb)
1858 {
1859         struct hci_cp_le_set_scan_enable *cp;
1860         struct hci_ev_status *rp = data;
1861
1862         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1863
1864         if (rp->status)
1865                 return rp->status;
1866
1867         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1868         if (!cp)
1869                 return rp->status;
1870
1871         le_set_scan_enable_complete(hdev, cp->enable);
1872
1873         return rp->status;
1874 }
1875
1876 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1877                                         struct sk_buff *skb)
1878 {
1879         struct hci_cp_le_set_ext_scan_enable *cp;
1880         struct hci_ev_status *rp = data;
1881
1882         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1883
1884         if (rp->status)
1885                 return rp->status;
1886
1887         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1888         if (!cp)
1889                 return rp->status;
1890
1891         le_set_scan_enable_complete(hdev, cp->enable);
1892
1893         return rp->status;
1894 }
1895
1896 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1897                                       struct sk_buff *skb)
1898 {
1899         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1900
1901         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1902                    rp->num_of_sets);
1903
1904         if (rp->status)
1905                 return rp->status;
1906
1907         hdev->le_num_of_adv_sets = rp->num_of_sets;
1908
1909         return rp->status;
1910 }
1911
1912 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1913                                           struct sk_buff *skb)
1914 {
1915         struct hci_rp_le_read_accept_list_size *rp = data;
1916
1917         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1918
1919         if (rp->status)
1920                 return rp->status;
1921
1922         hdev->le_accept_list_size = rp->size;
1923
1924         return rp->status;
1925 }
1926
1927 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1928                                       struct sk_buff *skb)
1929 {
1930         struct hci_ev_status *rp = data;
1931
1932         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1933
1934         if (rp->status)
1935                 return rp->status;
1936
1937         hci_dev_lock(hdev);
1938         hci_bdaddr_list_clear(&hdev->le_accept_list);
1939         hci_dev_unlock(hdev);
1940
1941         return rp->status;
1942 }
1943
1944 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1945                                        struct sk_buff *skb)
1946 {
1947         struct hci_cp_le_add_to_accept_list *sent;
1948         struct hci_ev_status *rp = data;
1949
1950         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1951
1952         if (rp->status)
1953                 return rp->status;
1954
1955         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1956         if (!sent)
1957                 return rp->status;
1958
1959         hci_dev_lock(hdev);
1960         hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1961                             sent->bdaddr_type);
1962         hci_dev_unlock(hdev);
1963
1964         return rp->status;
1965 }
1966
1967 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1968                                          struct sk_buff *skb)
1969 {
1970         struct hci_cp_le_del_from_accept_list *sent;
1971         struct hci_ev_status *rp = data;
1972
1973         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1974
1975         if (rp->status)
1976                 return rp->status;
1977
1978         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1979         if (!sent)
1980                 return rp->status;
1981
1982         hci_dev_lock(hdev);
1983         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1984                             sent->bdaddr_type);
1985         hci_dev_unlock(hdev);
1986
1987         return rp->status;
1988 }
1989
1990 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1991                                           struct sk_buff *skb)
1992 {
1993         struct hci_rp_le_read_supported_states *rp = data;
1994
1995         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1996
1997         if (rp->status)
1998                 return rp->status;
1999
2000         memcpy(hdev->le_states, rp->le_states, 8);
2001
2002         return rp->status;
2003 }
2004
2005 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
2006                                       struct sk_buff *skb)
2007 {
2008         struct hci_rp_le_read_def_data_len *rp = data;
2009
2010         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2011
2012         if (rp->status)
2013                 return rp->status;
2014
2015         hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
2016         hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
2017
2018         return rp->status;
2019 }
2020
2021 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
2022                                        struct sk_buff *skb)
2023 {
2024         struct hci_cp_le_write_def_data_len *sent;
2025         struct hci_ev_status *rp = data;
2026
2027         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2028
2029         if (rp->status)
2030                 return rp->status;
2031
2032         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2033         if (!sent)
2034                 return rp->status;
2035
2036         hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2037         hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2038
2039         return rp->status;
2040 }
2041
2042 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2043                                        struct sk_buff *skb)
2044 {
2045         struct hci_cp_le_add_to_resolv_list *sent;
2046         struct hci_ev_status *rp = data;
2047
2048         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2049
2050         if (rp->status)
2051                 return rp->status;
2052
2053         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2054         if (!sent)
2055                 return rp->status;
2056
2057         hci_dev_lock(hdev);
2058         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2059                                 sent->bdaddr_type, sent->peer_irk,
2060                                 sent->local_irk);
2061         hci_dev_unlock(hdev);
2062
2063         return rp->status;
2064 }
2065
2066 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2067                                          struct sk_buff *skb)
2068 {
2069         struct hci_cp_le_del_from_resolv_list *sent;
2070         struct hci_ev_status *rp = data;
2071
2072         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2073
2074         if (rp->status)
2075                 return rp->status;
2076
2077         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2078         if (!sent)
2079                 return rp->status;
2080
2081         hci_dev_lock(hdev);
2082         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2083                             sent->bdaddr_type);
2084         hci_dev_unlock(hdev);
2085
2086         return rp->status;
2087 }
2088
2089 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2090                                       struct sk_buff *skb)
2091 {
2092         struct hci_ev_status *rp = data;
2093
2094         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2095
2096         if (rp->status)
2097                 return rp->status;
2098
2099         hci_dev_lock(hdev);
2100         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2101         hci_dev_unlock(hdev);
2102
2103         return rp->status;
2104 }
2105
2106 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2107                                           struct sk_buff *skb)
2108 {
2109         struct hci_rp_le_read_resolv_list_size *rp = data;
2110
2111         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2112
2113         if (rp->status)
2114                 return rp->status;
2115
2116         hdev->le_resolv_list_size = rp->size;
2117
2118         return rp->status;
2119 }
2120
2121 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2122                                                struct sk_buff *skb)
2123 {
2124         struct hci_ev_status *rp = data;
2125         __u8 *sent;
2126
2127         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2128
2129         if (rp->status)
2130                 return rp->status;
2131
2132         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2133         if (!sent)
2134                 return rp->status;
2135
2136         hci_dev_lock(hdev);
2137
2138         if (*sent)
2139                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2140         else
2141                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2142
2143         hci_dev_unlock(hdev);
2144
2145         return rp->status;
2146 }
2147
/* Command Complete handler for HCI_OP_LE_READ_MAX_DATA_LEN.
 *
 * Caches the controller's maximum LE data length and time values.  On
 * TIZEN_BT builds the result is also forwarded to the mgmt layer.
 */
static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

#ifndef TIZEN_BT
	if (rp->status)
		return rp->status;
#else
	/* NOTE(review): on TIZEN_BT the hdev fields below are updated even
	 * when rp->status indicates failure; only the mgmt completion call
	 * carries the status.  Confirm this is intentional.
	 */
	hci_dev_lock(hdev);
#endif

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

#ifdef TIZEN_BT
	mgmt_le_read_maximum_data_length_complete(hdev, rp->status);
	hci_dev_unlock(hdev);
#endif

	return rp->status;
}
2174
2175 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2176                                          struct sk_buff *skb)
2177 {
2178         struct hci_cp_write_le_host_supported *sent;
2179         struct hci_ev_status *rp = data;
2180
2181         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2182
2183         if (rp->status)
2184                 return rp->status;
2185
2186         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2187         if (!sent)
2188                 return rp->status;
2189
2190         hci_dev_lock(hdev);
2191
2192         if (sent->le) {
2193                 hdev->features[1][0] |= LMP_HOST_LE;
2194                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2195         } else {
2196                 hdev->features[1][0] &= ~LMP_HOST_LE;
2197                 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2198                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2199         }
2200
2201         if (sent->simul)
2202                 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2203         else
2204                 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2205
2206         hci_dev_unlock(hdev);
2207
2208         return rp->status;
2209 }
2210
2211 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2212                                struct sk_buff *skb)
2213 {
2214         struct hci_cp_le_set_adv_param *cp;
2215         struct hci_ev_status *rp = data;
2216
2217         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2218
2219         if (rp->status)
2220                 return rp->status;
2221
2222         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2223         if (!cp)
2224                 return rp->status;
2225
2226         hci_dev_lock(hdev);
2227         hdev->adv_addr_type = cp->own_address_type;
2228         hci_dev_unlock(hdev);
2229
2230         return rp->status;
2231 }
2232
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_PARAMS.
 *
 * Records the selected own-address type and the TX power the controller
 * chose, then refreshes the advertising data (which may embed TX power).
 */
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_rp_le_set_ext_adv_params *rp = data;
        struct hci_cp_le_set_ext_adv_params *cp;
        struct adv_info *adv_instance;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
        if (!cp)
                return rp->status;

        hci_dev_lock(hdev);
        hdev->adv_addr_type = cp->own_addr_type;
        if (!cp->handle) {
                /* Store in hdev for instance 0 */
                hdev->adv_tx_power = rp->tx_power;
        } else {
                /* Per-instance advertising: store on the matching instance,
                 * if it still exists.
                 */
                adv_instance = hci_find_adv_instance(hdev, cp->handle);
                if (adv_instance)
                        adv_instance->tx_power = rp->tx_power;
        }
        /* Update adv data as tx power is known now */
        hci_update_adv_data(hdev, cp->handle);

        hci_dev_unlock(hdev);

        return rp->status;
}
2266
2267 #ifdef TIZEN_BT
2268 static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
2269                              struct sk_buff *skb)
2270 {
2271         struct hci_cc_rsp_enable_rssi *rp = data;
2272
2273         BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
2274                hdev->name, rp->status, rp->le_ext_opcode);
2275
2276         mgmt_enable_rssi_cc(hdev, rp, rp->status);
2277
2278         return rp->status;
2279 }
2280
2281 static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
2282                               struct sk_buff *skb)
2283 {
2284         struct hci_cc_rp_get_raw_rssi *rp = data;
2285
2286         BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
2287                hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
2288
2289         mgmt_raw_rssi_response(hdev, rp, rp->status);
2290
2291         return rp->status;
2292 }
2293
/* Tizen vendor event: the controller raised an RSSI link alert for a
 * connection; relay handle, alert type and dBm value to mgmt.
 */
static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
                                               struct sk_buff *skb)
{
        struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;

        BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);

        mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
                            ev->rssi_dbm);
}
2304
/* Demultiplex Tizen vendor-specific group events (LE extension sub
 * events).  Currently only the RSSI link alert is handled.
 */
static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
                                              struct sk_buff *skb)
{
        struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
        __u8 event_le_ext_sub_code;

        BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
               LE_META_VENDOR_SPECIFIC_GROUP_EVENT);

        /* NOTE(review): skb->len is not validated against sizeof(*ev)
         * before the header is read/pulled here — confirm callers
         * guarantee a minimum event length.
         */
        skb_pull(skb, sizeof(*ev));
        event_le_ext_sub_code = ev->event_le_ext_sub_code;

        switch (event_le_ext_sub_code) {
        case LE_RSSI_LINK_ALERT:
                hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
                break;

        default:
                break;
        }
}
2326
/* Tizen vendor event: a multi-advertising instance changed state;
 * relay instance id, reason and connection handle to mgmt.
 */
static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
                                                  struct sk_buff *skb)
{
        struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;

        BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");

        mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
                                        ev->state_change_reason,
                                        ev->connection_handle);
}
2338
2339 static void hci_vendor_specific_evt(struct hci_dev *hdev, void *data,
2340                                     struct sk_buff *skb)
2341 {
2342         struct hci_ev_vendor_specific *ev = (void *)skb->data;
2343         __u8 event_sub_code;
2344
2345         BT_DBG("hci_vendor_specific_evt");
2346
2347         skb_pull(skb, sizeof(*ev));
2348         event_sub_code = ev->event_sub_code;
2349
2350         switch (event_sub_code) {
2351         case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
2352                 hci_vendor_specific_group_ext_evt(hdev, skb);
2353                 break;
2354
2355         case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
2356                 hci_vendor_multi_adv_state_change_evt(hdev, skb);
2357                 break;
2358
2359         default:
2360                 break;
2361         }
2362 }
2363 #endif
2364
2365 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2366                            struct sk_buff *skb)
2367 {
2368         struct hci_rp_read_rssi *rp = data;
2369         struct hci_conn *conn;
2370
2371         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2372
2373         if (rp->status)
2374                 return rp->status;
2375
2376         hci_dev_lock(hdev);
2377
2378         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2379         if (conn)
2380                 conn->rssi = rp->rssi;
2381
2382         hci_dev_unlock(hdev);
2383
2384         return rp->status;
2385 }
2386
2387 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2388                                struct sk_buff *skb)
2389 {
2390         struct hci_cp_read_tx_power *sent;
2391         struct hci_rp_read_tx_power *rp = data;
2392         struct hci_conn *conn;
2393
2394         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2395
2396         if (rp->status)
2397                 return rp->status;
2398
2399         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2400         if (!sent)
2401                 return rp->status;
2402
2403         hci_dev_lock(hdev);
2404
2405         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2406         if (!conn)
2407                 goto unlock;
2408
2409         switch (sent->type) {
2410         case 0x00:
2411                 conn->tx_power = rp->tx_power;
2412                 break;
2413         case 0x01:
2414                 conn->max_tx_power = rp->tx_power;
2415                 break;
2416         }
2417
2418 unlock:
2419         hci_dev_unlock(hdev);
2420         return rp->status;
2421 }
2422
2423 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2424                                       struct sk_buff *skb)
2425 {
2426         struct hci_ev_status *rp = data;
2427         u8 *mode;
2428
2429         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2430
2431         if (rp->status)
2432                 return rp->status;
2433
2434         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2435         if (mode)
2436                 hdev->ssp_debug_mode = *mode;
2437
2438         return rp->status;
2439 }
2440
2441 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2442 {
2443         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2444
2445         if (status) {
2446                 hci_conn_check_pending(hdev);
2447                 return;
2448         }
2449
2450         if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2451                 set_bit(HCI_INQUIRY, &hdev->flags);
2452 }
2453
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, close (or schedule a retry of) the connection object that
 * was created for the outgoing attempt.  On success, make sure an ACL
 * connection object exists so the later Connection Complete event can
 * find it.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_create_conn *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

        bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

        if (status) {
                if (conn && conn->state == BT_CONNECT) {
                        /* Status 0x0c (Command Disallowed) is retried by
                         * moving the connection to BT_CONNECT2, but only for
                         * the first two attempts; anything else is fatal.
                         */
                        if (status != 0x0c || conn->attempt > 2) {
                                conn->state = BT_CLOSED;
                                hci_connect_cfm(conn, status);
                                hci_conn_del(conn);
                        } else
                                conn->state = BT_CONNECT2;
                }
        } else {
                if (!conn) {
                        conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
                                                  HCI_ROLE_MASTER);
                        if (!conn)
                                bt_dev_err(hdev, "no memory for new connection");
                }
        }

        hci_dev_unlock(hdev);
}
2491
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling: find the parent ACL connection by the
 * handle we sent, and close the SCO link that was being set up on it.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_add_sco *cp;
        struct hci_conn *acl;
        struct hci_link *link;
        __u16 handle;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
        if (!cp)
                return;

        handle = __le16_to_cpu(cp->handle);

        bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

        hci_dev_lock(hdev);

        acl = hci_conn_hash_lookup_handle(hdev, handle);
        if (acl) {
                /* The SCO link being established is the first (only) entry
                 * on the ACL's link list.
                 */
                link = list_first_entry_or_null(&acl->link_list,
                                                struct hci_link, list);
                if (link && link->conn) {
                        link->conn->state = BT_CLOSED;

                        hci_connect_cfm(link->conn, status);
                        hci_conn_del(link->conn);
                }
        }

        hci_dev_unlock(hdev);
}
2528
2529 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2530 {
2531         struct hci_cp_auth_requested *cp;
2532         struct hci_conn *conn;
2533
2534         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2535
2536         if (!status)
2537                 return;
2538
2539         cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2540         if (!cp)
2541                 return;
2542
2543         hci_dev_lock(hdev);
2544
2545         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2546         if (conn) {
2547                 if (conn->state == BT_CONFIG) {
2548                         hci_connect_cfm(conn, status);
2549                         hci_conn_drop(conn);
2550                 }
2551         }
2552
2553         hci_dev_unlock(hdev);
2554 }
2555
2556 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2557 {
2558         struct hci_cp_set_conn_encrypt *cp;
2559         struct hci_conn *conn;
2560
2561         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2562
2563         if (!status)
2564                 return;
2565
2566         cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2567         if (!cp)
2568                 return;
2569
2570         hci_dev_lock(hdev);
2571
2572         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2573         if (conn) {
2574                 if (conn->state == BT_CONFIG) {
2575                         hci_connect_cfm(conn, status);
2576                         hci_conn_drop(conn);
2577                 }
2578         }
2579
2580         hci_dev_unlock(hdev);
2581 }
2582
2583 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2584                                     struct hci_conn *conn)
2585 {
2586         if (conn->state != BT_CONFIG || !conn->out)
2587                 return 0;
2588
2589         if (conn->pending_sec_level == BT_SECURITY_SDP)
2590                 return 0;
2591
2592         /* Only request authentication for SSP connections or non-SSP
2593          * devices with sec_level MEDIUM or HIGH or if MITM protection
2594          * is requested.
2595          */
2596         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2597             conn->pending_sec_level != BT_SECURITY_FIPS &&
2598             conn->pending_sec_level != BT_SECURITY_HIGH &&
2599             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2600                 return 0;
2601
2602         return 1;
2603 }
2604
2605 static int hci_resolve_name(struct hci_dev *hdev,
2606                                    struct inquiry_entry *e)
2607 {
2608         struct hci_cp_remote_name_req cp;
2609
2610         memset(&cp, 0, sizeof(cp));
2611
2612         bacpy(&cp.bdaddr, &e->data.bdaddr);
2613         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2614         cp.pscan_mode = e->data.pscan_mode;
2615         cp.clock_offset = e->data.clock_offset;
2616
2617         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2618 }
2619
2620 static bool hci_resolve_next_name(struct hci_dev *hdev)
2621 {
2622         struct discovery_state *discov = &hdev->discovery;
2623         struct inquiry_entry *e;
2624
2625         if (list_empty(&discov->resolve))
2626                 return false;
2627
2628         /* We should stop if we already spent too much time resolving names. */
2629         if (time_after(jiffies, discov->name_resolve_timeout)) {
2630                 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2631                 return false;
2632         }
2633
2634         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2635         if (!e)
2636                 return false;
2637
2638         if (hci_resolve_name(hdev, e) == 0) {
2639                 e->name_state = NAME_PENDING;
2640                 return true;
2641         }
2642
2643         return false;
2644 }
2645
/* Handle the completion (or abort) of a remote name lookup.
 *
 * Updates the mgmt connected/name state for the connection (if any),
 * then advances the discovery state machine: report the resolved name,
 * start resolving the next pending entry, or stop discovery when the
 * resolve list is exhausted or discovery is being stopped.
 *
 * @name/@name_len: the resolved name, or NULL/0 when resolution failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
                                   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
        struct discovery_state *discov = &hdev->discovery;
        struct inquiry_entry *e;

#ifdef TIZEN_BT
        /* Update the mgmt connected state if necessary. Be careful with
         * conn objects that exist but are not (yet) connected however.
         * Only those in BT_CONFIG or BT_CONNECTED states can be
         * considered connected.
         */
        if (conn &&
            (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
                if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                        mgmt_device_connected(hdev, conn, name, name_len);
                else
                        mgmt_device_name_update(hdev, bdaddr, name, name_len);
        }
#else
        if (conn &&
            (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
            !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                mgmt_device_connected(hdev, conn, name, name_len);
#endif

        if (discov->state == DISCOVERY_STOPPED)
                return;

        if (discov->state == DISCOVERY_STOPPING)
                goto discov_complete;

        if (discov->state != DISCOVERY_RESOLVING)
                return;

        e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
        /* If the device was not found in a list of found devices names of which
         * are pending. there is no need to continue resolving a next name as it
         * will be done upon receiving another Remote Name Request Complete
         * Event */
        if (!e)
                return;

        list_del(&e->list);

        e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
        mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
                         name, name_len);

        if (hci_resolve_next_name(hdev))
                return;

discov_complete:
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2701
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * On failure, run the pending-name bookkeeping with a NULL name and,
 * if the connection is an outgoing one that still needs it, fall back
 * to requesting authentication directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_remote_name_req *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* If successful wait for the name req complete event before
         * checking for the need to do authentication */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

        if (!conn)
                goto unlock;

        if (!hci_outgoing_auth_needed(hdev, conn))
                goto unlock;

        /* Only start authentication if none is pending yet; mark this
         * side as the initiator.
         */
        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_auth_requested auth_cp;

                set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

                auth_cp.handle = __cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
                             sizeof(auth_cp), &auth_cp);
        }

unlock:
        hci_dev_unlock(hdev);
}
2744
2745 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2746 {
2747         struct hci_cp_read_remote_features *cp;
2748         struct hci_conn *conn;
2749
2750         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2751
2752         if (!status)
2753                 return;
2754
2755         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2756         if (!cp)
2757                 return;
2758
2759         hci_dev_lock(hdev);
2760
2761         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2762         if (conn) {
2763                 if (conn->state == BT_CONFIG) {
2764                         hci_connect_cfm(conn, status);
2765                         hci_conn_drop(conn);
2766                 }
2767         }
2768
2769         hci_dev_unlock(hdev);
2770 }
2771
2772 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2773 {
2774         struct hci_cp_read_remote_ext_features *cp;
2775         struct hci_conn *conn;
2776
2777         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2778
2779         if (!status)
2780                 return;
2781
2782         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2783         if (!cp)
2784                 return;
2785
2786         hci_dev_lock(hdev);
2787
2788         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2789         if (conn) {
2790                 if (conn->state == BT_CONFIG) {
2791                         hci_connect_cfm(conn, status);
2792                         hci_conn_drop(conn);
2793                 }
2794         }
2795
2796         hci_dev_unlock(hdev);
2797 }
2798
2799 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2800                                        __u8 status)
2801 {
2802         struct hci_conn *acl;
2803         struct hci_link *link;
2804
2805         bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2806
2807         hci_dev_lock(hdev);
2808
2809         acl = hci_conn_hash_lookup_handle(hdev, handle);
2810         if (acl) {
2811                 link = list_first_entry_or_null(&acl->link_list,
2812                                                 struct hci_link, list);
2813                 if (link && link->conn) {
2814                         link->conn->state = BT_CLOSED;
2815
2816                         hci_connect_cfm(link->conn, status);
2817                         hci_conn_del(link->conn);
2818                 }
2819         }
2820
2821         hci_dev_unlock(hdev);
2822 }
2823
2824 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2825 {
2826         struct hci_cp_setup_sync_conn *cp;
2827
2828         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2829
2830         if (!status)
2831                 return;
2832
2833         cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2834         if (!cp)
2835                 return;
2836
2837         hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2838 }
2839
2840 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2841 {
2842         struct hci_cp_enhanced_setup_sync_conn *cp;
2843
2844         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2845
2846         if (!status)
2847                 return;
2848
2849         cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2850         if (!cp)
2851                 return;
2852
2853         hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2854 }
2855
2856 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2857 {
2858         struct hci_cp_sniff_mode *cp;
2859         struct hci_conn *conn;
2860
2861         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2862
2863         if (!status)
2864                 return;
2865
2866         cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2867         if (!cp)
2868                 return;
2869
2870         hci_dev_lock(hdev);
2871
2872         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2873         if (conn) {
2874                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2875
2876                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2877                         hci_sco_setup(conn, status);
2878         }
2879
2880         hci_dev_unlock(hdev);
2881 }
2882
2883 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2884 {
2885         struct hci_cp_exit_sniff_mode *cp;
2886         struct hci_conn *conn;
2887
2888         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2889
2890         if (!status)
2891                 return;
2892
2893         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2894         if (!cp)
2895                 return;
2896
2897         hci_dev_lock(hdev);
2898
2899         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2900         if (conn) {
2901                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2902
2903                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2904                         hci_sco_setup(conn, status);
2905         }
2906
2907         hci_dev_unlock(hdev);
2908 }
2909
/* Command Status handler for HCI_Disconnect.
 *
 * In the normal case (status 0x00, not suspended) nothing is done here;
 * the actual cleanup is deferred to HCI_EV_DISCONN_COMPLETE. This path
 * only performs immediate cleanup on command failure or while the
 * controller is suspended.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		/* Disconnect attempt failed: report to mgmt and, for an
		 * LE peripheral link, resume advertising on the instance
		 * associated with this connection.
		 */
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* Drop the stored link key when the connection was flagged for it */
	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-queue devices configured for auto-connection so they get
	 * reconnected later.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only re-arm on an actual link-loss timeout */
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	/* Notify upper layers before the connection object goes away */
	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
2991
2992 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2993 {
2994         /* When using controller based address resolution, then the new
2995          * address types 0x02 and 0x03 are used. These types need to be
2996          * converted back into either public address or random address type
2997          */
2998         switch (type) {
2999         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3000                 if (resolved)
3001                         *resolved = true;
3002                 return ADDR_LE_DEV_PUBLIC;
3003         case ADDR_LE_DEV_RANDOM_RESOLVED:
3004                 if (resolved)
3005                         *resolved = true;
3006                 return ADDR_LE_DEV_RANDOM;
3007         }
3008
3009         if (resolved)
3010                 *resolved = false;
3011         return type;
3012 }
3013
3014 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
3015                               u8 peer_addr_type, u8 own_address_type,
3016                               u8 filter_policy)
3017 {
3018         struct hci_conn *conn;
3019
3020         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
3021                                        peer_addr_type);
3022         if (!conn)
3023                 return;
3024
3025         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
3026
3027         /* Store the initiator and responder address information which
3028          * is needed for SMP. These values will not change during the
3029          * lifetime of the connection.
3030          */
3031         conn->init_addr_type = own_address_type;
3032         if (own_address_type == ADDR_LE_DEV_RANDOM)
3033                 bacpy(&conn->init_addr, &hdev->random_addr);
3034         else
3035                 bacpy(&conn->init_addr, &hdev->bdaddr);
3036
3037         conn->resp_addr_type = peer_addr_type;
3038         bacpy(&conn->resp_addr, peer_addr);
3039 }
3040
3041 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
3042 {
3043         struct hci_cp_le_create_conn *cp;
3044
3045         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3046
3047         /* All connection failure handling is taken care of by the
3048          * hci_conn_failed function which is triggered by the HCI
3049          * request completion callbacks used for connecting.
3050          */
3051         if (status)
3052                 return;
3053
3054         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
3055         if (!cp)
3056                 return;
3057
3058         hci_dev_lock(hdev);
3059
3060         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3061                           cp->own_address_type, cp->filter_policy);
3062
3063         hci_dev_unlock(hdev);
3064 }
3065
3066 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
3067 {
3068         struct hci_cp_le_ext_create_conn *cp;
3069
3070         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3071
3072         /* All connection failure handling is taken care of by the
3073          * hci_conn_failed function which is triggered by the HCI
3074          * request completion callbacks used for connecting.
3075          */
3076         if (status)
3077                 return;
3078
3079         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
3080         if (!cp)
3081                 return;
3082
3083         hci_dev_lock(hdev);
3084
3085         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3086                           cp->own_addr_type, cp->filter_policy);
3087
3088         hci_dev_unlock(hdev);
3089 }
3090
3091 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
3092 {
3093         struct hci_cp_le_read_remote_features *cp;
3094         struct hci_conn *conn;
3095
3096         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3097
3098         if (!status)
3099                 return;
3100
3101         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
3102         if (!cp)
3103                 return;
3104
3105         hci_dev_lock(hdev);
3106
3107         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3108         if (conn) {
3109                 if (conn->state == BT_CONFIG) {
3110                         hci_connect_cfm(conn, status);
3111                         hci_conn_drop(conn);
3112                 }
3113         }
3114
3115         hci_dev_unlock(hdev);
3116 }
3117
3118 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3119 {
3120         struct hci_cp_le_start_enc *cp;
3121         struct hci_conn *conn;
3122
3123         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3124
3125         if (!status)
3126                 return;
3127
3128         hci_dev_lock(hdev);
3129
3130         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3131         if (!cp)
3132                 goto unlock;
3133
3134         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3135         if (!conn)
3136                 goto unlock;
3137
3138         if (conn->state != BT_CONNECTED)
3139                 goto unlock;
3140
3141         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3142         hci_conn_drop(conn);
3143
3144 unlock:
3145         hci_dev_unlock(hdev);
3146 }
3147
3148 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3149 {
3150         struct hci_cp_switch_role *cp;
3151         struct hci_conn *conn;
3152
3153         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3154
3155         if (!status)
3156                 return;
3157
3158         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3159         if (!cp)
3160                 return;
3161
3162         hci_dev_lock(hdev);
3163
3164         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3165         if (conn)
3166                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3167
3168         hci_dev_unlock(hdev);
3169 }
3170
/* Handle HCI_EV_INQUIRY_COMPLETE.
 *
 * Clears the HCI_INQUIRY flag, wakes any waiters on it, and — when
 * mgmt-driven discovery is active — either starts remote name
 * resolution for cached entries that still need it or marks discovery
 * as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	/* Nothing to do if no inquiry was in progress */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state is only tracked when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution for the next cached entry needing it */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
3231
3232 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3233                                    struct sk_buff *skb)
3234 {
3235         struct hci_ev_inquiry_result *ev = edata;
3236         struct inquiry_data data;
3237         int i;
3238
3239         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3240                              flex_array_size(ev, info, ev->num)))
3241                 return;
3242
3243         bt_dev_dbg(hdev, "num %d", ev->num);
3244
3245         if (!ev->num)
3246                 return;
3247
3248         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3249                 return;
3250
3251         hci_dev_lock(hdev);
3252
3253         for (i = 0; i < ev->num; i++) {
3254                 struct inquiry_info *info = &ev->info[i];
3255                 u32 flags;
3256
3257                 bacpy(&data.bdaddr, &info->bdaddr);
3258                 data.pscan_rep_mode     = info->pscan_rep_mode;
3259                 data.pscan_period_mode  = info->pscan_period_mode;
3260                 data.pscan_mode         = info->pscan_mode;
3261                 memcpy(data.dev_class, info->dev_class, 3);
3262                 data.clock_offset       = info->clock_offset;
3263                 data.rssi               = HCI_RSSI_INVALID;
3264                 data.ssp_mode           = 0x00;
3265
3266                 flags = hci_inquiry_cache_update(hdev, &data, false);
3267
3268                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3269                                   info->dev_class, HCI_RSSI_INVALID,
3270                                   flags, NULL, 0, NULL, 0, 0);
3271         }
3272
3273         hci_dev_unlock(hdev);
3274 }
3275
/* Handle HCI_EV_CONN_COMPLETE.
 *
 * Finds (or, in auto-connect/SCO-retype corner cases, creates) the
 * matching connection object, assigns its handle, and finishes BR/EDR
 * connection setup: remote features read, packet type for legacy
 * controllers, sysfs/debugfs registration and SCO follow-up.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add_unset(hdev, ev->link_type,
						  &ev->bdaddr, HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* A completed SCO link may still be tracked under
			 * ESCO_LINK; retype the existing object instead of
			 * creating a new one.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		/* hci_conn_set_handle() can itself fail; treat that as a
		 * connection failure below.
		 */
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status)
			goto done;

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Keep an unpaired legacy link around long enough
			 * for pairing to take place.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}

#ifdef TIZEN_BT
		/* Tizen-specific: enforce a fixed supervision timeout when
		 * acting as central on this link.
		 */
		if (get_link_mode(conn) & HCI_LM_MASTER)
			hci_conn_change_supervision_timeout(conn,
					LINK_SUPERVISION_TIMEOUT);
#endif
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
3412
3413 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3414 {
3415         struct hci_cp_reject_conn_req cp;
3416
3417         bacpy(&cp.bdaddr, bdaddr);
3418         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3419         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3420 }
3421
/* Handle HCI_EV_CONN_REQUEST.
 *
 * Decides whether an incoming BR/EDR (or SCO/eSCO) connection is
 * accepted or rejected, based on the local link mode, protocol
 * callbacks, the reject/accept lists and the HCI_CONNECTABLE setting,
 * then sends the corresponding accept/reject command or defers the
 * decision to the upper layer.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Let registered protocols veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	/* Devices on the reject list are always refused */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class if we have an inquiry entry */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

#ifdef TIZEN_BT
		/* Tizen-specific: refuse a second (e)SCO link while one
		 * already exists, citing limited resources.
		 */
		if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
		    hci_conn_hash_lookup_sco(hdev)) {
			struct hci_cp_reject_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
			hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
				     sizeof(cp), &cp);
			hci_dev_unlock(hdev);
			return;
		}
#endif

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
					  HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Accept a synchronous connection with default parameters */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Defer the accept decision to the upper layer */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3543
3544 static u8 hci_to_mgmt_reason(u8 err)
3545 {
3546         switch (err) {
3547         case HCI_ERROR_CONNECTION_TIMEOUT:
3548                 return MGMT_DEV_DISCONN_TIMEOUT;
3549         case HCI_ERROR_REMOTE_USER_TERM:
3550         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3551         case HCI_ERROR_REMOTE_POWER_OFF:
3552                 return MGMT_DEV_DISCONN_REMOTE;
3553         case HCI_ERROR_LOCAL_HOST_TERM:
3554                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3555         default:
3556                 return MGMT_DEV_DISCONN_UNKNOWN;
3557         }
3558 }
3559
3560 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3561                                      struct sk_buff *skb)
3562 {
3563         struct hci_ev_disconn_complete *ev = data;
3564         u8 reason;
3565         struct hci_conn_params *params;
3566         struct hci_conn *conn;
3567         bool mgmt_connected;
3568
3569         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3570
3571         hci_dev_lock(hdev);
3572
3573         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3574         if (!conn)
3575                 goto unlock;
3576
3577         if (ev->status) {
3578                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3579                                        conn->dst_type, ev->status);
3580                 goto unlock;
3581         }
3582
3583         conn->state = BT_CLOSED;
3584
3585         mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3586
3587         if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3588                 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3589         else
3590                 reason = hci_to_mgmt_reason(ev->reason);
3591
3592         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3593                                 reason, mgmt_connected);
3594
3595         if (conn->type == ACL_LINK) {
3596                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3597                         hci_remove_link_key(hdev, &conn->dst);
3598
3599                 hci_update_scan(hdev);
3600         }
3601
3602         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3603         if (params) {
3604                 switch (params->auto_connect) {
3605                 case HCI_AUTO_CONN_LINK_LOSS:
3606                         if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3607                                 break;
3608                         fallthrough;
3609
3610                 case HCI_AUTO_CONN_DIRECT:
3611                 case HCI_AUTO_CONN_ALWAYS:
3612                         hci_pend_le_list_del_init(params);
3613                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
3614                         hci_update_passive_scan(hdev);
3615                         break;
3616
3617                 default:
3618                         break;
3619                 }
3620         }
3621
3622         hci_disconn_cfm(conn, ev->reason);
3623
3624         /* Re-enable advertising if necessary, since it might
3625          * have been disabled by the connection. From the
3626          * HCI_LE_Set_Advertise_Enable command description in
3627          * the core specification (v4.0):
3628          * "The Controller shall continue advertising until the Host
3629          * issues an LE_Set_Advertise_Enable command with
3630          * Advertising_Enable set to 0x00 (Advertising is disabled)
3631          * or until a connection is created or until the Advertising
3632          * is timed out due to Directed Advertising."
3633          */
3634         if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3635                 hdev->cur_adv_instance = conn->adv_instance;
3636                 hci_enable_advertising(hdev);
3637         }
3638
3639         hci_conn_del(conn);
3640
3641 #ifdef TIZEN_BT
3642         if (conn->type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
3643                 int iscan;
3644                 int pscan;
3645
3646                 iscan = test_bit(HCI_ISCAN, &hdev->flags);
3647                 pscan = test_bit(HCI_PSCAN, &hdev->flags);
3648                 if (!iscan && !pscan) {
3649                         u8 scan_enable = SCAN_PAGE;
3650
3651                         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3652                                      sizeof(scan_enable), &scan_enable);
3653                 }
3654         }
3655 #endif
3656
3657 unlock:
3658         hci_dev_unlock(hdev);
3659 }
3660
/* Handle HCI_EV_AUTH_COMPLETE.
 *
 * Updates the connection's authentication state, reports failures to
 * mgmt, and — depending on the connection state — either continues
 * with link encryption or notifies connect/auth completion.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

#ifdef TIZEN_BT
	/*  PIN or Key Missing patch */
	BT_DBG("remote_auth %x, remote_cap %x, auth_type %x, io_capability %x",
	       conn->remote_auth, conn->remote_cap,
	       conn->auth_type, conn->io_capability);

	/* Tizen-specific: on "PIN or Key Missing" (0x06) with SSP,
	 * drop the stale key and retry authentication instead of
	 * failing.
	 */
	if (ev->status == 0x06 && hci_conn_ssp_enabled(conn)) {
		struct hci_cp_auth_requested cp;

		BT_DBG("Pin or key missing");
		hci_remove_link_key(hdev, &conn->dst);
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);
		goto unlock;
	}
#endif

	if (!ev->status) {
		/* Authentication succeeded: promote the pending security
		 * level.
		 */
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* Continue setup by turning on encryption */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* If encryption was requested while authentication was pending,
	 * start it now (or report the failure).
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3742
3743 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3744                                 struct sk_buff *skb)
3745 {
3746         struct hci_ev_remote_name *ev = data;
3747         struct hci_conn *conn;
3748
3749         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3750
3751         hci_conn_check_pending(hdev);
3752
3753         hci_dev_lock(hdev);
3754
3755         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3756
3757         if (!hci_dev_test_flag(hdev, HCI_MGMT))
3758                 goto check_auth;
3759
3760         if (ev->status == 0)
3761                 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3762                                        strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3763         else
3764                 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3765
3766 check_auth:
3767         if (!conn)
3768                 goto unlock;
3769
3770         if (!hci_outgoing_auth_needed(hdev, conn))
3771                 goto unlock;
3772
3773         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3774                 struct hci_cp_auth_requested cp;
3775
3776                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3777
3778                 cp.handle = __cpu_to_le16(conn->handle);
3779                 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3780         }
3781
3782 unlock:
3783         hci_dev_unlock(hdev);
3784 }
3785
/* HCI Encryption Change event: update the connection's security flags
 * when link-level encryption is switched on or off and notify upper
 * layers via hci_encrypt_cfm().
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 reports AES-CCM on BR/EDR; LE
			 * links get the flag unconditionally.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met; if not, treat the
	 * change as an authentication failure from here on.
	 */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		cp.handle = cpu_to_le16(conn->handle);
		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to the key-size reply handler */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
				 sizeof(cp), &cp))
			bt_dev_err(hdev, "write auth payload timeout failed");
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3897
3898 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3899                                              struct sk_buff *skb)
3900 {
3901         struct hci_ev_change_link_key_complete *ev = data;
3902         struct hci_conn *conn;
3903
3904         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3905
3906         hci_dev_lock(hdev);
3907
3908         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3909         if (conn) {
3910                 if (!ev->status)
3911                         set_bit(HCI_CONN_SECURE, &conn->flags);
3912
3913                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3914
3915                 hci_key_change_cfm(conn, ev->status);
3916         }
3917
3918         hci_dev_unlock(hdev);
3919 }
3920
/* HCI Read Remote Supported Features Complete event: cache feature page
 * 0 of the peer and continue connection setup while in BT_CONFIG.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* When both sides support extended features, fetch page 1 next and
	 * let that event's handler finish the setup instead.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the device as
	 * connected to the management interface.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* No further authentication needed: connection setup is done */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3969
3970 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3971 {
3972         cancel_delayed_work(&hdev->cmd_timer);
3973
3974         rcu_read_lock();
3975         if (!test_bit(HCI_RESET, &hdev->flags)) {
3976                 if (ncmd) {
3977                         cancel_delayed_work(&hdev->ncmd_timer);
3978                         atomic_set(&hdev->cmd_cnt, 1);
3979                 } else {
3980                         if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3981                                 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3982                                                    HCI_NCMD_TIMEOUT);
3983                 }
3984         }
3985         rcu_read_unlock();
3986 }
3987
3988 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3989                                         struct sk_buff *skb)
3990 {
3991         struct hci_rp_le_read_buffer_size_v2 *rp = data;
3992
3993         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3994
3995         if (rp->status)
3996                 return rp->status;
3997
3998         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3999         hdev->le_pkts  = rp->acl_max_pkt;
4000         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
4001         hdev->iso_pkts = rp->iso_max_pkt;
4002
4003         hdev->le_cnt  = hdev->le_pkts;
4004         hdev->iso_cnt = hdev->iso_pkts;
4005
4006         BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
4007                hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
4008
4009         return rp->status;
4010 }
4011
4012 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
4013 {
4014         struct hci_conn *conn, *tmp;
4015
4016         lockdep_assert_held(&hdev->lock);
4017
4018         list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
4019                 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
4020                     conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
4021                         continue;
4022
4023                 if (HCI_CONN_HANDLE_UNSET(conn->handle))
4024                         hci_conn_failed(conn, status);
4025         }
4026 }
4027
/* Command Complete handler for HCI_OP_LE_SET_CIG_PARAMS: validate the
 * reply against the command actually sent and assign the returned
 * connection handles to the matching CIS connections.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_cp_le_set_cig_params *cp;
	struct hci_conn *conn;
	u8 status = rp->status;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Cross-check the reply with the request: a missing command or a
	 * CIG id / handle-count mismatch means the handles below cannot be
	 * trusted, so force an error status. This also guarantees cp is
	 * non-NULL whenever the assignment loop runs.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
			    rp->cig_id != cp->cig_id)) {
		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
		status = HCI_ERROR_UNSPECIFIED;
	}

	hci_dev_lock(hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
	 *
	 * If the Status return parameter is non-zero, then the state of the CIG
	 * and its CIS configurations shall not be changed by the command. If
	 * the CIG did not already exist, it shall not be created.
	 */
	if (status) {
		/* Keep current configuration, fail only the unbound CIS */
		hci_unbound_cis_failed(hdev, rp->cig_id, status);
		goto unlock;
	}

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
	 *
	 * If the Status return parameter is zero, then the Controller shall
	 * set the Connection_Handle arrayed return parameter to the connection
	 * handle(s) corresponding to the CIS configurations specified in
	 * the CIS_IDs command parameter, in the same order.
	 */
	for (i = 0; i < rp->num_handles; ++i) {
		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
						cp->cis[i].cis_id);
		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
			continue;

		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
			continue;

		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
			continue;

		if (conn->state == BT_CONNECT)
			pending = true;
	}

unlock:
	/* Resume any CIS creation that was waiting on these handles */
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);

	return rp->status;
}
4092
4093 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
4094                                    struct sk_buff *skb)
4095 {
4096         struct hci_rp_le_setup_iso_path *rp = data;
4097         struct hci_cp_le_setup_iso_path *cp;
4098         struct hci_conn *conn;
4099
4100         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4101
4102         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
4103         if (!cp)
4104                 return rp->status;
4105
4106         hci_dev_lock(hdev);
4107
4108         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
4109         if (!conn)
4110                 goto unlock;
4111
4112         if (rp->status) {
4113                 hci_connect_cfm(conn, rp->status);
4114                 hci_conn_del(conn);
4115                 goto unlock;
4116         }
4117
4118         switch (cp->direction) {
4119         /* Input (Host to Controller) */
4120         case 0x00:
4121                 /* Only confirm connection if output only */
4122                 if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
4123                         hci_connect_cfm(conn, rp->status);
4124                 break;
4125         /* Output (Controller to Host) */
4126         case 0x01:
4127                 /* Confirm connection since conn->iso_qos is always configured
4128                  * last.
4129                  */
4130                 hci_connect_cfm(conn, rp->status);
4131                 break;
4132         }
4133
4134 unlock:
4135         hci_dev_unlock(hdev);
4136         return rp->status;
4137 }
4138
/* Command Status handler for LE Create BIG: nothing to do here beyond
 * logging; presumably completion is handled from the corresponding
 * complete event elsewhere — confirm against the event dispatch table.
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
4143
4144 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
4145                                    struct sk_buff *skb)
4146 {
4147         struct hci_ev_status *rp = data;
4148         struct hci_cp_le_set_per_adv_params *cp;
4149
4150         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4151
4152         if (rp->status)
4153                 return rp->status;
4154
4155         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
4156         if (!cp)
4157                 return rp->status;
4158
4159         /* TODO: set the conn state */
4160         return rp->status;
4161 }
4162
4163 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
4164                                        struct sk_buff *skb)
4165 {
4166         struct hci_ev_status *rp = data;
4167         struct hci_cp_le_set_per_adv_enable *cp;
4168         struct adv_info *adv = NULL, *n;
4169         u8 per_adv_cnt = 0;
4170
4171         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4172
4173         if (rp->status)
4174                 return rp->status;
4175
4176         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
4177         if (!cp)
4178                 return rp->status;
4179
4180         hci_dev_lock(hdev);
4181
4182         adv = hci_find_adv_instance(hdev, cp->handle);
4183
4184         if (cp->enable) {
4185                 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
4186
4187                 if (adv)
4188                         adv->enabled = true;
4189         } else {
4190                 /* If just one instance was disabled check if there are
4191                  * any other instance enabled before clearing HCI_LE_PER_ADV.
4192                  * The current periodic adv instance will be marked as
4193                  * disabled once extended advertising is also disabled.
4194                  */
4195                 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
4196                                          list) {
4197                         if (adv->periodic && adv->enabled)
4198                                 per_adv_cnt++;
4199                 }
4200
4201                 if (per_adv_cnt > 1)
4202                         goto unlock;
4203
4204                 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4205         }
4206
4207 unlock:
4208         hci_dev_unlock(hdev);
4209
4210         return rp->status;
4211 }
4212
/* Declare an hci_cc_table entry: opcode, handler and the accepted
 * Command Complete parameter length as a [min, max] range.
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

/* Fixed-length entry: min and max length are identical */
#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

/* Entry whose reply carries only the status byte */
#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4226
/* Dispatch table mapping HCI Command Complete opcodes to their handler
 * and the expected reply length range; consumed by hci_cc_func(), which
 * rejects replies shorter than min_len and warns above max_len.
 */
static const struct hci_cc {
	u16  op;
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
#ifdef TIZEN_BT
	/* Tizen vendor-specific RSSI monitoring commands */
	HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
	       sizeof(struct hci_cc_rsp_enable_rssi)),
	HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
	       sizeof(struct hci_cc_rp_get_raw_rssi)),
#endif
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	/* Variable length: the reply carries num_handles arrayed handles */
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};
4407
4408 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4409                       struct sk_buff *skb)
4410 {
4411         void *data;
4412
4413         if (skb->len < cc->min_len) {
4414                 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4415                            cc->op, skb->len, cc->min_len);
4416                 return HCI_ERROR_UNSPECIFIED;
4417         }
4418
4419         /* Just warn if the length is over max_len size it still be possible to
4420          * partially parse the cc so leave to callback to decide if that is
4421          * acceptable.
4422          */
4423         if (skb->len > cc->max_len)
4424                 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4425                             cc->op, skb->len, cc->max_len);
4426
4427         data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4428         if (!data)
4429                 return HCI_ERROR_UNSPECIFIED;
4430
4431         return cc->func(hdev, data, skb);
4432 }
4433
4434 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4435                                  struct sk_buff *skb, u16 *opcode, u8 *status,
4436                                  hci_req_complete_t *req_complete,
4437                                  hci_req_complete_skb_t *req_complete_skb)
4438 {
4439         struct hci_ev_cmd_complete *ev = data;
4440         int i;
4441
4442         *opcode = __le16_to_cpu(ev->opcode);
4443
4444         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4445
4446         for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4447                 if (hci_cc_table[i].op == *opcode) {
4448                         *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4449                         break;
4450                 }
4451         }
4452
4453         if (i == ARRAY_SIZE(hci_cc_table)) {
4454                 /* Unknown opcode, assume byte 0 contains the status, so
4455                  * that e.g. __hci_cmd_sync() properly returns errors
4456                  * for vendor specific commands send by HCI drivers.
4457                  * If a vendor doesn't actually follow this convention we may
4458                  * need to introduce a vendor CC table in order to properly set
4459                  * the status.
4460                  */
4461                 *status = skb->data[0];
4462         }
4463
4464         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4465
4466         hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4467                              req_complete_skb);
4468
4469         if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4470                 bt_dev_err(hdev,
4471                            "unexpected event for opcode 0x%4.4x", *opcode);
4472                 return;
4473         }
4474
4475         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4476                 queue_work(hdev->workqueue, &hdev->cmd_work);
4477 }
4478
/* Handle the Command Status event for HCI_OP_LE_CREATE_CIS.
 *
 * On failure, every connection named in the sent Create CIS command is
 * torn down: each CIS handle is looked up and, when a connection exists,
 * it is closed, its creation confirmed with the error status and the
 * connection deleted. Success is handled via the CIS Established event.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	/* Retrieve the parameters of the command that was just sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			/* Remember if any connection was still waiting for
			 * CIS creation so the next queued one can be kicked
			 * off below.
			 */
			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
					       &conn->flags))
				pending = true;
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	/* Start creation of any CIS still queued */
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
4519
/* Build one entry of the Command Status dispatch table: maps an HCI
 * opcode to the handler invoked when a Command Status event for that
 * opcode arrives.
 */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table for Command Status events; searched linearly by
 * hci_cmd_status_evt(). Opcodes not listed here have no status handler.
 */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4553
/* Handle the HCI Command Status event.
 *
 * Dispatches the status to the per-opcode handler from hci_cs_table,
 * refreshes the outstanding-command counter/timer and, when appropriate,
 * completes the pending request so synchronous callers are woken up.
 *
 * @opcode/@status are out-parameters filled in for the caller;
 * @req_complete/@req_complete_skb receive any completion callbacks.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	/* Linear scan; opcodes missing from the table simply have no
	 * command-status handler.
	 */
	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	/* Refresh the command credit count and (re)arm the command timer */
	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		/* With HCI_CMD_PENDING set this event is unexpected: log
		 * it and do not kick the command work queue.
		 */
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	/* Send the next queued command if credits are available */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4595
/* Handle the Hardware Error event: record the controller's error code
 * and schedule the error_reset work to recover the device.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = data;

	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);

#ifdef TIZEN_BT
	/* Tizen: additionally report the hardware error to user space
	 * through the mgmt interface.
	 */
	hci_dev_lock(hdev);
	mgmt_hardware_error(hdev, ev->code);
	hci_dev_unlock(hdev);
#endif
	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}
4612
/* Handle the Role Change event: record the new role for the ACL
 * connection and confirm any pending role-switch request.
 */
static void hci_role_change_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Only adopt the new role if the switch succeeded */
		if (!ev->status)
			conn->role = ev->role;

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
#ifdef TIZEN_BT
		/* Tizen: adjust the supervision timeout when ending up in
		 * the central (master) role.
		 */
		if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
			hci_conn_change_supervision_timeout(conn,
					LINK_SUPERVISION_TIMEOUT);
#endif
	}

	hci_dev_unlock(hdev);
}
4640
4641 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4642                                   struct sk_buff *skb)
4643 {
4644         struct hci_ev_num_comp_pkts *ev = data;
4645         int i;
4646
4647         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4648                              flex_array_size(ev, handles, ev->num)))
4649                 return;
4650
4651         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4652                 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4653                 return;
4654         }
4655
4656         bt_dev_dbg(hdev, "num %d", ev->num);
4657
4658         for (i = 0; i < ev->num; i++) {
4659                 struct hci_comp_pkts_info *info = &ev->handles[i];
4660                 struct hci_conn *conn;
4661                 __u16  handle, count;
4662
4663                 handle = __le16_to_cpu(info->handle);
4664                 count  = __le16_to_cpu(info->count);
4665
4666                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4667                 if (!conn)
4668                         continue;
4669
4670                 conn->sent -= count;
4671
4672                 switch (conn->type) {
4673                 case ACL_LINK:
4674                         hdev->acl_cnt += count;
4675                         if (hdev->acl_cnt > hdev->acl_pkts)
4676                                 hdev->acl_cnt = hdev->acl_pkts;
4677                         break;
4678
4679                 case LE_LINK:
4680                         if (hdev->le_pkts) {
4681                                 hdev->le_cnt += count;
4682                                 if (hdev->le_cnt > hdev->le_pkts)
4683                                         hdev->le_cnt = hdev->le_pkts;
4684                         } else {
4685                                 hdev->acl_cnt += count;
4686                                 if (hdev->acl_cnt > hdev->acl_pkts)
4687                                         hdev->acl_cnt = hdev->acl_pkts;
4688                         }
4689                         break;
4690
4691                 case SCO_LINK:
4692                         hdev->sco_cnt += count;
4693                         if (hdev->sco_cnt > hdev->sco_pkts)
4694                                 hdev->sco_cnt = hdev->sco_pkts;
4695                         break;
4696
4697                 case ISO_LINK:
4698                         if (hdev->iso_pkts) {
4699                                 hdev->iso_cnt += count;
4700                                 if (hdev->iso_cnt > hdev->iso_pkts)
4701                                         hdev->iso_cnt = hdev->iso_pkts;
4702                         } else if (hdev->le_pkts) {
4703                                 hdev->le_cnt += count;
4704                                 if (hdev->le_cnt > hdev->le_pkts)
4705                                         hdev->le_cnt = hdev->le_pkts;
4706                         } else {
4707                                 hdev->acl_cnt += count;
4708                                 if (hdev->acl_cnt > hdev->acl_pkts)
4709                                         hdev->acl_cnt = hdev->acl_pkts;
4710                         }
4711                         break;
4712
4713                 default:
4714                         bt_dev_err(hdev, "unknown type %d conn %p",
4715                                    conn->type, conn);
4716                         break;
4717                 }
4718         }
4719
4720         queue_work(hdev->workqueue, &hdev->tx_work);
4721 }
4722
4723 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4724                                                  __u16 handle)
4725 {
4726         struct hci_chan *chan;
4727
4728         switch (hdev->dev_type) {
4729         case HCI_PRIMARY:
4730                 return hci_conn_hash_lookup_handle(hdev, handle);
4731         case HCI_AMP:
4732                 chan = hci_chan_lookup_handle(hdev, handle);
4733                 if (chan)
4734                         return chan->conn;
4735                 break;
4736         default:
4737                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4738                 break;
4739         }
4740
4741         return NULL;
4742 }
4743
/* Handle the Number Of Completed Data Blocks event (block-based flow
 * control): return transmit block credits for each reported handle and
 * kick the TX work so queued traffic can be sent.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = data;
	int i;

	/* Validate that the skb really carries ev->num_hndl entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
			     flex_array_size(ev, handles, ev->num_hndl)))
		return;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d",
			   hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
		   ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Device-type aware lookup (AMP handles map via channels) */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp at the advertised maximum so a misbehaving
			 * controller cannot inflate the credit pool.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4794
/* Handle the Mode Change event: track the connection's current power
 * mode (active/sniff/hold) and run any SCO setup that was deferred
 * until the mode change completed.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		/* NOTE(review): mode is cached even when ev->status is
		 * non-zero — confirm this is intentional.
		 */
		conn->mode = ev->mode;

		/* Only track the power-save flag when no local mode-change
		 * request is pending (i.e. the change was remote-initiated).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* A SCO setup may have been waiting for this mode change */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
4823
/* Handle the PIN Code Request event: reject the request when pairing is
 * not allowed, otherwise forward it to user space via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout so the pairing dialog has time to
	 * complete (hold + drop re-arms the timer with the new value).
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Reject pairing when we are not bondable and did not initiate
	 * the authentication ourselves.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* Request a secure (16-digit) PIN when high security is
		 * required.
		 */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
4862
4863 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4864 {
4865         if (key_type == HCI_LK_CHANGED_COMBINATION)
4866                 return;
4867
4868         conn->pin_length = pin_len;
4869         conn->key_type = key_type;
4870
4871         switch (key_type) {
4872         case HCI_LK_LOCAL_UNIT:
4873         case HCI_LK_REMOTE_UNIT:
4874         case HCI_LK_DEBUG_COMBINATION:
4875                 return;
4876         case HCI_LK_COMBINATION:
4877                 if (pin_len == 16)
4878                         conn->pending_sec_level = BT_SECURITY_HIGH;
4879                 else
4880                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4881                 break;
4882         case HCI_LK_UNAUTH_COMBINATION_P192:
4883         case HCI_LK_UNAUTH_COMBINATION_P256:
4884                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4885                 break;
4886         case HCI_LK_AUTH_COMBINATION_P192:
4887                 conn->pending_sec_level = BT_SECURITY_HIGH;
4888                 break;
4889         case HCI_LK_AUTH_COMBINATION_P256:
4890                 conn->pending_sec_level = BT_SECURITY_FIPS;
4891                 break;
4892         }
4893 }
4894
/* Handle the Link Key Request event: look up a stored link key for the
 * peer and reply with it, or send a negative reply when no acceptable
 * key is known.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	/* Key storage is only handled when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse an unauthenticated key when the connection's auth
		 * requirements include MITM protection (bit 0 set).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		/* A combination key derived from a short PIN cannot satisfy
		 * high/FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4952
/* Handle the Link Key Notification event: a new link key has been
 * created for the connection. Update the connection's security state,
 * store the key (when mgmt is in use) and notify user space.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;	/* the event does not carry a PIN length */

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Ignore NULL link key against CVE-2020-26555 */
	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
			   &ev->bdaddr);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Hold + drop re-arms the idle disconnect timer with the new
	 * timeout value.
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Without mgmt there is nowhere to store or report the key */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
				ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys must be flushed when the link goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
5022
5023 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
5024                                  struct sk_buff *skb)
5025 {
5026         struct hci_ev_clock_offset *ev = data;
5027         struct hci_conn *conn;
5028
5029         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5030
5031         hci_dev_lock(hdev);
5032
5033         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5034         if (conn && !ev->status) {
5035                 struct inquiry_entry *ie;
5036
5037                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
5038                 if (ie) {
5039                         ie->data.clock_offset = ev->clock_offset;
5040                         ie->timestamp = jiffies;
5041                 }
5042         }
5043
5044         hci_dev_unlock(hdev);
5045 }
5046
5047 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
5048                                     struct sk_buff *skb)
5049 {
5050         struct hci_ev_pkt_type_change *ev = data;
5051         struct hci_conn *conn;
5052
5053         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5054
5055         hci_dev_lock(hdev);
5056
5057         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5058         if (conn && !ev->status)
5059                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
5060
5061         hci_dev_unlock(hdev);
5062 }
5063
5064 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
5065                                    struct sk_buff *skb)
5066 {
5067         struct hci_ev_pscan_rep_mode *ev = data;
5068         struct inquiry_entry *ie;
5069
5070         bt_dev_dbg(hdev, "");
5071
5072         hci_dev_lock(hdev);
5073
5074         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5075         if (ie) {
5076                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
5077                 ie->timestamp = jiffies;
5078         }
5079
5080         hci_dev_unlock(hdev);
5081 }
5082
/* Handle the Inquiry Result with RSSI event.
 *
 * Two wire formats exist for this event: one where each entry carries an
 * extra pscan_mode field (struct inquiry_info_rssi_pscan) and the
 * standard one without it (struct inquiry_info_rssi). The total payload
 * length decides which format the controller used. Each result is added
 * to the inquiry cache and forwarded to mgmt as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	/* Results of a periodic inquiry are not reported individually */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Format with the extra pscan_mode field per entry */
	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			/* Pull each entry; bail on a truncated payload */
			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		/* Standard format without pscan_mode */
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		/* Length matches neither known format */
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}
5169
/* Handle the Read Remote Extended Features Complete event: cache the
 * reported features page and, for page 1 (remote host features), update
 * the connection's SSP and Secure Connections state. When the connection
 * is still in BT_CONFIG, continue its setup (remote name request or mgmt
 * connected notification).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* NOTE(review): features are cached even when ev->status != 0 —
	 * confirm this is intentional.
	 */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Setup continuation below only applies while still configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* Resolve the remote name before reporting the connection */
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* Finish connection setup unless outgoing authentication is needed */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5233
/* Handle HCI_Synchronous_Connection_Complete event.
 *
 * Looks up the SCO/eSCO connection object (falling back from SCO to
 * an eSCO object, see below), guards against the handle being set
 * more than once, and either finalizes the connection (handle, state,
 * debugfs/sysfs) or, for a set of well-known failure codes on
 * outgoing links, retries the setup with a rebuilt packet type mask
 * before giving up and closing the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status) {
			conn->state = BT_CLOSED;
			break;
		}

		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* For outgoing links, rebuild the packet type from
			 * the controller's eSCO capabilities and try the
			 * setup again before treating this as a failure.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->parent->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over HCI transport data path which
	 * is zero and non-zero value shall be non-HCI transport data path
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
5346
/* Return the number of bytes of valid EIR data in the buffer.
 *
 * EIR is a sequence of length-prefixed fields; a zero length byte
 * terminates the data early. If the fields run exactly up to (or a
 * final field claims to run past) the buffer end, the full eir_len
 * is reported.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t offset;

	for (offset = 0; offset < eir_len; offset += eir[offset] + 1) {
		/* A zero field length terminates the EIR data */
		if (!eir[offset])
			return offset;
	}

	return eir_len;
}
5363
/* Handle HCI_Extended_Inquiry_Result event.
 *
 * Validates that the packet actually holds 'num' result entries, then
 * updates the inquiry cache with each discovered device and forwards
 * it to mgmt together with its EIR data. Results are dropped while a
 * periodic inquiry is active.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		/* Extended Inquiry Results are only sent by SSP capable
		 * devices (see hci_remote_ext_features_evt for devices
		 * that violate this).
		 */
		data.ssp_mode		= 0x01;

		/* With mgmt in use, the name counts as known when the EIR
		 * data already carries a complete-name field.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
5418
/* Handle HCI_Encryption_Key_Refresh_Complete event.
 *
 * For LE links, promotes the pending security level on success and
 * clears the pending-encryption flag. A failed refresh on an
 * established link triggers a disconnect with an authentication
 * failure reason; otherwise the result is confirmed to the connect or
 * auth callbacks depending on the connection state.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on a live link is fatal for the connection */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5468
5469 static u8 hci_get_auth_req(struct hci_conn *conn)
5470 {
5471 #ifdef TIZEN_BT
5472         if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
5473                 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5474                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5475                         return HCI_AT_GENERAL_BONDING_MITM;
5476         }
5477 #endif
5478
5479         /* If remote requests no-bonding follow that lead */
5480         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5481             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5482                 return conn->remote_auth | (conn->auth_type & 0x01);
5483
5484         /* If both remote and local have enough IO capabilities, require
5485          * MITM protection
5486          */
5487         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5488             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5489                 return conn->remote_auth | 0x01;
5490
5491         /* No MITM protection possible so ignore remote requirement */
5492         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5493 }
5494
/* Determine the OOB_Data_Present value for an IO Capability Reply
 * (0x00 no data, 0x01 P-192 data, 0x02 P-256 data) based on the
 * stored remote OOB data and whether BR/EDR Secure Connections
 * (Only) mode is enabled.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
5536
/* Handle HCI_IO_Capability_Request event.
 *
 * When pairing is acceptable (we are bondable, we initiated the
 * authentication, or the remote does not request bonding) reply with
 * our IO capability, derived authentication requirement and OOB data
 * presence; otherwise send a negative reply with "pairing not
 * allowed". A reference to the connection is held for the duration of
 * the pairing procedure.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5606
5607 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5608                                   struct sk_buff *skb)
5609 {
5610         struct hci_ev_io_capa_reply *ev = data;
5611         struct hci_conn *conn;
5612
5613         bt_dev_dbg(hdev, "");
5614
5615         hci_dev_lock(hdev);
5616
5617         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5618         if (!conn)
5619                 goto unlock;
5620
5621         conn->remote_cap = ev->capability;
5622         conn->remote_auth = ev->authentication;
5623
5624 unlock:
5625         hci_dev_unlock(hdev);
5626 }
5627
/* Handle HCI_User_Confirmation_Request event.
 *
 * Rejects the request when we require MITM protection that the remote
 * cannot provide, auto-accepts (possibly after a configurable delay)
 * when neither side needs MITM protection, and otherwise forwards the
 * request to user space via mgmt - with confirm_hint set to 1 when
 * plain user authorization rather than numeric comparison is wanted.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* The 0x01 bit of the requirement is the MITM bit */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5712
5713 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5714                                          struct sk_buff *skb)
5715 {
5716         struct hci_ev_user_passkey_req *ev = data;
5717
5718         bt_dev_dbg(hdev, "");
5719
5720         if (hci_dev_test_flag(hdev, HCI_MGMT))
5721                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5722 }
5723
5724 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5725                                         struct sk_buff *skb)
5726 {
5727         struct hci_ev_user_passkey_notify *ev = data;
5728         struct hci_conn *conn;
5729
5730         bt_dev_dbg(hdev, "");
5731
5732         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5733         if (!conn)
5734                 return;
5735
5736         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5737         conn->passkey_entered = 0;
5738
5739         if (hci_dev_test_flag(hdev, HCI_MGMT))
5740                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5741                                          conn->dst_type, conn->passkey_notify,
5742                                          conn->passkey_entered);
5743 }
5744
5745 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5746                                     struct sk_buff *skb)
5747 {
5748         struct hci_ev_keypress_notify *ev = data;
5749         struct hci_conn *conn;
5750
5751         bt_dev_dbg(hdev, "");
5752
5753         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5754         if (!conn)
5755                 return;
5756
5757         switch (ev->type) {
5758         case HCI_KEYPRESS_STARTED:
5759                 conn->passkey_entered = 0;
5760                 return;
5761
5762         case HCI_KEYPRESS_ENTERED:
5763                 conn->passkey_entered++;
5764                 break;
5765
5766         case HCI_KEYPRESS_ERASED:
5767                 conn->passkey_entered--;
5768                 break;
5769
5770         case HCI_KEYPRESS_CLEARED:
5771                 conn->passkey_entered = 0;
5772                 break;
5773
5774         case HCI_KEYPRESS_COMPLETED:
5775                 return;
5776         }
5777
5778         if (hci_dev_test_flag(hdev, HCI_MGMT))
5779                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5780                                          conn->dst_type, conn->passkey_notify,
5781                                          conn->passkey_entered);
5782 }
5783
/* Handle HCI_Simple_Pairing_Complete event.
 *
 * Resets the cached remote authentication requirement and, on
 * failure, reports it to mgmt unless we initiated the authentication
 * ourselves (in which case the Auth Complete event already produces
 * the failure). Drops the connection reference held since the IO
 * capability request.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5814
5815 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5816                                          struct sk_buff *skb)
5817 {
5818         struct hci_ev_remote_host_features *ev = data;
5819         struct inquiry_entry *ie;
5820         struct hci_conn *conn;
5821
5822         bt_dev_dbg(hdev, "");
5823
5824         hci_dev_lock(hdev);
5825
5826         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5827         if (conn)
5828                 memcpy(conn->features[1], ev->features, 8);
5829
5830         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5831         if (ie)
5832                 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5833
5834         hci_dev_unlock(hdev);
5835 }
5836
/* Handle HCI_Remote_OOB_Data_Request event.
 *
 * Replies with the locally stored OOB data for the remote device:
 * the extended (P-192 + P-256) variant when BR/EDR Secure Connections
 * is enabled - with the P-192 values zeroed in SC-only mode - or the
 * legacy P-192-only reply otherwise. Without stored data a negative
 * reply is sent.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC-only mode, only the P-256 values may be used */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5890
5891 #if IS_ENABLED(CONFIG_BT_HS)
5892 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5893                                   struct sk_buff *skb)
5894 {
5895         struct hci_ev_channel_selected *ev = data;
5896         struct hci_conn *hcon;
5897
5898         bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5899
5900         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5901         if (!hcon)
5902                 return;
5903
5904         amp_read_loc_assoc_final_data(hdev, hcon);
5905 }
5906
/* Handle HCI_Physical_Link_Complete event (AMP).
 *
 * On success marks the physical link as connected, copies the peer
 * address from the underlying BR/EDR connection, registers the
 * debugfs/sysfs entries and confirms the link to the AMP manager.
 * On failure the connection object is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = data;
	struct hci_conn *hcon, *bredr_hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
		   ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only links managed by an AMP manager are handled here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5947
/* Handle HCI_Logical_Link_Complete event (AMP).
 *
 * Creates an hci_chan for the new logical link and, when an AMP
 * manager with a BR/EDR L2CAP channel exists, confirms the logical
 * link to L2CAP using the controller's block MTU.
 *
 * NOTE(review): the connection lookup here happens without taking
 * hci_dev_lock, unlike most other handlers in this file - confirm
 * this is intentional/safe.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5986
5987 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5988                                              struct sk_buff *skb)
5989 {
5990         struct hci_ev_disconn_logical_link_complete *ev = data;
5991         struct hci_chan *hchan;
5992
5993         bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5994                    le16_to_cpu(ev->handle), ev->status);
5995
5996         if (ev->status)
5997                 return;
5998
5999         hci_dev_lock(hdev);
6000
6001         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
6002         if (!hchan || !hchan->amp)
6003                 goto unlock;
6004
6005         amp_destroy_logical_link(hchan, ev->reason);
6006
6007 unlock:
6008         hci_dev_unlock(hdev);
6009 }
6010
6011 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
6012                                              struct sk_buff *skb)
6013 {
6014         struct hci_ev_disconn_phy_link_complete *ev = data;
6015         struct hci_conn *hcon;
6016
6017         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6018
6019         if (ev->status)
6020                 return;
6021
6022         hci_dev_lock(hdev);
6023
6024         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
6025         if (hcon && hcon->type == AMP_LINK) {
6026                 hcon->state = BT_CLOSED;
6027                 hci_disconn_cfm(hcon, ev->reason);
6028                 hci_conn_del(hcon);
6029         }
6030
6031         hci_dev_unlock(hdev);
6032 }
6033 #endif
6034
/* Fix up the initiator/responder addresses stored in a new LE connection.
 *
 * @conn:        LE connection being completed
 * @bdaddr:      peer address from the connection complete event
 * @bdaddr_type: address type of @bdaddr
 * @local_rpa:   local resolvable private address reported by the
 *               controller, or NULL/BDADDR_ANY when not available
 *
 * For outgoing connections the peer is the responder; for incoming ones
 * the local device is.  A controller-provided local RPA always takes
 * precedence over hdev->rpa.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
6087
/* Common handler for the legacy and enhanced LE Connection Complete
 * events.  Creates or completes the hci_conn for the new link, fixes up
 * initiator/responder addresses, resolves an RPA back to the identity
 * address, and either starts the remote feature exchange or transitions
 * straight to BT_CONNECTED.
 *
 * All event parameters have already been converted to host endianness
 * by the callers.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
#ifdef TIZEN_BT
		/* LE auto connect */
		bacpy(&conn->dst, bdaddr);
#endif
		/* Pending connection succeeded; stop the connect timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status || hci_conn_set_handle(conn, handle))
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Hold the connection until the feature exchange completes */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		/* The pending action is fulfilled: remove it and release
		 * the reference it held on the connection.
		 */
		hci_pend_le_list_del_init(params);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
6263
6264 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6265                                      struct sk_buff *skb)
6266 {
6267         struct hci_ev_le_conn_complete *ev = data;
6268
6269         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6270
6271         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6272                              NULL, ev->role, le16_to_cpu(ev->handle),
6273                              le16_to_cpu(ev->interval),
6274                              le16_to_cpu(ev->latency),
6275                              le16_to_cpu(ev->supervision_timeout));
6276 }
6277
6278 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6279                                          struct sk_buff *skb)
6280 {
6281         struct hci_ev_le_enh_conn_complete *ev = data;
6282
6283         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6284
6285         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6286                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6287                              le16_to_cpu(ev->interval),
6288                              le16_to_cpu(ev->latency),
6289                              le16_to_cpu(ev->supervision_timeout));
6290 }
6291
/* Handle HCI_EVT_LE_EXT_ADV_SET_TERM: an extended advertising set has
 * stopped, either because a connection was created (status 0) or because
 * a limit was reached / an error occurred (non-zero status).
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* If any other instance is still enabled we are still
		 * advertising; note that adv is reused as the iterator here.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* resp_addr only needs fixing up when advertising used a
		 * random address and no response address was recorded yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 uses the controller-wide random address */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6361
6362 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6363                                             struct sk_buff *skb)
6364 {
6365         struct hci_ev_le_conn_update_complete *ev = data;
6366         struct hci_conn *conn;
6367
6368         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6369
6370         if (ev->status)
6371                 return;
6372
6373         hci_dev_lock(hdev);
6374
6375         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6376         if (conn) {
6377 #ifdef TIZEN_BT
6378                 if (ev->status) {
6379                         hci_dev_unlock(hdev);
6380                         mgmt_le_conn_update_failed(hdev, &conn->dst,
6381                                 conn->type, conn->dst_type, ev->status);
6382                         return;
6383                 }
6384 #endif
6385                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6386                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6387                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6388         }
6389
6390         hci_dev_unlock(hdev);
6391
6392 #ifdef TIZEN_BT
6393         mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
6394                                 conn->dst_type, conn->le_conn_interval,
6395                                 conn->le_conn_latency, conn->le_supv_timeout);
6396 #endif
6397 }
6398
/* This function requires the caller holds hdev->lock.
 *
 * Decide, based on an incoming (connectable) advertising report, whether
 * an LE connection to @addr should be initiated and, if so, start it.
 * Returns the new hci_conn, or NULL when no attempt was made or the
 * attempt failed.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6490
/* Process a single (legacy or extended) advertising report.
 *
 * Validates the PDU type and data length, trims padded data, optionally
 * triggers a pending LE connection, and forwards the report to the mgmt
 * layer.  On non-Tizen builds, ADV_IND/ADV_SCAN_IND reports are buffered
 * so they can be merged with the following SCAN_RSP into one device
 * found event.
 *
 * @direct_addr is non-NULL only for LE Direct Advertising Reports.
 * @instant is the jiffies timestamp the containing event was handled at.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv, bool ctl_time, u64 instant)
{
#ifndef TIZEN_BT
	struct discovery_state *d = &hdev->discovery;
	bool match;
#endif
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (len > max_adv_len(hdev)) {
		bt_dev_err_ratelimited(hdev,
				       "adv larger than maximum supported");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type);
	if (!ext_adv && conn && type == LE_ADV_IND &&
	    len <= max_adv_len(hdev)) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

#ifndef TIZEN_BT
		/* Handle all adv packet in platform */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;
#endif

#ifdef TIZEN_BT
		mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, type);
#else
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
#endif
		return;
	}

	/* When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the unlikely case that a controller just sends a scan
	 * response event that doesn't match the pending report, then
	 * it is marked as a standalone SCAN_RSP.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_SCAN_RSP;

#ifdef TIZEN_BT
	/* Disable adv ind and scan rsp merging */
	mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, type);
#else
	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
#endif
}
6716
/* HCI LE Advertising Report event (legacy scanning): parse each report in
 * the event and forward it to process_adv_report().  Each report consists
 * of a fixed header, variable-length advertising data and one trailing
 * RSSI byte.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;	/* one timestamp shared by all reports in the event */

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		/* Pull the fixed-size report header */
		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the advertising data plus the trailing RSSI byte;
		 * a short event aborts processing of remaining reports.
		 */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= max_adv_len(hdev)) {
			/* RSSI is the byte immediately after the adv data */
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}
6755
6756 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6757 {
6758         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6759                 switch (evt_type) {
6760                 case LE_LEGACY_ADV_IND:
6761                         return LE_ADV_IND;
6762                 case LE_LEGACY_ADV_DIRECT_IND:
6763                         return LE_ADV_DIRECT_IND;
6764                 case LE_LEGACY_ADV_SCAN_IND:
6765                         return LE_ADV_SCAN_IND;
6766                 case LE_LEGACY_NONCONN_IND:
6767                         return LE_ADV_NONCONN_IND;
6768                 case LE_LEGACY_SCAN_RSP_ADV:
6769                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6770                         return LE_ADV_SCAN_RSP;
6771                 }
6772
6773                 goto invalid;
6774         }
6775
6776         if (evt_type & LE_EXT_ADV_CONN_IND) {
6777                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6778                         return LE_ADV_DIRECT_IND;
6779
6780                 return LE_ADV_IND;
6781         }
6782
6783         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6784                 return LE_ADV_SCAN_RSP;
6785
6786         if (evt_type & LE_EXT_ADV_SCAN_IND)
6787                 return LE_ADV_SCAN_IND;
6788
6789         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6790             evt_type & LE_EXT_ADV_DIRECT_IND)
6791                 return LE_ADV_NONCONN_IND;
6792
6793 invalid:
6794         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6795                                evt_type);
6796
6797         return LE_ADV_INVALID;
6798 }
6799
/* HCI LE Extended Advertising Report event: parse each report, map its
 * extended event type to a legacy type and forward valid reports to
 * process_adv_report().
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;	/* one timestamp shared by all reports in the event */

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		/* Pull the fixed-size report header */
		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the variable-length advertising data */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		/* Only the event-type bits are meaningful for the mapping */
		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			/* ext_adv is true for PDUs that are not legacy ones */
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}
6838
6839 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6840 {
6841         struct hci_cp_le_pa_term_sync cp;
6842
6843         memset(&cp, 0, sizeof(cp));
6844         cp.handle = handle;
6845
6846         return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6847 }
6848
/* HCI LE Periodic Advertising Sync Established event: decide whether the
 * sync is accepted by the ISO layer, terminating it otherwise.  On failure
 * with a deferred setup, a placeholder connection is added so the failure
 * can be reported upward.
 */
static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	/* The PA sync attempt has completed (success or failure) */
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	/* Ask the ISO protocol layer whether this sync should be accepted */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	if (ev->status) {
		/* Add connection to indicate the failed PA sync event */
		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
					     HCI_ROLE_SLAVE);

		if (!pa_sync)
			goto unlock;

		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}
6889
6890 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6891                                       struct sk_buff *skb)
6892 {
6893         struct hci_ev_le_per_adv_report *ev = data;
6894         int mask = hdev->link_mode;
6895         __u8 flags = 0;
6896
6897         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6898
6899         hci_dev_lock(hdev);
6900
6901         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6902         if (!(mask & HCI_LM_ACCEPT))
6903                 hci_le_pa_term_sync(hdev, ev->sync_handle);
6904
6905         hci_dev_unlock(hdev);
6906 }
6907
/* HCI LE Read Remote Features Complete event: store the remote LE feature
 * set and, if the connection was waiting for it (BT_CONFIG), transition it
 * to connected state and notify upper layers.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
6949
/* HCI LE Long Term Key Request event: look up a matching LTK for the
 * connection and reply with it, or send a negative reply when no suitable
 * key is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad it up to the full LTK size */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
7014
7015 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
7016                                       u8 reason)
7017 {
7018         struct hci_cp_le_conn_param_req_neg_reply cp;
7019
7020         cp.handle = cpu_to_le16(handle);
7021         cp.reason = reason;
7022
7023         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
7024                      &cp);
7025 }
7026
/* HCI LE Remote Connection Parameter Request event: validate the requested
 * parameters, optionally store them (and notify mgmt) when acting as
 * central, and accept the request by echoing the parameters back.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	/* Unknown or not-yet-connected handle: reject the request */
	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Parameters outside the spec-allowed ranges: reject */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* Update stored parameters for this peer, if any; the
		 * store_hint tells userspace whether to persist them.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request with the remote's own parameters */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
7086
7087 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
7088                                          struct sk_buff *skb)
7089 {
7090         struct hci_ev_le_direct_adv_report *ev = data;
7091         u64 instant = jiffies;
7092         int i;
7093
7094         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
7095                                 flex_array_size(ev, info, ev->num)))
7096                 return;
7097
7098         if (!ev->num)
7099                 return;
7100
7101         hci_dev_lock(hdev);
7102
7103         for (i = 0; i < ev->num; i++) {
7104                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
7105
7106                 process_adv_report(hdev, info->type, &info->bdaddr,
7107                                    info->bdaddr_type, &info->direct_addr,
7108                                    info->direct_addr_type, info->rssi, NULL, 0,
7109                                    false, false, instant);
7110         }
7111
7112         hci_dev_unlock(hdev);
7113 }
7114
7115 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
7116                                   struct sk_buff *skb)
7117 {
7118         struct hci_ev_le_phy_update_complete *ev = data;
7119         struct hci_conn *conn;
7120
7121         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7122
7123         if (ev->status)
7124                 return;
7125
7126         hci_dev_lock(hdev);
7127
7128         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
7129         if (!conn)
7130                 goto unlock;
7131
7132         conn->le_tx_phy = ev->tx_phy;
7133         conn->le_rx_phy = ev->rx_phy;
7134
7135 unlock:
7136         hci_dev_unlock(hdev);
7137 }
7138
/* HCI LE CIS Established event: fill in the negotiated ISO QoS parameters
 * on the connection and either complete the connection setup or tear it
 * down on failure.  The c_*/p_* event fields appear to be per-direction
 * (Central-to-Peripheral / Peripheral-to-Central) values that are mapped
 * to in/out depending on our role — see the role switch below.
 */
static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	bool pending = false;
	u16 handle = __le16_to_cpu(ev->handle);

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != ISO_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	qos = &conn->iso_qos;

	/* Remember whether a CIS creation was pending so more queued CIS
	 * can be created after we drop the lock.
	 */
	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
	qos->ucast.out.interval = qos->ucast.in.interval;

	switch (conn->role) {
	case HCI_ROLE_SLAVE:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.in.phy = ev->c_phy;
		qos->ucast.out.phy = ev->p_phy;
		break;
	case HCI_ROLE_MASTER:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.out.phy = ev->c_phy;
		qos->ucast.in.phy = ev->p_phy;
		break;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	/* CIS establishment failed: notify upper layers and delete it */
	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
7222
7223 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
7224 {
7225         struct hci_cp_le_reject_cis cp;
7226
7227         memset(&cp, 0, sizeof(cp));
7228         cp.handle = handle;
7229         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
7230         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
7231 }
7232
7233 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
7234 {
7235         struct hci_cp_le_accept_cis cp;
7236
7237         memset(&cp, 0, sizeof(cp));
7238         cp.handle = handle;
7239         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
7240 }
7241
/* HCI LE CIS Request event: a peer central wants to establish a CIS on an
 * existing ACL.  Ask the ISO layer whether to accept; create (or reuse)
 * the CIS connection and either accept immediately or defer to userspace.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	/* The CIS must piggyback on a known ACL connection */
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	/* Reuse an existing CIS connection or add a new one */
	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
				   cis_handle);
		if (!cis) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
	}

	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;

	/* Accept right away unless the ISO layer asked to defer the
	 * decision, in which case notify it and wait in BT_CONNECT2.
	 */
	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
7292
7293 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
7294 {
7295         u8 handle = PTR_UINT(data);
7296
7297         return hci_le_terminate_big_sync(hdev, handle,
7298                                          HCI_ERROR_LOCAL_HOST_TERM);
7299 }
7300
/* HCI LE Create BIG Complete event: assign the controller-provided BIS
 * handles to the bound BIS connections of this BIG and complete (or tear
 * down) each of them.
 */
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	__u8 i = 0;	/* index into ev->bis_handle[], counts connected BISes */

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	/* Verify the event really carries num_bis handles */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);
	rcu_read_lock();

	/* Connect all BISes that are bound to the BIG */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (bacmp(&conn->dst, BDADDR_ANY) ||
		    conn->type != ISO_LINK ||
		    conn->iso_qos.bcast.big != ev->handle)
			continue;

		/* Bound BISes take the controller handles in list order */
		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
			continue;

		if (!ev->status) {
			conn->state = BT_CONNECTED;
			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
			/* NOTE(review): the RCU read lock is dropped around
			 * these calls — presumably because they may sleep;
			 * confirm before relying on it.
			 */
			rcu_read_unlock();
			hci_debugfs_create_conn(conn);
			hci_conn_add_sysfs(conn);
			hci_iso_setup_path(conn);
			rcu_read_lock();
			continue;
		}

		hci_connect_cfm(conn, ev->status);
		rcu_read_unlock();
		hci_conn_del(conn);
		rcu_read_lock();
	}

	rcu_read_unlock();

	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * has completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}
7358
7359 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7360                                             struct sk_buff *skb)
7361 {
7362         struct hci_evt_le_big_sync_estabilished *ev = data;
7363         struct hci_conn *bis;
7364         struct hci_conn *pa_sync;
7365         int i;
7366
7367         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7368
7369         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7370                                 flex_array_size(ev, bis, ev->num_bis)))
7371                 return;
7372
7373         hci_dev_lock(hdev);
7374
7375         if (!ev->status) {
7376                 pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
7377                 if (pa_sync)
7378                         /* Also mark the BIG sync established event on the
7379                          * associated PA sync hcon
7380                          */
7381                         set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
7382         }
7383
7384         for (i = 0; i < ev->num_bis; i++) {
7385                 u16 handle = le16_to_cpu(ev->bis[i]);
7386                 __le32 interval;
7387
7388                 bis = hci_conn_hash_lookup_handle(hdev, handle);
7389                 if (!bis) {
7390                         bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7391                                            HCI_ROLE_SLAVE, handle);
7392                         if (!bis)
7393                                 continue;
7394                 }
7395
7396                 if (ev->status != 0x42)
7397                         /* Mark PA sync as established */
7398                         set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7399
7400                 bis->iso_qos.bcast.big = ev->handle;
7401                 memset(&interval, 0, sizeof(interval));
7402                 memcpy(&interval, ev->latency, sizeof(ev->latency));
7403                 bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7404                 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7405                 bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7406                 bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7407
7408                 if (!ev->status) {
7409                         set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7410                         hci_iso_setup_path(bis);
7411                 }
7412         }
7413
7414         /* In case BIG sync failed, notify each failed connection to
7415          * the user after all hci connections have been added
7416          */
7417         if (ev->status)
7418                 for (i = 0; i < ev->num_bis; i++) {
7419                         u16 handle = le16_to_cpu(ev->bis[i]);
7420
7421                         bis = hci_conn_hash_lookup_handle(hdev, handle);
7422
7423                         set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7424                         hci_connect_cfm(bis, ev->status);
7425                 }
7426
7427         hci_dev_unlock(hdev);
7428 }
7429
/* HCI LE BIGInfo Advertising Report event: terminate the PA sync if the
 * ISO layer does not accept it; otherwise, on a deferred setup, add a PA
 * sync connection (if not already present) and notify the ISO layer.
 */
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->sync_handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* A PA sync connection for this handle may already exist */
	pa_sync = hci_conn_hash_lookup_pa_sync_handle
			(hdev,
			le16_to_cpu(ev->sync_handle));

	if (pa_sync)
		goto unlock;

	/* Add connection to indicate the PA sync event */
	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (!pa_sync)
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0x00);

unlock:
	hci_dev_unlock(hdev);
}
7474
/* Initialize an hci_le_ev_table entry at index _op with a handler and a
 * [min_len, max_len] payload length range (variable-length events).
 */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length event: min and max payload lengths are identical. */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Event whose payload is only a status byte (struct hci_ev_status). */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7487
/* Entries in this table shall have their position according to the subevent
 * opcode they handle, so the use of the macros above is recommended since it
 * attempts to initialize at the proper index using Designated Initializers;
 * that way events without a callback function can be omitted.
 */
static const struct hci_le_ev {
	/* Subevent handler; a NULL func means the subevent is ignored */
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;	/* shorter events are rejected with an error */
	u16  max_len;	/* longer events only trigger a warning */
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
		     hci_le_per_adv_report_evt,
		     sizeof(struct hci_ev_le_per_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};
7569
/* Dispatch an LE Meta event to its subevent handler.
 *
 * If the currently pending command is an LE command (OGF 0x08) waiting
 * for this subevent, its request is completed first.  The subevent is
 * then looked up in hci_le_ev_table and its length validated before the
 * registered callback runs.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	/* Subevents without a registered callback are silently ignored */
	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len since it may still be
	 * possible to partially parse the event, so leave it to the
	 * callback to decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}
7612
7613 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7614                                  u8 event, struct sk_buff *skb)
7615 {
7616         struct hci_ev_cmd_complete *ev;
7617         struct hci_event_hdr *hdr;
7618
7619         if (!skb)
7620                 return false;
7621
7622         hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7623         if (!hdr)
7624                 return false;
7625
7626         if (event) {
7627                 if (hdr->evt != event)
7628                         return false;
7629                 return true;
7630         }
7631
7632         /* Check if request ended in Command Status - no way to retrieve
7633          * any extra parameters in this case.
7634          */
7635         if (hdr->evt == HCI_EV_CMD_STATUS)
7636                 return false;
7637
7638         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7639                 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7640                            hdr->evt);
7641                 return false;
7642         }
7643
7644         ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7645         if (!ev)
7646                 return false;
7647
7648         if (opcode != __le16_to_cpu(ev->opcode)) {
7649                 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7650                        __le16_to_cpu(ev->opcode));
7651                 return false;
7652         }
7653
7654         return true;
7655 }
7656
7657 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7658                                   struct sk_buff *skb)
7659 {
7660         struct hci_ev_le_advertising_info *adv;
7661         struct hci_ev_le_direct_adv_info *direct_adv;
7662         struct hci_ev_le_ext_adv_info *ext_adv;
7663         const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7664         const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7665
7666         hci_dev_lock(hdev);
7667
7668         /* If we are currently suspended and this is the first BT event seen,
7669          * save the wake reason associated with the event.
7670          */
7671         if (!hdev->suspended || hdev->wake_reason)
7672                 goto unlock;
7673
7674         /* Default to remote wake. Values for wake_reason are documented in the
7675          * Bluez mgmt api docs.
7676          */
7677         hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7678
7679         /* Once configured for remote wakeup, we should only wake up for
7680          * reconnections. It's useful to see which device is waking us up so
7681          * keep track of the bdaddr of the connection event that woke us up.
7682          */
7683         if (event == HCI_EV_CONN_REQUEST) {
7684                 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7685                 hdev->wake_addr_type = BDADDR_BREDR;
7686         } else if (event == HCI_EV_CONN_COMPLETE) {
7687                 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7688                 hdev->wake_addr_type = BDADDR_BREDR;
7689         } else if (event == HCI_EV_LE_META) {
7690                 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7691                 u8 subevent = le_ev->subevent;
7692                 u8 *ptr = &skb->data[sizeof(*le_ev)];
7693                 u8 num_reports = *ptr;
7694
7695                 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7696                      subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7697                      subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7698                     num_reports) {
7699                         adv = (void *)(ptr + 1);
7700                         direct_adv = (void *)(ptr + 1);
7701                         ext_adv = (void *)(ptr + 1);
7702
7703                         switch (subevent) {
7704                         case HCI_EV_LE_ADVERTISING_REPORT:
7705                                 bacpy(&hdev->wake_addr, &adv->bdaddr);
7706                                 hdev->wake_addr_type = adv->bdaddr_type;
7707                                 break;
7708                         case HCI_EV_LE_DIRECT_ADV_REPORT:
7709                                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7710                                 hdev->wake_addr_type = direct_adv->bdaddr_type;
7711                                 break;
7712                         case HCI_EV_LE_EXT_ADV_REPORT:
7713                                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7714                                 hdev->wake_addr_type = ext_adv->bdaddr_type;
7715                                 break;
7716                         }
7717                 }
7718         } else {
7719                 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7720         }
7721
7722 unlock:
7723         hci_dev_unlock(hdev);
7724 }
7725
/* Declare a variable-length event handler at its opcode index;
 * min_len/max_len bound the acceptable parameter length.
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length event: min_len and max_len are the same */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Event whose only parameter is a single status byte */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Variable-length event that may complete a pending request; the
 * handler gets the request-completion callbacks (func_req variant).
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length request-completing event */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
7750
/* Entries in this table shall have their position according to the event
 * opcode they handle, so the use of the macros above is recommended since
 * they initialize each entry at its proper index using designated
 * initializers; that way events without a callback function can be omitted.
 */
static const struct hci_ev {
	/* Set when the handler takes the request-completion callbacks */
	bool req;
	union {
		/* Plain event handler */
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		/* Handler that may complete a pending request */
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;	/* shorter events are rejected with an error */
	u16  max_len;	/* longer events only trigger a warning */
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
#ifdef TIZEN_BT
	/* [0xFF = HCI_EV_VENDOR_SPECIFIC] */
	HCI_EV(HCI_EV_VENDOR_SPECIFIC, hci_vendor_specific_evt,
	       sizeof(struct hci_ev_vendor_specific)),
#else
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
#endif
};
7914
/* Validate and dispatch a single HCI event via hci_ev_table.
 *
 * Events without a registered handler are ignored.  Events shorter than
 * min_len are dropped with an error; events longer than max_len are
 * only warned about and still passed to the handler.
 */
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len since it may still be
	 * possible to partially parse the event, so leave it to the
	 * callback to decide if that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	/* Request-aware handlers also get the completion callbacks */
	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}
7951
/* Entry point for a received HCI event packet.
 *
 * Consumes @skb: it is always freed before returning.  A clone is kept
 * in hdev->recv_event, and a second pristine clone (orig_skb) may be
 * made for the req_complete_skb callback since handlers can modify the
 * original skb through skb_pull() calls.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Keep a copy of the most recent event for later inspection */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	/* Event code 0x00 is invalid per the HCI spec */
	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Hand over orig_skb only if it really is the matching
		 * Command Complete; otherwise free it and pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}