Bluetooth: Enable inquiry and page scan
[platform/kernel/linux-starfive.git] / net / bluetooth / hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI event handling. */
27
28 #include <asm/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "hci_debugfs.h"
38 #include "hci_codec.h"
39 #include "a2mp.h"
40 #include "amp.h"
41 #include "smp.h"
42 #include "msft.h"
43 #include "eir.h"
44
45 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
46                  "\x00\x00\x00\x00\x00\x00\x00\x00"
47
48 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
49
50 /* Handle HCI Event packets */
51
52 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
53                              u8 ev, size_t len)
54 {
55         void *data;
56
57         data = skb_pull_data(skb, len);
58         if (!data)
59                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
60
61         return data;
62 }
63
64 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
65                              u16 op, size_t len)
66 {
67         void *data;
68
69         data = skb_pull_data(skb, len);
70         if (!data)
71                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
72
73         return data;
74 }
75
76 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
77                                 u8 ev, size_t len)
78 {
79         void *data;
80
81         data = skb_pull_data(skb, len);
82         if (!data)
83                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
84
85         return data;
86 }
87
/* Command Complete handler for HCI Inquiry Cancel.
 *
 * On success clears the HCI_INQUIRY flag, wakes any waiters sleeping on
 * that bit and, unless an LE active scan is still running, moves the
 * discovery state machine to DISCOVERY_STOPPED.  Returns the HCI status
 * byte (possibly rewritten to success, see the race note below).
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	/* Clear-then-wake: the barrier orders the flag clear against the
	 * waiter wakeup, as required by the wake_up_bit API.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Resume any connection establishment deferred while the inquiry
	 * was in progress.
	 */
	hci_conn_check_pending(hdev);

	return rp->status;
}
127
128 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
129                               struct sk_buff *skb)
130 {
131         struct hci_ev_status *rp = data;
132
133         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
134
135         if (rp->status)
136                 return rp->status;
137
138         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
139
140         return rp->status;
141 }
142
143 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
144                                    struct sk_buff *skb)
145 {
146         struct hci_ev_status *rp = data;
147
148         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
149
150         if (rp->status)
151                 return rp->status;
152
153         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
154
155         hci_conn_check_pending(hdev);
156
157         return rp->status;
158 }
159
160 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
161                                         struct sk_buff *skb)
162 {
163         struct hci_ev_status *rp = data;
164
165         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
166
167         return rp->status;
168 }
169
170 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
171                                 struct sk_buff *skb)
172 {
173         struct hci_rp_role_discovery *rp = data;
174         struct hci_conn *conn;
175
176         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
177
178         if (rp->status)
179                 return rp->status;
180
181         hci_dev_lock(hdev);
182
183         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
184         if (conn)
185                 conn->role = rp->role;
186
187         hci_dev_unlock(hdev);
188
189         return rp->status;
190 }
191
192 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
193                                   struct sk_buff *skb)
194 {
195         struct hci_rp_read_link_policy *rp = data;
196         struct hci_conn *conn;
197
198         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
199
200         if (rp->status)
201                 return rp->status;
202
203         hci_dev_lock(hdev);
204
205         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
206         if (conn)
207                 conn->link_policy = __le16_to_cpu(rp->policy);
208
209         hci_dev_unlock(hdev);
210
211         return rp->status;
212 }
213
214 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
215                                    struct sk_buff *skb)
216 {
217         struct hci_rp_write_link_policy *rp = data;
218         struct hci_conn *conn;
219         void *sent;
220 #ifdef TIZEN_BT
221         struct hci_cp_write_link_policy cp;
222         struct hci_conn *sco_conn;
223 #endif
224
225         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
226
227         if (rp->status)
228                 return rp->status;
229
230         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
231         if (!sent)
232                 return rp->status;
233
234         hci_dev_lock(hdev);
235
236         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
237         if (conn)
238                 conn->link_policy = get_unaligned_le16(sent + 2);
239
240 #ifdef TIZEN_BT
241         sco_conn = hci_conn_hash_lookup_sco(hdev);
242         if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
243             conn->link_policy & HCI_LP_SNIFF) {
244                 BT_ERR("SNIFF is not allowed during sco connection");
245                 cp.handle = __cpu_to_le16(conn->handle);
246                 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
247                 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
248         }
249 #endif
250
251         hci_dev_unlock(hdev);
252
253         return rp->status;
254 }
255
256 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
257                                       struct sk_buff *skb)
258 {
259         struct hci_rp_read_def_link_policy *rp = data;
260
261         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
262
263         if (rp->status)
264                 return rp->status;
265
266         hdev->link_policy = __le16_to_cpu(rp->policy);
267
268         return rp->status;
269 }
270
271 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
272                                        struct sk_buff *skb)
273 {
274         struct hci_ev_status *rp = data;
275         void *sent;
276
277         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
278
279         if (rp->status)
280                 return rp->status;
281
282         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
283         if (!sent)
284                 return rp->status;
285
286         hdev->link_policy = get_unaligned_le16(sent);
287
288         return rp->status;
289 }
290
/* Command Complete handler for HCI Reset.
 *
 * Clears the in-flight HCI_RESET flag unconditionally (even on failure,
 * so a later reset can be issued), then on success restores all
 * non-persistent controller state to its post-reset defaults.
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* TX power values are unknown again until re-read from the
	 * controller.
	 */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller forgot these lists on reset; drop our shadow
	 * copies so they stay in sync.
	 */
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}
325
326 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
327                                       struct sk_buff *skb)
328 {
329         struct hci_rp_read_stored_link_key *rp = data;
330         struct hci_cp_read_stored_link_key *sent;
331
332         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
333
334         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
335         if (!sent)
336                 return rp->status;
337
338         if (!rp->status && sent->read_all == 0x01) {
339                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
340                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
341         }
342
343         return rp->status;
344 }
345
346 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
347                                         struct sk_buff *skb)
348 {
349         struct hci_rp_delete_stored_link_key *rp = data;
350         u16 num_keys;
351
352         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
353
354         if (rp->status)
355                 return rp->status;
356
357         num_keys = le16_to_cpu(rp->num_keys);
358
359         if (num_keys <= hdev->stored_num_keys)
360                 hdev->stored_num_keys -= num_keys;
361         else
362                 hdev->stored_num_keys = 0;
363
364         return rp->status;
365 }
366
367 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
368                                   struct sk_buff *skb)
369 {
370         struct hci_ev_status *rp = data;
371         void *sent;
372
373         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
374
375         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
376         if (!sent)
377                 return rp->status;
378
379         hci_dev_lock(hdev);
380
381         if (hci_dev_test_flag(hdev, HCI_MGMT))
382                 mgmt_set_local_name_complete(hdev, sent, rp->status);
383         else if (!rp->status)
384                 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
385
386         hci_dev_unlock(hdev);
387
388         return rp->status;
389 }
390
391 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
392                                  struct sk_buff *skb)
393 {
394         struct hci_rp_read_local_name *rp = data;
395
396         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
397
398         if (rp->status)
399                 return rp->status;
400
401         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
402             hci_dev_test_flag(hdev, HCI_CONFIG))
403                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
404
405         return rp->status;
406 }
407
408 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
409                                    struct sk_buff *skb)
410 {
411         struct hci_ev_status *rp = data;
412         void *sent;
413
414         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
415
416         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
417         if (!sent)
418                 return rp->status;
419
420         hci_dev_lock(hdev);
421
422         if (!rp->status) {
423                 __u8 param = *((__u8 *) sent);
424
425                 if (param == AUTH_ENABLED)
426                         set_bit(HCI_AUTH, &hdev->flags);
427                 else
428                         clear_bit(HCI_AUTH, &hdev->flags);
429         }
430
431         if (hci_dev_test_flag(hdev, HCI_MGMT))
432                 mgmt_auth_enable_complete(hdev, rp->status);
433
434         hci_dev_unlock(hdev);
435
436         return rp->status;
437 }
438
439 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
440                                     struct sk_buff *skb)
441 {
442         struct hci_ev_status *rp = data;
443         __u8 param;
444         void *sent;
445
446         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
447
448         if (rp->status)
449                 return rp->status;
450
451         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
452         if (!sent)
453                 return rp->status;
454
455         param = *((__u8 *) sent);
456
457         if (param)
458                 set_bit(HCI_ENCRYPT, &hdev->flags);
459         else
460                 clear_bit(HCI_ENCRYPT, &hdev->flags);
461
462         return rp->status;
463 }
464
465 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
466                                    struct sk_buff *skb)
467 {
468         struct hci_ev_status *rp = data;
469         __u8 param;
470         void *sent;
471
472         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
473
474         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
475         if (!sent)
476                 return rp->status;
477
478         param = *((__u8 *) sent);
479
480         hci_dev_lock(hdev);
481
482         if (rp->status) {
483                 hdev->discov_timeout = 0;
484                 goto done;
485         }
486
487         if (param & SCAN_INQUIRY)
488                 set_bit(HCI_ISCAN, &hdev->flags);
489         else
490                 clear_bit(HCI_ISCAN, &hdev->flags);
491
492         if (param & SCAN_PAGE)
493                 set_bit(HCI_PSCAN, &hdev->flags);
494         else
495                 clear_bit(HCI_PSCAN, &hdev->flags);
496
497 done:
498         hci_dev_unlock(hdev);
499
500         return rp->status;
501 }
502
503 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
504                                   struct sk_buff *skb)
505 {
506         struct hci_ev_status *rp = data;
507         struct hci_cp_set_event_filter *cp;
508         void *sent;
509
510         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
511
512         if (rp->status)
513                 return rp->status;
514
515         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
516         if (!sent)
517                 return rp->status;
518
519         cp = (struct hci_cp_set_event_filter *)sent;
520
521         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
522                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
523         else
524                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
525
526         return rp->status;
527 }
528
529 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
530                                    struct sk_buff *skb)
531 {
532         struct hci_rp_read_class_of_dev *rp = data;
533
534         if (WARN_ON(!hdev))
535                 return HCI_ERROR_UNSPECIFIED;
536
537         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
538
539         if (rp->status)
540                 return rp->status;
541
542         memcpy(hdev->dev_class, rp->dev_class, 3);
543
544         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
545                    hdev->dev_class[1], hdev->dev_class[0]);
546
547         return rp->status;
548 }
549
550 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
551                                     struct sk_buff *skb)
552 {
553         struct hci_ev_status *rp = data;
554         void *sent;
555
556         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
557
558         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
559         if (!sent)
560                 return rp->status;
561
562         hci_dev_lock(hdev);
563
564         if (!rp->status)
565                 memcpy(hdev->dev_class, sent, 3);
566
567         if (hci_dev_test_flag(hdev, HCI_MGMT))
568                 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
569
570         hci_dev_unlock(hdev);
571
572         return rp->status;
573 }
574
575 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
576                                     struct sk_buff *skb)
577 {
578         struct hci_rp_read_voice_setting *rp = data;
579         __u16 setting;
580
581         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
582
583         if (rp->status)
584                 return rp->status;
585
586         setting = __le16_to_cpu(rp->voice_setting);
587
588         if (hdev->voice_setting == setting)
589                 return rp->status;
590
591         hdev->voice_setting = setting;
592
593         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
594
595         if (hdev->notify)
596                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
597
598         return rp->status;
599 }
600
601 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
602                                      struct sk_buff *skb)
603 {
604         struct hci_ev_status *rp = data;
605         __u16 setting;
606         void *sent;
607
608         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
609
610         if (rp->status)
611                 return rp->status;
612
613         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
614         if (!sent)
615                 return rp->status;
616
617         setting = get_unaligned_le16(sent);
618
619         if (hdev->voice_setting == setting)
620                 return rp->status;
621
622         hdev->voice_setting = setting;
623
624         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
625
626         if (hdev->notify)
627                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
628
629         return rp->status;
630 }
631
632 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
633                                         struct sk_buff *skb)
634 {
635         struct hci_rp_read_num_supported_iac *rp = data;
636
637         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
638
639         if (rp->status)
640                 return rp->status;
641
642         hdev->num_iac = rp->num_iac;
643
644         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
645
646         return rp->status;
647 }
648
649 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
650                                 struct sk_buff *skb)
651 {
652         struct hci_ev_status *rp = data;
653         struct hci_cp_write_ssp_mode *sent;
654
655         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
656
657         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
658         if (!sent)
659                 return rp->status;
660
661         hci_dev_lock(hdev);
662
663         if (!rp->status) {
664                 if (sent->mode)
665                         hdev->features[1][0] |= LMP_HOST_SSP;
666                 else
667                         hdev->features[1][0] &= ~LMP_HOST_SSP;
668         }
669
670         if (!rp->status) {
671                 if (sent->mode)
672                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
673                 else
674                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
675         }
676
677         hci_dev_unlock(hdev);
678
679         return rp->status;
680 }
681
682 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
683                                   struct sk_buff *skb)
684 {
685         struct hci_ev_status *rp = data;
686         struct hci_cp_write_sc_support *sent;
687
688         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
689
690         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
691         if (!sent)
692                 return rp->status;
693
694         hci_dev_lock(hdev);
695
696         if (!rp->status) {
697                 if (sent->support)
698                         hdev->features[1][0] |= LMP_HOST_SC;
699                 else
700                         hdev->features[1][0] &= ~LMP_HOST_SC;
701         }
702
703         if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
704                 if (sent->support)
705                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
706                 else
707                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
708         }
709
710         hci_dev_unlock(hdev);
711
712         return rp->status;
713 }
714
715 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
716                                     struct sk_buff *skb)
717 {
718         struct hci_rp_read_local_version *rp = data;
719
720         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
721
722         if (rp->status)
723                 return rp->status;
724
725         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
726             hci_dev_test_flag(hdev, HCI_CONFIG)) {
727                 hdev->hci_ver = rp->hci_ver;
728                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
729                 hdev->lmp_ver = rp->lmp_ver;
730                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
731                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
732         }
733
734         return rp->status;
735 }
736
/* Command Complete handler for Read Encryption Key Size.
 *
 * Records the negotiated key size on the connection and confirms (or
 * rejects) encryption via hci_encrypt_cfm().  A failed read or an
 * undersized key is treated as an authentication failure so the link
 * cannot proceed with weak or unknown encryption.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	/* Connection may have been torn down while the command was in
	 * flight; report an unspecified local failure in that case.
	 */
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;

		if (conn->enc_key_size < hdev->min_enc_key_size) {
			/* As slave role, the conn->state has been set to
			 * BT_CONNECTED and l2cap conn req might not be received
			 * yet, at this moment the l2cap layer almost does
			 * nothing with the non-zero status.
			 * So we also clear encrypt related bits, and then the
			 * handler of l2cap conn req will get the right secure
			 * state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* Propagate the final verdict (success or auth failure) to the
	 * layers waiting on encryption confirmation.
	 */
	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}
791
792 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
793                                      struct sk_buff *skb)
794 {
795         struct hci_rp_read_local_commands *rp = data;
796
797         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
798
799         if (rp->status)
800                 return rp->status;
801
802         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
803             hci_dev_test_flag(hdev, HCI_CONFIG))
804                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
805
806         return rp->status;
807 }
808
809 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
810                                            struct sk_buff *skb)
811 {
812         struct hci_rp_read_auth_payload_to *rp = data;
813         struct hci_conn *conn;
814
815         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
816
817         if (rp->status)
818                 return rp->status;
819
820         hci_dev_lock(hdev);
821
822         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
823         if (conn)
824                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
825
826         hci_dev_unlock(hdev);
827
828         return rp->status;
829 }
830
831 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
832                                             struct sk_buff *skb)
833 {
834         struct hci_rp_write_auth_payload_to *rp = data;
835         struct hci_conn *conn;
836         void *sent;
837
838         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
839
840         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
841         if (!sent)
842                 return rp->status;
843
844         hci_dev_lock(hdev);
845
846         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
847         if (!conn) {
848                 rp->status = 0xff;
849                 goto unlock;
850         }
851
852         if (!rp->status)
853                 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
854
855 unlock:
856         hci_dev_unlock(hdev);
857
858         return rp->status;
859 }
860
/* Command Complete handler for Read Local Supported Features.
 *
 * Caches the 8-byte LMP feature page 0 and derives the default ACL
 * packet types and (e)SCO link types this controller can use from the
 * individual feature bits.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO rates and 3-slot EDR eSCO */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}
912
913 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
914                                          struct sk_buff *skb)
915 {
916         struct hci_rp_read_local_ext_features *rp = data;
917
918         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
919
920         if (rp->status)
921                 return rp->status;
922
923         if (hdev->max_page < rp->max_page) {
924                 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
925                              &hdev->quirks))
926                         bt_dev_warn(hdev, "broken local ext features page 2");
927                 else
928                         hdev->max_page = rp->max_page;
929         }
930
931         if (rp->page < HCI_MAX_PAGES)
932                 memcpy(hdev->features[rp->page], rp->features, 8);
933
934         return rp->status;
935 }
936
937 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
938                                         struct sk_buff *skb)
939 {
940         struct hci_rp_read_flow_control_mode *rp = data;
941
942         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
943
944         if (rp->status)
945                 return rp->status;
946
947         hdev->flow_ctl_mode = rp->mode;
948
949         return rp->status;
950 }
951
952 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
953                                   struct sk_buff *skb)
954 {
955         struct hci_rp_read_buffer_size *rp = data;
956
957         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
958
959         if (rp->status)
960                 return rp->status;
961
962         hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
963         hdev->sco_mtu  = rp->sco_mtu;
964         hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
965         hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
966
967         if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
968                 hdev->sco_mtu  = 64;
969                 hdev->sco_pkts = 8;
970         }
971
972         hdev->acl_cnt = hdev->acl_pkts;
973         hdev->sco_cnt = hdev->sco_pkts;
974
975         BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
976                hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
977
978         return rp->status;
979 }
980
981 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
982                               struct sk_buff *skb)
983 {
984         struct hci_rp_read_bd_addr *rp = data;
985
986         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
987
988         if (rp->status)
989                 return rp->status;
990
991         if (test_bit(HCI_INIT, &hdev->flags))
992                 bacpy(&hdev->bdaddr, &rp->bdaddr);
993
994         if (hci_dev_test_flag(hdev, HCI_SETUP))
995                 bacpy(&hdev->setup_addr, &rp->bdaddr);
996
997         return rp->status;
998 }
999
1000 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
1001                                          struct sk_buff *skb)
1002 {
1003         struct hci_rp_read_local_pairing_opts *rp = data;
1004
1005         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1006
1007         if (rp->status)
1008                 return rp->status;
1009
1010         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1011             hci_dev_test_flag(hdev, HCI_CONFIG)) {
1012                 hdev->pairing_opts = rp->pairing_opts;
1013                 hdev->max_enc_key_size = rp->max_key_size;
1014         }
1015
1016         return rp->status;
1017 }
1018
1019 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
1020                                          struct sk_buff *skb)
1021 {
1022         struct hci_rp_read_page_scan_activity *rp = data;
1023
1024         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1025
1026         if (rp->status)
1027                 return rp->status;
1028
1029         if (test_bit(HCI_INIT, &hdev->flags)) {
1030                 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1031                 hdev->page_scan_window = __le16_to_cpu(rp->window);
1032         }
1033
1034         return rp->status;
1035 }
1036
1037 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1038                                           struct sk_buff *skb)
1039 {
1040         struct hci_ev_status *rp = data;
1041         struct hci_cp_write_page_scan_activity *sent;
1042
1043         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1044
1045         if (rp->status)
1046                 return rp->status;
1047
1048         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1049         if (!sent)
1050                 return rp->status;
1051
1052         hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1053         hdev->page_scan_window = __le16_to_cpu(sent->window);
1054
1055         return rp->status;
1056 }
1057
1058 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1059                                      struct sk_buff *skb)
1060 {
1061         struct hci_rp_read_page_scan_type *rp = data;
1062
1063         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1064
1065         if (rp->status)
1066                 return rp->status;
1067
1068         if (test_bit(HCI_INIT, &hdev->flags))
1069                 hdev->page_scan_type = rp->type;
1070
1071         return rp->status;
1072 }
1073
1074 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1075                                       struct sk_buff *skb)
1076 {
1077         struct hci_ev_status *rp = data;
1078         u8 *type;
1079
1080         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1081
1082         if (rp->status)
1083                 return rp->status;
1084
1085         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1086         if (type)
1087                 hdev->page_scan_type = *type;
1088
1089         return rp->status;
1090 }
1091
1092 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1093                                       struct sk_buff *skb)
1094 {
1095         struct hci_rp_read_data_block_size *rp = data;
1096
1097         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1098
1099         if (rp->status)
1100                 return rp->status;
1101
1102         hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1103         hdev->block_len = __le16_to_cpu(rp->block_len);
1104         hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1105
1106         hdev->block_cnt = hdev->num_blocks;
1107
1108         BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1109                hdev->block_cnt, hdev->block_len);
1110
1111         return rp->status;
1112 }
1113
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	/* The original command tells us whether the local clock
	 * (which == 0x00) or a connection's clock was requested.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* Local Bluetooth clock */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	/* Otherwise store clock and accuracy on the matching connection */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1147
1148 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1149                                      struct sk_buff *skb)
1150 {
1151         struct hci_rp_read_local_amp_info *rp = data;
1152
1153         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1154
1155         if (rp->status)
1156                 return rp->status;
1157
1158         hdev->amp_status = rp->amp_status;
1159         hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1160         hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1161         hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1162         hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1163         hdev->amp_type = rp->amp_type;
1164         hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1165         hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1166         hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1167         hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1168
1169         return rp->status;
1170 }
1171
1172 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1173                                        struct sk_buff *skb)
1174 {
1175         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1176
1177         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1178
1179         if (rp->status)
1180                 return rp->status;
1181
1182         hdev->inq_tx_power = rp->tx_power;
1183
1184         return rp->status;
1185 }
1186
1187 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1188                                              struct sk_buff *skb)
1189 {
1190         struct hci_rp_read_def_err_data_reporting *rp = data;
1191
1192         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1193
1194         if (rp->status)
1195                 return rp->status;
1196
1197         hdev->err_data_reporting = rp->err_data_reporting;
1198
1199         return rp->status;
1200 }
1201
1202 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1203                                               struct sk_buff *skb)
1204 {
1205         struct hci_ev_status *rp = data;
1206         struct hci_cp_write_def_err_data_reporting *cp;
1207
1208         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1209
1210         if (rp->status)
1211                 return rp->status;
1212
1213         cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1214         if (!cp)
1215                 return rp->status;
1216
1217         hdev->err_data_reporting = cp->err_data_reporting;
1218
1219         return rp->status;
1220 }
1221
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	/* Notify the management interface regardless of the status so
	 * userspace sees failures too.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	/* Remember the PIN length on the matching ACL connection */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1251
1252 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1253                                     struct sk_buff *skb)
1254 {
1255         struct hci_rp_pin_code_neg_reply *rp = data;
1256
1257         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1258
1259         hci_dev_lock(hdev);
1260
1261         if (hci_dev_test_flag(hdev, HCI_MGMT))
1262                 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1263                                                  rp->status);
1264
1265         hci_dev_unlock(hdev);
1266
1267         return rp->status;
1268 }
1269
1270 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1271                                      struct sk_buff *skb)
1272 {
1273         struct hci_rp_le_read_buffer_size *rp = data;
1274
1275         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1276
1277         if (rp->status)
1278                 return rp->status;
1279
1280         hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1281         hdev->le_pkts = rp->le_max_pkt;
1282
1283         hdev->le_cnt = hdev->le_pkts;
1284
1285         BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1286
1287         return rp->status;
1288 }
1289
1290 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1291                                         struct sk_buff *skb)
1292 {
1293         struct hci_rp_le_read_local_features *rp = data;
1294
1295         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1296
1297         if (rp->status)
1298                 return rp->status;
1299
1300         memcpy(hdev->le_features, rp->features, 8);
1301
1302         return rp->status;
1303 }
1304
1305 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1306                                       struct sk_buff *skb)
1307 {
1308         struct hci_rp_le_read_adv_tx_power *rp = data;
1309
1310         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1311
1312         if (rp->status)
1313                 return rp->status;
1314
1315         hdev->adv_tx_power = rp->tx_power;
1316
1317         return rp->status;
1318 }
1319
1320 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1321                                     struct sk_buff *skb)
1322 {
1323         struct hci_rp_user_confirm_reply *rp = data;
1324
1325         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1326
1327         hci_dev_lock(hdev);
1328
1329         if (hci_dev_test_flag(hdev, HCI_MGMT))
1330                 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1331                                                  rp->status);
1332
1333         hci_dev_unlock(hdev);
1334
1335         return rp->status;
1336 }
1337
1338 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1339                                         struct sk_buff *skb)
1340 {
1341         struct hci_rp_user_confirm_reply *rp = data;
1342
1343         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1344
1345         hci_dev_lock(hdev);
1346
1347         if (hci_dev_test_flag(hdev, HCI_MGMT))
1348                 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1349                                                      ACL_LINK, 0, rp->status);
1350
1351         hci_dev_unlock(hdev);
1352
1353         return rp->status;
1354 }
1355
1356 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1357                                     struct sk_buff *skb)
1358 {
1359         struct hci_rp_user_confirm_reply *rp = data;
1360
1361         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1362
1363         hci_dev_lock(hdev);
1364
1365         if (hci_dev_test_flag(hdev, HCI_MGMT))
1366                 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1367                                                  0, rp->status);
1368
1369         hci_dev_unlock(hdev);
1370
1371         return rp->status;
1372 }
1373
1374 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1375                                         struct sk_buff *skb)
1376 {
1377         struct hci_rp_user_confirm_reply *rp = data;
1378
1379         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1380
1381         hci_dev_lock(hdev);
1382
1383         if (hci_dev_test_flag(hdev, HCI_MGMT))
1384                 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1385                                                      ACL_LINK, 0, rp->status);
1386
1387         hci_dev_unlock(hdev);
1388
1389         return rp->status;
1390 }
1391
1392 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1393                                      struct sk_buff *skb)
1394 {
1395         struct hci_rp_read_local_oob_data *rp = data;
1396
1397         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1398
1399         return rp->status;
1400 }
1401
1402 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1403                                          struct sk_buff *skb)
1404 {
1405         struct hci_rp_read_local_oob_ext_data *rp = data;
1406
1407         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1408
1409         return rp->status;
1410 }
1411
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* Cache the random address the controller just accepted */
	bacpy(&hdev->random_addr, sent);

	/* If the configured address is the current RPA, clear the expired
	 * flag and (re)arm the expiry timer.
	 */
	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1441
1442 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1443                                     struct sk_buff *skb)
1444 {
1445         struct hci_ev_status *rp = data;
1446         struct hci_cp_le_set_default_phy *cp;
1447
1448         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1449
1450         if (rp->status)
1451                 return rp->status;
1452
1453         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1454         if (!cp)
1455                 return rp->status;
1456
1457         hci_dev_lock(hdev);
1458
1459         hdev->le_tx_def_phys = cp->tx_phys;
1460         hdev->le_rx_def_phys = cp->rx_phys;
1461
1462         hci_dev_unlock(hdev);
1463
1464         return rp->status;
1465 }
1466
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Only update a non-zero adv instance: handle 0x00 shall be using
	 * HCI_OP_LE_SET_RANDOM_ADDR instead, since that allows both extended
	 * and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		/* If the new address is the current RPA, mark it valid and
		 * (re)arm the per-instance expiry timer.
		 */
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1504
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The removed instance number is taken from the original command */
	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	/* Drop the local instance and, on success, notify mgmt using the
	 * socket attached to the pending command (if any).
	 */
	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}
1532
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Only react if this completion matches our pending command */
	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	/* The controller dropped all sets: remove every local instance
	 * (safe iteration since entries are deleted while walking) and
	 * notify mgmt for each successfully removed one.
	 */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1563
1564 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1565                                         struct sk_buff *skb)
1566 {
1567         struct hci_rp_le_read_transmit_power *rp = data;
1568
1569         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1570
1571         if (rp->status)
1572                 return rp->status;
1573
1574         hdev->min_le_tx_power = rp->min_le_tx_power;
1575         hdev->max_le_tx_power = rp->max_le_tx_power;
1576
1577         return rp->status;
1578 }
1579
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	/* Mirror the applied privacy mode into the matching connection
	 * parameters. NOTE(review): WRITE_ONCE suggests privacy_mode has
	 * lockless readers elsewhere - confirm before restructuring.
	 */
	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}
1606
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case connection establishment goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1645
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	/* The command carries an array of sets; only the first entry is
	 * consulted here.
	 */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		/* enabled is only tracked here for non-periodic instances */
		if (adv && !adv->periodic)
			adv->enabled = true;

		/* Arm a timeout if advertising was started as part of
		 * establishing an LE connection.
		 */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1710
1711 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1712                                    struct sk_buff *skb)
1713 {
1714         struct hci_cp_le_set_scan_param *cp;
1715         struct hci_ev_status *rp = data;
1716
1717         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1718
1719         if (rp->status)
1720                 return rp->status;
1721
1722         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1723         if (!cp)
1724                 return rp->status;
1725
1726         hci_dev_lock(hdev);
1727
1728         hdev->le_scan_type = cp->type;
1729
1730         hci_dev_unlock(hdev);
1731
1732         return rp->status;
1733 }
1734
1735 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1736                                        struct sk_buff *skb)
1737 {
1738         struct hci_cp_le_set_ext_scan_params *cp;
1739         struct hci_ev_status *rp = data;
1740         struct hci_cp_le_scan_phy_params *phy_param;
1741
1742         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1743
1744         if (rp->status)
1745                 return rp->status;
1746
1747         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1748         if (!cp)
1749                 return rp->status;
1750
1751         phy_param = (void *)cp->data;
1752
1753         hci_dev_lock(hdev);
1754
1755         hdev->le_scan_type = phy_param->type;
1756
1757         hci_dev_unlock(hdev);
1758
1759         return rp->status;
1760 }
1761
1762 static bool has_pending_adv_report(struct hci_dev *hdev)
1763 {
1764         struct discovery_state *d = &hdev->discovery;
1765
1766         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1767 }
1768
1769 static void clear_pending_adv_report(struct hci_dev *hdev)
1770 {
1771         struct discovery_state *d = &hdev->discovery;
1772
1773         bacpy(&d->last_adv_addr, BDADDR_ANY);
1774         d->last_adv_data_len = 0;
1775 }
1776
#ifndef TIZEN_BT
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	/* Drop reports whose data exceeds the controller's advertising
	 * data limit.
	 */
	if (len > max_adv_len(hdev))
		return;

	/* Cache the report; le_set_scan_enable_complete() flushes it to
	 * userspace via mgmt_device_found() when scanning is disabled.
	 */
	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
#endif
1795
/* Common post-processing for LE Set (Ext) Scan Enable completion: update
 * the HCI_LE_SCAN flag, flush any cached advertising report on disable and
 * reconcile the discovery state.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
#ifndef TIZEN_BT /* Tizen: the generic call below is considered a bug here */
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
#else
			hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
#endif
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1855
1856 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1857                                     struct sk_buff *skb)
1858 {
1859         struct hci_cp_le_set_scan_enable *cp;
1860         struct hci_ev_status *rp = data;
1861
1862         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1863
1864         if (rp->status)
1865                 return rp->status;
1866
1867         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1868         if (!cp)
1869                 return rp->status;
1870
1871         le_set_scan_enable_complete(hdev, cp->enable);
1872
1873         return rp->status;
1874 }
1875
1876 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1877                                         struct sk_buff *skb)
1878 {
1879         struct hci_cp_le_set_ext_scan_enable *cp;
1880         struct hci_ev_status *rp = data;
1881
1882         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1883
1884         if (rp->status)
1885                 return rp->status;
1886
1887         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1888         if (!cp)
1889                 return rp->status;
1890
1891         le_set_scan_enable_complete(hdev, cp->enable);
1892
1893         return rp->status;
1894 }
1895
1896 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1897                                       struct sk_buff *skb)
1898 {
1899         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1900
1901         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1902                    rp->num_of_sets);
1903
1904         if (rp->status)
1905                 return rp->status;
1906
1907         hdev->le_num_of_adv_sets = rp->num_of_sets;
1908
1909         return rp->status;
1910 }
1911
1912 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1913                                           struct sk_buff *skb)
1914 {
1915         struct hci_rp_le_read_accept_list_size *rp = data;
1916
1917         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1918
1919         if (rp->status)
1920                 return rp->status;
1921
1922         hdev->le_accept_list_size = rp->size;
1923
1924         return rp->status;
1925 }
1926
/* Command Complete handler for HCI_OP_LE_CLEAR_ACCEPT_LIST.
 *
 * On success, drop the host-side mirror of the controller's LE accept
 * list so the two stay in sync.  Returns the HCI status.
 */
static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
1943
/* Command Complete handler for HCI_OP_LE_ADD_TO_ACCEPT_LIST.
 *
 * On success, mirror the address the controller just accepted into
 * hdev->le_accept_list.  Returns the HCI status.
 */
static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Parameters of the command that triggered this event */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1966
/* Command Complete handler for HCI_OP_LE_DEL_FROM_ACCEPT_LIST.
 *
 * On success, remove the same address from the host-side mirror of the
 * controller's LE accept list.  Returns the HCI status.
 */
static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Parameters of the command that triggered this event */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1989
1990 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1991                                           struct sk_buff *skb)
1992 {
1993         struct hci_rp_le_read_supported_states *rp = data;
1994
1995         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1996
1997         if (rp->status)
1998                 return rp->status;
1999
2000         memcpy(hdev->le_states, rp->le_states, 8);
2001
2002         return rp->status;
2003 }
2004
2005 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
2006                                       struct sk_buff *skb)
2007 {
2008         struct hci_rp_le_read_def_data_len *rp = data;
2009
2010         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2011
2012         if (rp->status)
2013                 return rp->status;
2014
2015         hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
2016         hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
2017
2018         return rp->status;
2019 }
2020
/* Command Complete handler for HCI_OP_LE_WRITE_DEF_DATA_LEN.
 *
 * On success, adopt the TX octets/time values we asked the controller
 * to use as the new cached defaults.  Returns the HCI status.
 */
static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The event carries only a status; the values come from the
	 * command we sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}
2041
/* Command Complete handler for HCI_OP_LE_ADD_TO_RESOLV_LIST.
 *
 * On success, mirror the added entry (address plus peer/local IRKs)
 * into hdev->le_resolv_list.  Returns the HCI status.
 */
static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Parameters of the command that triggered this event */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				sent->bdaddr_type, sent->peer_irk,
				sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}
2065
/* Command Complete handler for HCI_OP_LE_DEL_FROM_RESOLV_LIST.
 *
 * On success, remove the matching entry from the host-side mirror of
 * the controller's resolving list.  Returns the HCI status.
 */
static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Parameters of the command that triggered this event */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
2088
/* Command Complete handler for HCI_OP_LE_CLEAR_RESOLV_LIST.
 *
 * On success, drop the host-side mirror of the controller's resolving
 * list.  Returns the HCI status.
 */
static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
2105
2106 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2107                                           struct sk_buff *skb)
2108 {
2109         struct hci_rp_le_read_resolv_list_size *rp = data;
2110
2111         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2112
2113         if (rp->status)
2114                 return rp->status;
2115
2116         hdev->le_resolv_list_size = rp->size;
2117
2118         return rp->status;
2119 }
2120
/* Command Complete handler for HCI_OP_LE_SET_ADDR_RESOLV_ENABLE.
 *
 * On success, toggle the HCI_LL_RPA_RESOLUTION device flag to match the
 * enable value carried by the command we sent.  Returns the HCI status.
 */
static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The command payload is a single enable byte */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}
2147
2148 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2149                                       struct sk_buff *skb)
2150 {
2151         struct hci_rp_le_read_max_data_len *rp = data;
2152
2153         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2154
2155         if (rp->status)
2156                 return rp->status;
2157
2158         hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2159         hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2160         hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2161         hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2162
2163         return rp->status;
2164 }
2165
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, update the cached host feature bits (LMP_HOST_LE,
 * LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED/HCI_ADVERTISING device
 * flags to reflect what we asked the controller to set.
 * Returns the HCI status.
 */
static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The event has no parameters; the new values come from the
	 * command we sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		/* Advertising cannot stay on with LE disabled */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}
2201
/* Command Complete handler for HCI_OP_LE_SET_ADV_PARAM.
 *
 * On success, remember which own-address type the controller is now
 * advertising with.  Returns the HCI status.
 */
static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Parameters of the command that triggered this event */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}
2223
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_PARAMS.
 *
 * On success, record the own-address type and the TX power selected by
 * the controller (per advertising instance; handle 0 is stored on hdev
 * itself), then refresh the advertising data which may embed the TX
 * power.  Returns the HCI status.
 */
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Parameters of the command that triggered this event */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}
2257
2258 #ifdef TIZEN_BT
/* Tizen vendor-specific Command Complete handler for the RSSI-monitor
 * enable command: forwards the result (including the LE extended
 * opcode) to the mgmt layer.  Returns the HCI status.
 */
static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb)
{
	struct hci_cc_rsp_enable_rssi *rp = data;

	BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
	       hdev->name, rp->status, rp->le_ext_opcode);

	mgmt_enable_rssi_cc(hdev, rp, rp->status);

	return rp->status;
}
2271
/* Tizen vendor-specific Command Complete handler for the raw-RSSI read
 * command: forwards the connection handle and RSSI value to the mgmt
 * layer.  Returns the HCI status.
 */
static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_cc_rp_get_raw_rssi *rp = data;

	BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
	       hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);

	mgmt_raw_rssi_response(hdev, rp, rp->status);

	return rp->status;
}
2284
/* Tizen vendor-specific LE_RSSI_LINK_ALERT sub-event: relay the alert
 * (handle, alert type, RSSI) to the mgmt layer.
 */
static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;

	BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);

	mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
			    ev->rssi_dbm);
}
2295
/* Tizen vendor-specific group extension event demultiplexer: consume
 * the group-event header and dispatch on the LE extended sub-code.
 * Unknown sub-codes are silently ignored.
 */
static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
	__u8 event_le_ext_sub_code;

	BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
	       LE_META_VENDOR_SPECIFIC_GROUP_EVENT);

	/* Advance past the header so the sub-handler sees its payload;
	 * ev still points at the (unfreed) header bytes.
	 */
	skb_pull(skb, sizeof(*ev));
	event_le_ext_sub_code = ev->event_le_ext_sub_code;

	switch (event_le_ext_sub_code) {
	case LE_RSSI_LINK_ALERT:
		hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
		break;

	default:
		break;
	}
}
2317
/* Tizen vendor-specific multi-advertising state change sub-event:
 * relay instance, reason, and connection handle to the mgmt layer.
 */
static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
						  struct sk_buff *skb)
{
	struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;

	BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");

	mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
					ev->state_change_reason,
					ev->connection_handle);
}
2329
/* Tizen vendor-specific event demultiplexer: consume the vendor event
 * header and dispatch on the sub-code.  Unknown sub-codes are ignored.
 * NOTE(review): reads skb->data directly and ignores the pre-parsed
 * "data" argument — presumably intentional to match the vendor layout;
 * confirm against the other vendor handlers.
 */
static void hci_vendor_specific_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_vendor_specific *ev = (void *)skb->data;
	__u8 event_sub_code;

	BT_DBG("hci_vendor_specific_evt");

	/* Advance past the header so sub-handlers see their payload */
	skb_pull(skb, sizeof(*ev));
	event_sub_code = ev->event_sub_code;

	switch (event_sub_code) {
	case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
		hci_vendor_specific_group_ext_evt(hdev, skb);
		break;

	case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
		hci_vendor_multi_adv_state_change_evt(hdev, skb);
		break;

	default:
		break;
	}
}
2354 #endif
2355
/* Command Complete handler for HCI_OP_READ_RSSI.
 *
 * On success, cache the reported RSSI on the matching connection, if it
 * still exists.  Returns the HCI status.
 */
static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}
2377
/* Command Complete handler for HCI_OP_READ_TX_POWER.
 *
 * The request's "type" selects which value was read: 0x00 stores the
 * current TX power, 0x01 the maximum TX power, on the matching
 * connection.  Returns the HCI status.
 */
static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Needed to learn which power level type was requested */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
2413
/* Command Complete handler for HCI_OP_WRITE_SSP_DEBUG_MODE.
 *
 * On success, cache the debug-mode byte we asked the controller to set.
 * Returns the HCI status.
 */
static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The command payload is a single mode byte */
	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}
2431
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure, kick any pending connection attempts that were waiting
 * for the inquiry; on success, mark the device as inquiring.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	/* Only set the flag if we really sent an inquiry command */
	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
		set_bit(HCI_INQUIRY, &hdev->flags);
}
2444
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure: tear down the connection object, except that status 0x0c
 * is retried (up to two attempts) by parking the connection in
 * BT_CONNECT2.  On success: make sure a connection object exists for
 * the peer so later events have something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
						  HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
2482
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need work here: look up the ACL connection whose handle
 * the command referenced and tear down the SCO link that was being set
 * up on top of it.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl;
	struct hci_link *link;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* The pending SCO rides on the ACL's first link entry */
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2519
/* Command Status handler for HCI_OP_AUTH_REQUESTED.
 *
 * Only failures need work: if the connection is still being configured,
 * report the failure to upper layers and drop our reference.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2546
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.
 *
 * Only failures need work: if the connection is still being configured,
 * report the failure to upper layers and drop our reference.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2573
2574 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2575                                     struct hci_conn *conn)
2576 {
2577         if (conn->state != BT_CONFIG || !conn->out)
2578                 return 0;
2579
2580         if (conn->pending_sec_level == BT_SECURITY_SDP)
2581                 return 0;
2582
2583         /* Only request authentication for SSP connections or non-SSP
2584          * devices with sec_level MEDIUM or HIGH or if MITM protection
2585          * is requested.
2586          */
2587         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2588             conn->pending_sec_level != BT_SECURITY_FIPS &&
2589             conn->pending_sec_level != BT_SECURITY_HIGH &&
2590             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2591                 return 0;
2592
2593         return 1;
2594 }
2595
/* Issue a Remote Name Request for an inquiry cache entry, reusing the
 * page-scan parameters and clock offset learned during inquiry so the
 * controller can page the device faster.
 *
 * Returns the result of hci_send_cmd() (0 on success, negative errno
 * on failure to queue the command).
 */
static int hci_resolve_name(struct hci_dev *hdev,
				   struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
2610
/* Start resolving the name of the next discovered device, if any.
 *
 * Returns true when a name request was successfully issued (the entry
 * is then marked NAME_PENDING), false when there is nothing left to
 * resolve, the overall name-resolution budget has expired, or sending
 * the request failed.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	/* We should stop if we already spent too much time resolving names. */
	if (time_after(jiffies, discov->name_resolve_timeout)) {
		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
		return false;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}
2636
/* Handle the outcome of a remote name request during discovery.
 *
 * Notifies mgmt of the (possibly connected) device's name, then drives
 * the discovery name-resolution state machine: the resolved entry is
 * removed from the pending list, the name reported to mgmt, and the
 * next resolution started — or discovery stopped when there is nothing
 * left (or stopping was already requested).  A NULL name means the
 * request failed and the name is recorded as not known.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

#ifdef TIZEN_BT
	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
		if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, conn, name, name_len);
		else
			mgmt_device_name_update(hdev, bdaddr, name, name_len);
	}
#else
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);
#endif

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving a next name
	 * as it will be done upon receiving another Remote Name Request
	 * Complete Event.
	 */
	if (!e)
		return;

	list_del(&e->list);

	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2692
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are processed here — on success we wait for the Remote
 * Name Request Complete event instead.  On failure, let the discovery
 * state machine move on, and, if the peer connection still needs
 * authentication, issue the authentication request that was pending on
 * the name lookup.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name tells the discovery code the lookup failed */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2735
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.
 *
 * Only failures need work: if the connection is still being configured,
 * report the failure to upper layers and drop our reference.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2762
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.
 *
 * Only failures need work: if the connection is still being configured,
 * report the failure to upper layers and drop our reference.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2789
/* Common failure path for the (Enhanced) Setup Synchronous Connection
 * commands: find the ACL the SCO/eSCO was being set up on and tear down
 * the attached sync link, notifying upper layers of the status.
 */
static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
				       __u8 status)
{
	struct hci_conn *acl;
	struct hci_link *link;

	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* The pending sync conn rides on the ACL's first link */
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2814
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures need work; delegate the teardown to the shared helper.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
}
2830
/* Command Status handler for HCI_OP_ENHANCED_SETUP_SYNC_CONN.
 *
 * Only failures need work; delegate the teardown to the shared helper.
 */
static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_enhanced_setup_sync_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
	if (!cp)
		return;

	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
}
2846
/* Command Status handler for HCI_OP_SNIFF_MODE.
 *
 * Only failures need work: clear the pending mode-change flag and, if a
 * SCO setup was waiting on the mode change, continue it with the error
 * status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2873
2874 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2875 {
2876         struct hci_cp_exit_sniff_mode *cp;
2877         struct hci_conn *conn;
2878
2879         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2880
2881         if (!status)
2882                 return;
2883
2884         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2885         if (!cp)
2886                 return;
2887
2888         hci_dev_lock(hdev);
2889
2890         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2891         if (conn) {
2892                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2893
2894                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2895                         hci_sco_setup(conn, status);
2896         }
2897
2898         hci_dev_unlock(hdev);
2899 }
2900
/* Command Status for HCI_Disconnect.
 *
 * Normally the connection is cleaned up when HCI_EV_DISCONN_COMPLETE
 * arrives; this handler only acts when the command failed or when the
 * controller is suspended, in which case the connection is torn down
 * immediately here.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		/* Failure path: notify mgmt, restore peripheral advertising
		 * if this was an LE slave link, then fall through to cleanup.
		 */
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	/* Suspended path (status 0x00): do the same bookkeeping that the
	 * disconnect complete event handler would otherwise perform.
	 */
	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-queue auto-connect entries so the device is reconnected later */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
2982
2983 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2984 {
2985         /* When using controller based address resolution, then the new
2986          * address types 0x02 and 0x03 are used. These types need to be
2987          * converted back into either public address or random address type
2988          */
2989         switch (type) {
2990         case ADDR_LE_DEV_PUBLIC_RESOLVED:
2991                 if (resolved)
2992                         *resolved = true;
2993                 return ADDR_LE_DEV_PUBLIC;
2994         case ADDR_LE_DEV_RANDOM_RESOLVED:
2995                 if (resolved)
2996                         *resolved = true;
2997                 return ADDR_LE_DEV_RANDOM;
2998         }
2999
3000         if (resolved)
3001                 *resolved = false;
3002         return type;
3003 }
3004
3005 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
3006                               u8 peer_addr_type, u8 own_address_type,
3007                               u8 filter_policy)
3008 {
3009         struct hci_conn *conn;
3010
3011         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
3012                                        peer_addr_type);
3013         if (!conn)
3014                 return;
3015
3016         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
3017
3018         /* Store the initiator and responder address information which
3019          * is needed for SMP. These values will not change during the
3020          * lifetime of the connection.
3021          */
3022         conn->init_addr_type = own_address_type;
3023         if (own_address_type == ADDR_LE_DEV_RANDOM)
3024                 bacpy(&conn->init_addr, &hdev->random_addr);
3025         else
3026                 bacpy(&conn->init_addr, &hdev->bdaddr);
3027
3028         conn->resp_addr_type = peer_addr_type;
3029         bacpy(&conn->resp_addr, peer_addr);
3030 }
3031
3032 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
3033 {
3034         struct hci_cp_le_create_conn *cp;
3035
3036         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3037
3038         /* All connection failure handling is taken care of by the
3039          * hci_conn_failed function which is triggered by the HCI
3040          * request completion callbacks used for connecting.
3041          */
3042         if (status)
3043                 return;
3044
3045         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
3046         if (!cp)
3047                 return;
3048
3049         hci_dev_lock(hdev);
3050
3051         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3052                           cp->own_address_type, cp->filter_policy);
3053
3054         hci_dev_unlock(hdev);
3055 }
3056
3057 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
3058 {
3059         struct hci_cp_le_ext_create_conn *cp;
3060
3061         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3062
3063         /* All connection failure handling is taken care of by the
3064          * hci_conn_failed function which is triggered by the HCI
3065          * request completion callbacks used for connecting.
3066          */
3067         if (status)
3068                 return;
3069
3070         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
3071         if (!cp)
3072                 return;
3073
3074         hci_dev_lock(hdev);
3075
3076         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3077                           cp->own_addr_type, cp->filter_policy);
3078
3079         hci_dev_unlock(hdev);
3080 }
3081
3082 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
3083 {
3084         struct hci_cp_le_read_remote_features *cp;
3085         struct hci_conn *conn;
3086
3087         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3088
3089         if (!status)
3090                 return;
3091
3092         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
3093         if (!cp)
3094                 return;
3095
3096         hci_dev_lock(hdev);
3097
3098         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3099         if (conn) {
3100                 if (conn->state == BT_CONFIG) {
3101                         hci_connect_cfm(conn, status);
3102                         hci_conn_drop(conn);
3103                 }
3104         }
3105
3106         hci_dev_unlock(hdev);
3107 }
3108
3109 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3110 {
3111         struct hci_cp_le_start_enc *cp;
3112         struct hci_conn *conn;
3113
3114         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3115
3116         if (!status)
3117                 return;
3118
3119         hci_dev_lock(hdev);
3120
3121         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3122         if (!cp)
3123                 goto unlock;
3124
3125         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3126         if (!conn)
3127                 goto unlock;
3128
3129         if (conn->state != BT_CONNECTED)
3130                 goto unlock;
3131
3132         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3133         hci_conn_drop(conn);
3134
3135 unlock:
3136         hci_dev_unlock(hdev);
3137 }
3138
3139 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3140 {
3141         struct hci_cp_switch_role *cp;
3142         struct hci_conn *conn;
3143
3144         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3145
3146         if (!status)
3147                 return;
3148
3149         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3150         if (!cp)
3151                 return;
3152
3153         hci_dev_lock(hdev);
3154
3155         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3156         if (conn)
3157                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3158
3159         hci_dev_unlock(hdev);
3160 }
3161
/* Handle HCI_Inquiry_Complete event.
 *
 * Clears the HCI_INQUIRY flag (waking anyone waiting on it) and, when
 * mgmt-driven discovery is in progress, either starts remote name
 * resolution for cached entries that still need it or marks discovery
 * as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Without mgmt there is no discovery state machine to update */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Entries on the resolve list still need their remote name; kick
	 * off resolution for the next one and switch to RESOLVING state.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
3222
3223 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3224                                    struct sk_buff *skb)
3225 {
3226         struct hci_ev_inquiry_result *ev = edata;
3227         struct inquiry_data data;
3228         int i;
3229
3230         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3231                              flex_array_size(ev, info, ev->num)))
3232                 return;
3233
3234         bt_dev_dbg(hdev, "num %d", ev->num);
3235
3236         if (!ev->num)
3237                 return;
3238
3239         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3240                 return;
3241
3242         hci_dev_lock(hdev);
3243
3244         for (i = 0; i < ev->num; i++) {
3245                 struct inquiry_info *info = &ev->info[i];
3246                 u32 flags;
3247
3248                 bacpy(&data.bdaddr, &info->bdaddr);
3249                 data.pscan_rep_mode     = info->pscan_rep_mode;
3250                 data.pscan_period_mode  = info->pscan_period_mode;
3251                 data.pscan_mode         = info->pscan_mode;
3252                 memcpy(data.dev_class, info->dev_class, 3);
3253                 data.clock_offset       = info->clock_offset;
3254                 data.rssi               = HCI_RSSI_INVALID;
3255                 data.ssp_mode           = 0x00;
3256
3257                 flags = hci_inquiry_cache_update(hdev, &data, false);
3258
3259                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3260                                   info->dev_class, HCI_RSSI_INVALID,
3261                                   flags, NULL, 0, NULL, 0, 0);
3262         }
3263
3264         hci_dev_unlock(hdev);
3265 }
3266
/* Handle HCI_Connection_Complete event.
 *
 * Looks up the pending connection by link type and address, creating
 * one for auto-connected/filter-accepted ACL links, sets the handle
 * exactly once (duplicate events are rejected to avoid memory
 * corruption), and performs post-connect setup: remote feature read,
 * packet type for legacy incoming links, auth/encrypt flags, and SCO
 * follow-up.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add_unset(hdev, ev->link_type,
						  &ev->bdaddr, HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* A SCO completion may match a connection that was
			 * created as ESCO; convert its type in that case.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status)
			goto done;

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give unpaired incoming legacy connections a longer
			 * disconnect timeout so pairing can complete.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}

#ifdef TIZEN_BT
		/* Tizen-specific: as central, apply a fixed link
		 * supervision timeout to the new connection.
		 */
		if (get_link_mode(conn) & HCI_LM_MASTER)
			hci_conn_change_supervision_timeout(conn,
					LINK_SUPERVISION_TIMEOUT);
#endif
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
3403
3404 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3405 {
3406         struct hci_cp_reject_conn_req cp;
3407
3408         bacpy(&cp.bdaddr, bdaddr);
3409         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3410         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3411 }
3412
/* Handle HCI_Connection_Request event.
 *
 * Applies the acceptance policy (CVE-2020-26555 same-address guard,
 * protocol indication, reject list, connectable/accept-list checks)
 * and then either accepts the ACL or sync connection request, or
 * defers the decision to the protocol layer via hci_connect_cfm().
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Let the protocol layer veto the connection or request deferral */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class for this peer if it is known */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

#ifdef TIZEN_BT
		/* Tizen-specific: refuse a second (e)SCO link while one
		 * already exists, citing limited resources.
		 */
		if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
		    hci_conn_hash_lookup_sco(hdev)) {
			struct hci_cp_reject_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
			hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
				     sizeof(cp), &cp);
			hci_dev_unlock(hdev);
			return;
		}
#endif

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
					  HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Accept the sync connection with default SCO parameters */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol asked to defer: leave in BT_CONNECT2 and let
		 * the upper layer decide via the connect callback.
		 */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3534
3535 static u8 hci_to_mgmt_reason(u8 err)
3536 {
3537         switch (err) {
3538         case HCI_ERROR_CONNECTION_TIMEOUT:
3539                 return MGMT_DEV_DISCONN_TIMEOUT;
3540         case HCI_ERROR_REMOTE_USER_TERM:
3541         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3542         case HCI_ERROR_REMOTE_POWER_OFF:
3543                 return MGMT_DEV_DISCONN_REMOTE;
3544         case HCI_ERROR_LOCAL_HOST_TERM:
3545                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3546         default:
3547                 return MGMT_DEV_DISCONN_UNKNOWN;
3548         }
3549 }
3550
3551 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3552                                      struct sk_buff *skb)
3553 {
3554         struct hci_ev_disconn_complete *ev = data;
3555         u8 reason;
3556         struct hci_conn_params *params;
3557         struct hci_conn *conn;
3558         bool mgmt_connected;
3559
3560         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3561
3562         hci_dev_lock(hdev);
3563
3564         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3565         if (!conn)
3566                 goto unlock;
3567
3568         if (ev->status) {
3569                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3570                                        conn->dst_type, ev->status);
3571                 goto unlock;
3572         }
3573
3574         conn->state = BT_CLOSED;
3575
3576         mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3577
3578         if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3579                 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3580         else
3581                 reason = hci_to_mgmt_reason(ev->reason);
3582
3583         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3584                                 reason, mgmt_connected);
3585
3586         if (conn->type == ACL_LINK) {
3587                 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3588                         hci_remove_link_key(hdev, &conn->dst);
3589
3590                 hci_update_scan(hdev);
3591         }
3592
3593         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3594         if (params) {
3595                 switch (params->auto_connect) {
3596                 case HCI_AUTO_CONN_LINK_LOSS:
3597                         if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3598                                 break;
3599                         fallthrough;
3600
3601                 case HCI_AUTO_CONN_DIRECT:
3602                 case HCI_AUTO_CONN_ALWAYS:
3603                         hci_pend_le_list_del_init(params);
3604                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
3605                         hci_update_passive_scan(hdev);
3606                         break;
3607
3608                 default:
3609                         break;
3610                 }
3611         }
3612
3613         hci_disconn_cfm(conn, ev->reason);
3614
3615         /* Re-enable advertising if necessary, since it might
3616          * have been disabled by the connection. From the
3617          * HCI_LE_Set_Advertise_Enable command description in
3618          * the core specification (v4.0):
3619          * "The Controller shall continue advertising until the Host
3620          * issues an LE_Set_Advertise_Enable command with
3621          * Advertising_Enable set to 0x00 (Advertising is disabled)
3622          * or until a connection is created or until the Advertising
3623          * is timed out due to Directed Advertising."
3624          */
3625         if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3626                 hdev->cur_adv_instance = conn->adv_instance;
3627                 hci_enable_advertising(hdev);
3628         }
3629
3630         hci_conn_del(conn);
3631
3632 #ifdef TIZEN_BT
3633         if (conn->type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
3634                 int iscan;
3635                 int pscan;
3636
3637                 iscan = test_bit(HCI_ISCAN, &hdev->flags);
3638                 pscan = test_bit(HCI_PSCAN, &hdev->flags);
3639                 if (!iscan && !pscan) {
3640                         u8 scan_enable = SCAN_PAGE;
3641
3642                         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3643                                      sizeof(scan_enable), &scan_enable);
3644                 }
3645         }
3646 #endif
3647
3648 unlock:
3649         hci_dev_unlock(hdev);
3650 }
3651
/* Handle HCI_Authentication_Complete event.
 *
 * Updates the connection's auth/security flags, reports failures to
 * mgmt, and depending on the connection state either continues the
 * setup by enabling encryption (SSP) or confirms the connection, and
 * finally services any pending encryption request.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		/* Remember key-missing failures so the disconnect reason
		 * can be reported as an auth failure later.
		 */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup: with SSP, follow successful authentication
		 * with encryption; otherwise the connection is complete.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Service a queued encryption request now that auth finished */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3715
/* Handle HCI Remote Name Request Complete event.
 *
 * Resolves any pending name request via MGMT (when the MGMT interface
 * is active) and, for outgoing connections that still need it, kicks
 * off authentication towards the remote device.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Without MGMT there is no pending-name bookkeeping to update */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	/* ev->name is not guaranteed to be NUL terminated; bound it */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only issue one Authentication Request per connection */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3758
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's security flags to match the new encryption
 * state, enforces the link-mode security requirements (failing the
 * event locally if they are not met), reads the encryption key size for
 * encrypted BR/EDR links, and configures the authenticated payload
 * timeout where LMP/LE Ping is supported.  Upper layers are notified
 * through hci_encrypt_cfm(), either here or from the key-size reply.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		cp.handle = cpu_to_le16(conn->handle);
		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* The key-size completion handler will call
		 * hci_encrypt_cfm(), so skip the notify below.
		 */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
				 sizeof(cp), &cp))
			bt_dev_err(hdev, "write auth payload timeout failed");
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3870
3871 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3872                                              struct sk_buff *skb)
3873 {
3874         struct hci_ev_change_link_key_complete *ev = data;
3875         struct hci_conn *conn;
3876
3877         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3878
3879         hci_dev_lock(hdev);
3880
3881         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3882         if (conn) {
3883                 if (!ev->status)
3884                         set_bit(HCI_CONN_SECURE, &conn->flags);
3885
3886                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3887
3888                 hci_key_change_cfm(conn, ev->status);
3889         }
3890
3891         hci_dev_unlock(hdev);
3892 }
3893
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Caches page 0 of the remote features and, while the connection is in
 * BT_CONFIG, continues setup by either reading the extended features
 * (page 1), requesting the remote name, or notifying MGMT, before
 * finally confirming the connection if no authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache feature page 0 for this connection */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Everything below only applies while the link is being set up */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		/* Both sides support extended features: fetch page 1 and
		 * continue setup from its completion handler.
		 */
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3942
3943 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3944 {
3945         cancel_delayed_work(&hdev->cmd_timer);
3946
3947         rcu_read_lock();
3948         if (!test_bit(HCI_RESET, &hdev->flags)) {
3949                 if (ncmd) {
3950                         cancel_delayed_work(&hdev->ncmd_timer);
3951                         atomic_set(&hdev->cmd_cnt, 1);
3952                 } else {
3953                         if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3954                                 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3955                                                    HCI_NCMD_TIMEOUT);
3956                 }
3957         }
3958         rcu_read_unlock();
3959 }
3960
3961 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3962                                         struct sk_buff *skb)
3963 {
3964         struct hci_rp_le_read_buffer_size_v2 *rp = data;
3965
3966         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3967
3968         if (rp->status)
3969                 return rp->status;
3970
3971         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3972         hdev->le_pkts  = rp->acl_max_pkt;
3973         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3974         hdev->iso_pkts = rp->iso_max_pkt;
3975
3976         hdev->le_cnt  = hdev->le_pkts;
3977         hdev->iso_cnt = hdev->iso_pkts;
3978
3979         BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3980                hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3981
3982         return rp->status;
3983 }
3984
3985 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3986 {
3987         struct hci_conn *conn, *tmp;
3988
3989         lockdep_assert_held(&hdev->lock);
3990
3991         list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3992                 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3993                     conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3994                         continue;
3995
3996                 if (HCI_CONN_HANDLE_UNSET(conn->handle))
3997                         hci_conn_failed(conn, status);
3998         }
3999 }
4000
/* Handle the LE Set CIG Parameters command complete.
 *
 * Validates the response against the parameters that were sent, then
 * either fails the unbound CIS connections of the CIG (on error) or
 * assigns the returned connection handles to the matching CIS entries
 * and, if any were waiting to connect, schedules Create CIS.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_cp_le_set_cig_params *cp;
	struct hci_conn *conn;
	u8 status = rp->status;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Sanity-check that the response matches what we asked for.  A
	 * mismatch (or missing sent-command data) with a success status
	 * is treated as an unspecified error; note that a NULL cp then
	 * never reaches the handle loop below because status != 0.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
			    rp->cig_id != cp->cig_id)) {
		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
		status = HCI_ERROR_UNSPECIFIED;
	}

	hci_dev_lock(hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
	 *
	 * If the Status return parameter is non-zero, then the state of the CIG
	 * and its CIS configurations shall not be changed by the command. If
	 * the CIG did not already exist, it shall not be created.
	 */
	if (status) {
		/* Keep current configuration, fail only the unbound CIS */
		hci_unbound_cis_failed(hdev, rp->cig_id, status);
		goto unlock;
	}

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
	 *
	 * If the Status return parameter is zero, then the Controller shall
	 * set the Connection_Handle arrayed return parameter to the connection
	 * handle(s) corresponding to the CIS configurations specified in
	 * the CIS_IDs command parameter, in the same order.
	 */
	for (i = 0; i < rp->num_handles; ++i) {
		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
						cp->cis[i].cis_id);
		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
			continue;

		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
			continue;

		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
			continue;

		/* A CIS already in BT_CONNECT is waiting for Create CIS */
		if (conn->state == BT_CONNECT)
			pending = true;
	}

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);

	return rp->status;
}
4065
/* Handle the LE Setup ISO Data Path command complete.
 *
 * On failure the ISO connection is torn down after notifying upper
 * layers; on success the connection is confirmed once the relevant
 * data path direction has been configured.
 */
static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The sent command tells us which handle/direction this reply
	 * refers to; without it there is nothing we can update.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (rp->status) {
		/* Path setup failed: report and delete the connection */
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm connection if output only */
		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm connection since conn->iso_qos is always configured
		 * last.
		 */
		hci_connect_cfm(conn, rp->status);
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
4111
/* Command status handler for LE Create BIG: only logs the status.
 * The actual result is presumably delivered by a later meta event —
 * not visible in this handler.
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
4116
4117 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
4118                                    struct sk_buff *skb)
4119 {
4120         struct hci_ev_status *rp = data;
4121         struct hci_cp_le_set_per_adv_params *cp;
4122
4123         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4124
4125         if (rp->status)
4126                 return rp->status;
4127
4128         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
4129         if (!cp)
4130                 return rp->status;
4131
4132         /* TODO: set the conn state */
4133         return rp->status;
4134 }
4135
/* Handle the LE Set Periodic Advertising Enable command complete.
 *
 * Keeps the HCI_LE_PER_ADV device flag in sync with the per-instance
 * state: it is set as soon as any instance is enabled and cleared only
 * once no other periodic instance remains enabled.
 */
static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_enable *cp;
	struct adv_info *adv = NULL, *n;
	u8 per_adv_cnt = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);

	if (cp->enable) {
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);

		/* The handle may not correspond to a tracked instance */
		if (adv)
			adv->enabled = true;
	} else {
		/* If just one instance was disabled check if there are
		 * any other instance enabled before clearing HCI_LE_PER_ADV.
		 * The current periodic adv instance will be marked as
		 * disabled once extended advertising is also disabled.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
					 list) {
			if (adv->periodic && adv->enabled)
				per_adv_cnt++;
		}

		/* More than one still counted (the disabled one is still
		 * marked enabled at this point): keep the flag set.
		 */
		if (per_adv_cnt > 1)
			goto unlock;

		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
	}

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
4185
/* Table-entry helpers for hci_cc_table below.
 *
 * HCI_CC_VL: entry with distinct minimum and maximum parameter lengths.
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

/* HCI_CC: entry with an exact, fixed parameter length */
#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

/* HCI_CC_STATUS: entry whose reply carries only a status byte */
#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4199
/* Dispatch table mapping HCI command opcodes to their Command Complete
 * handlers, together with the acceptable parameter-length bounds that
 * hci_cc_func() enforces before calling the handler.
 */
static const struct hci_cc {
	u16  op;	/* HCI command opcode */
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;	/* minimum acceptable reply length */
	u16  max_len;	/* maximum expected reply length */
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
/* Tizen-specific vendor commands (RSSI monitoring) */
#ifdef TIZEN_BT
	HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
	       sizeof(struct hci_cc_rsp_enable_rssi)),
	HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
	       sizeof(struct hci_cc_rp_get_raw_rssi)),
#endif
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	/* Variable-length reply: one handle per CIS, up to the event max */
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};
4380
4381 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4382                       struct sk_buff *skb)
4383 {
4384         void *data;
4385
4386         if (skb->len < cc->min_len) {
4387                 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4388                            cc->op, skb->len, cc->min_len);
4389                 return HCI_ERROR_UNSPECIFIED;
4390         }
4391
4392         /* Just warn if the length is over max_len size it still be possible to
4393          * partially parse the cc so leave to callback to decide if that is
4394          * acceptable.
4395          */
4396         if (skb->len > cc->max_len)
4397                 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4398                             cc->op, skb->len, cc->max_len);
4399
4400         data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4401         if (!data)
4402                 return HCI_ERROR_UNSPECIFIED;
4403
4404         return cc->func(hdev, data, skb);
4405 }
4406
4407 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4408                                  struct sk_buff *skb, u16 *opcode, u8 *status,
4409                                  hci_req_complete_t *req_complete,
4410                                  hci_req_complete_skb_t *req_complete_skb)
4411 {
4412         struct hci_ev_cmd_complete *ev = data;
4413         int i;
4414
4415         *opcode = __le16_to_cpu(ev->opcode);
4416
4417         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4418
4419         for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4420                 if (hci_cc_table[i].op == *opcode) {
4421                         *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4422                         break;
4423                 }
4424         }
4425
4426         if (i == ARRAY_SIZE(hci_cc_table)) {
4427                 /* Unknown opcode, assume byte 0 contains the status, so
4428                  * that e.g. __hci_cmd_sync() properly returns errors
4429                  * for vendor specific commands send by HCI drivers.
4430                  * If a vendor doesn't actually follow this convention we may
4431                  * need to introduce a vendor CC table in order to properly set
4432                  * the status.
4433                  */
4434                 *status = skb->data[0];
4435         }
4436
4437         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4438
4439         hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4440                              req_complete_skb);
4441
4442         if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4443                 bt_dev_err(hdev,
4444                            "unexpected event for opcode 0x%4.4x", *opcode);
4445                 return;
4446         }
4447
4448         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4449                 queue_work(hdev->workqueue, &hdev->cmd_work);
4450 }
4451
/* Handle a Command Status event for HCI_OP_LE_CREATE_CIS.
 *
 * Only failures matter here: on a non-zero status every CIS handle
 * listed in the command we just sent is torn down, since no CIS
 * Established events will follow for them.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Success: completion is reported via CIS Established events */
	if (!status)
		return;

	/* Recover the parameters of the command that just failed */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	/* NOTE(review): the loop deliberately counts cp->num_cis down,
	 * mutating the sent-command copy while i walks the cis[] array.
	 */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			/* Remember if any CIS was still flagged for
			 * creation so the next batch can be kicked off
			 * once the failed ones are cleaned up.
			 */
			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
					       &conn->flags))
				pending = true;
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
4492
/* Declare one Command Status table entry: @_func runs when a Command
 * Status event arrives for opcode @_op.
 */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table mapping HCI command opcodes to their Command Status
 * handlers; scanned linearly by hci_cmd_status_evt() below.
 */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4526
/* Handle an HCI Command Status event.
 *
 * Routes the status to the opcode-specific handler from hci_cs_table,
 * releases the controller's command credits (ev->ncmd), and decides
 * whether the pending request can be flagged complete.  *opcode and
 * *status are outputs consumed by the caller.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	/* Linear scan; the table is small and this is not a hot path */
	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	/* Credits are available again; push the next queued command */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4568
4569 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4570                                    struct sk_buff *skb)
4571 {
4572         struct hci_ev_hardware_error *ev = data;
4573
4574         bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4575
4576 #ifdef TIZEN_BT
4577         hci_dev_lock(hdev);
4578         mgmt_hardware_error(hdev, ev->code);
4579         hci_dev_unlock(hdev);
4580 #endif
4581         hdev->hw_error_code = ev->code;
4582
4583         queue_work(hdev->req_workqueue, &hdev->error_reset);
4584 }
4585
4586 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4587                                 struct sk_buff *skb)
4588 {
4589         struct hci_ev_role_change *ev = data;
4590         struct hci_conn *conn;
4591
4592         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4593
4594         hci_dev_lock(hdev);
4595
4596         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4597         if (conn) {
4598                 if (!ev->status)
4599                         conn->role = ev->role;
4600
4601                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4602
4603                 hci_role_switch_cfm(conn, ev->status, ev->role);
4604 #ifdef TIZEN_BT
4605                 if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
4606                         hci_conn_change_supervision_timeout(conn,
4607                                         LINK_SUPERVISION_TIMEOUT);
4608 #endif
4609         }
4610
4611         hci_dev_unlock(hdev);
4612 }
4613
4614 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4615                                   struct sk_buff *skb)
4616 {
4617         struct hci_ev_num_comp_pkts *ev = data;
4618         int i;
4619
4620         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4621                              flex_array_size(ev, handles, ev->num)))
4622                 return;
4623
4624         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4625                 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4626                 return;
4627         }
4628
4629         bt_dev_dbg(hdev, "num %d", ev->num);
4630
4631         for (i = 0; i < ev->num; i++) {
4632                 struct hci_comp_pkts_info *info = &ev->handles[i];
4633                 struct hci_conn *conn;
4634                 __u16  handle, count;
4635
4636                 handle = __le16_to_cpu(info->handle);
4637                 count  = __le16_to_cpu(info->count);
4638
4639                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4640                 if (!conn)
4641                         continue;
4642
4643                 conn->sent -= count;
4644
4645                 switch (conn->type) {
4646                 case ACL_LINK:
4647                         hdev->acl_cnt += count;
4648                         if (hdev->acl_cnt > hdev->acl_pkts)
4649                                 hdev->acl_cnt = hdev->acl_pkts;
4650                         break;
4651
4652                 case LE_LINK:
4653                         if (hdev->le_pkts) {
4654                                 hdev->le_cnt += count;
4655                                 if (hdev->le_cnt > hdev->le_pkts)
4656                                         hdev->le_cnt = hdev->le_pkts;
4657                         } else {
4658                                 hdev->acl_cnt += count;
4659                                 if (hdev->acl_cnt > hdev->acl_pkts)
4660                                         hdev->acl_cnt = hdev->acl_pkts;
4661                         }
4662                         break;
4663
4664                 case SCO_LINK:
4665                         hdev->sco_cnt += count;
4666                         if (hdev->sco_cnt > hdev->sco_pkts)
4667                                 hdev->sco_cnt = hdev->sco_pkts;
4668                         break;
4669
4670                 case ISO_LINK:
4671                         if (hdev->iso_pkts) {
4672                                 hdev->iso_cnt += count;
4673                                 if (hdev->iso_cnt > hdev->iso_pkts)
4674                                         hdev->iso_cnt = hdev->iso_pkts;
4675                         } else if (hdev->le_pkts) {
4676                                 hdev->le_cnt += count;
4677                                 if (hdev->le_cnt > hdev->le_pkts)
4678                                         hdev->le_cnt = hdev->le_pkts;
4679                         } else {
4680                                 hdev->acl_cnt += count;
4681                                 if (hdev->acl_cnt > hdev->acl_pkts)
4682                                         hdev->acl_cnt = hdev->acl_pkts;
4683                         }
4684                         break;
4685
4686                 default:
4687                         bt_dev_err(hdev, "unknown type %d conn %p",
4688                                    conn->type, conn);
4689                         break;
4690                 }
4691         }
4692
4693         queue_work(hdev->workqueue, &hdev->tx_work);
4694 }
4695
4696 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4697                                                  __u16 handle)
4698 {
4699         struct hci_chan *chan;
4700
4701         switch (hdev->dev_type) {
4702         case HCI_PRIMARY:
4703                 return hci_conn_hash_lookup_handle(hdev, handle);
4704         case HCI_AMP:
4705                 chan = hci_chan_lookup_handle(hdev, handle);
4706                 if (chan)
4707                         return chan->conn;
4708                 break;
4709         default:
4710                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4711                 break;
4712         }
4713
4714         return NULL;
4715 }
4716
/* Handle an HCI Number of Completed Data Blocks event (block-based
 * flow control, used by AMP controllers): return freed block credits
 * to the shared block counter and kick the TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = data;
	int i;

	/* Validate that the payload really contains num_hndl entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
			     flex_array_size(ev, handles, ev->num_hndl)))
		return;

	/* This event is only valid in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d",
			   hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
		   ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* May resolve via a logical channel on AMP controllers */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp to the controller-advertised block count */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4767
4768 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4769                                 struct sk_buff *skb)
4770 {
4771         struct hci_ev_mode_change *ev = data;
4772         struct hci_conn *conn;
4773
4774         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4775
4776         hci_dev_lock(hdev);
4777
4778         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4779         if (conn) {
4780                 conn->mode = ev->mode;
4781
4782                 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4783                                         &conn->flags)) {
4784                         if (conn->mode == HCI_CM_ACTIVE)
4785                                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4786                         else
4787                                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4788                 }
4789
4790                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4791                         hci_sco_setup(conn, ev->status);
4792         }
4793
4794         hci_dev_unlock(hdev);
4795 }
4796
/* Handle an HCI PIN Code Request event: reject the request when the
 * device is not bondable and we did not initiate authentication,
 * otherwise forward it to userspace via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* NOTE(review): hold/drop around the assignment appears to
		 * re-arm the disconnect timer with the pairing timeout —
		 * confirm against hci_conn_hold()/hci_conn_drop().
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Refuse pairing we did not initiate while not bondable */
	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* High security requires a 16-digit (secure) PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
4835
4836 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4837 {
4838         if (key_type == HCI_LK_CHANGED_COMBINATION)
4839                 return;
4840
4841         conn->pin_length = pin_len;
4842         conn->key_type = key_type;
4843
4844         switch (key_type) {
4845         case HCI_LK_LOCAL_UNIT:
4846         case HCI_LK_REMOTE_UNIT:
4847         case HCI_LK_DEBUG_COMBINATION:
4848                 return;
4849         case HCI_LK_COMBINATION:
4850                 if (pin_len == 16)
4851                         conn->pending_sec_level = BT_SECURITY_HIGH;
4852                 else
4853                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4854                 break;
4855         case HCI_LK_UNAUTH_COMBINATION_P192:
4856         case HCI_LK_UNAUTH_COMBINATION_P256:
4857                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4858                 break;
4859         case HCI_LK_AUTH_COMBINATION_P192:
4860                 conn->pending_sec_level = BT_SECURITY_HIGH;
4861                 break;
4862         case HCI_LK_AUTH_COMBINATION_P256:
4863                 conn->pending_sec_level = BT_SECURITY_FIPS;
4864                 break;
4865         }
4866 }
4867
/* Handle an HCI Link Key Request event: look up a stored key for the
 * peer, reject keys too weak for the security level the connection is
 * pending on, and reply to the controller with either the key or a
 * negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	/* Key storage is only managed when mgmt controls the device */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Reject an unauthenticated key when the requested auth
		 * type demands MITM protection (low bit of auth_type).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		/* A combination key from a short PIN is too weak for
		 * high/FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4925
/* Handle an HCI Link Key Notification event: store the new key, update
 * the connection's key/security state, and notify userspace.  All-zero
 * keys are rejected (CVE-2020-26555) and debug keys are dropped unless
 * explicitly allowed.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len is always passed as 0 to
	 * hci_add_link_key(); the event carries no PIN length.  Confirm
	 * the real length is tracked via conn->pin_length elsewhere.
	 */
	u8 pin_len = 0;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Ignore NULL link key against CVE-2020-26555 */
	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
			   &ev->bdaddr);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Persisting and reporting keys requires mgmt control */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
				ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4995
4996 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4997                                  struct sk_buff *skb)
4998 {
4999         struct hci_ev_clock_offset *ev = data;
5000         struct hci_conn *conn;
5001
5002         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5003
5004         hci_dev_lock(hdev);
5005
5006         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5007         if (conn && !ev->status) {
5008                 struct inquiry_entry *ie;
5009
5010                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
5011                 if (ie) {
5012                         ie->data.clock_offset = ev->clock_offset;
5013                         ie->timestamp = jiffies;
5014                 }
5015         }
5016
5017         hci_dev_unlock(hdev);
5018 }
5019
5020 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
5021                                     struct sk_buff *skb)
5022 {
5023         struct hci_ev_pkt_type_change *ev = data;
5024         struct hci_conn *conn;
5025
5026         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5027
5028         hci_dev_lock(hdev);
5029
5030         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5031         if (conn && !ev->status)
5032                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
5033
5034         hci_dev_unlock(hdev);
5035 }
5036
5037 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
5038                                    struct sk_buff *skb)
5039 {
5040         struct hci_ev_pscan_rep_mode *ev = data;
5041         struct inquiry_entry *ie;
5042
5043         bt_dev_dbg(hdev, "");
5044
5045         hci_dev_lock(hdev);
5046
5047         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5048         if (ie) {
5049                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
5050                 ie->timestamp = jiffies;
5051         }
5052
5053         hci_dev_unlock(hdev);
5054 }
5055
/* Handle an HCI Inquiry Result with RSSI event.
 *
 * Two on-air formats exist for this event: one with and one without a
 * pscan_mode field.  The total payload length is used to tell them
 * apart, and each entry is still bounds-checked individually via
 * hci_ev_skb_pull() before being parsed.  Each result updates the
 * inquiry cache and is reported to userspace via mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	/* Periodic inquiry results are not reported individually */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Variant carrying a pscan_mode field per entry */
	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	/* Variant without pscan_mode */
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		/* Payload matches neither format: drop the whole event */
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}
5142
/* Handle an HCI Read Remote Extended Features Complete event: cache
 * the feature page, derive the peer's SSP/SC host support from page 1,
 * and continue connection setup (remote name request or connected
 * notification) when the link is still in BT_CONFIG.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Setup continuation below only applies while still configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* Resolve the remote name before reporting the device */
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5206
/* HCI_Synchronous_Connection_Complete event.
 *
 * Finalizes SCO/eSCO connection setup: on success the connection handle
 * is assigned and the connection registered with debugfs/sysfs; for a
 * set of well-known negotiation failures an outgoing connection is
 * retried with a downgraded packet type; any other status closes the
 * connection. When the SCO data path goes over HCI (data_path == 0) the
 * driver is additionally notified about the negotiated air mode.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		/* hci_conn_set_handle() may itself fail; treat that as a
		 * failed setup and close the connection below.
		 */
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status) {
			conn->state = BT_CLOSED;
			break;
		}

		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For outgoing links retry setup with a less demanding
		 * packet type; if the retry was issued, keep the
		 * connection open and wait for the next complete event.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->parent->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over HCI transport data path which
	 * is zero and non-zero value shall be non-HCI transport data path
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
5319
5320 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5321 {
5322         size_t parsed = 0;
5323
5324         while (parsed < eir_len) {
5325                 u8 field_len = eir[0];
5326
5327                 if (field_len == 0)
5328                         return parsed;
5329
5330                 parsed += field_len + 1;
5331                 eir += field_len + 1;
5332         }
5333
5334         return eir_len;
5335 }
5336
/* HCI_Extended_Inquiry_Result event.
 *
 * Updates the inquiry cache and reports each discovered device to mgmt,
 * including the trimmed EIR payload. Results are dropped while a
 * periodic inquiry is active.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	/* Validate that the skb really contains ev->num result entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		/* Extended Inquiry Results are only sent by SSP-capable
		 * remotes (see the note in hci_remote_ext_features_evt),
		 * so mark SSP as supported in the cache entry.
		 */
		data.ssp_mode		= 0x01;

		/* With mgmt the name is only known if the EIR carries a
		 * complete name field; otherwise no name resolution is
		 * performed and the name counts as known.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		/* Report only the significant part of the EIR blob */
		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
5391
/* HCI_Encryption_Key_Refresh_Complete event.
 *
 * Only acted upon for LE links (for BR/EDR the necessary steps are
 * taken through the auth_complete event). On success the pending
 * security level becomes effective; on failure an established
 * connection is disconnected with an authentication failure reason.
 * Depending on the connection state either the connect or the auth
 * confirmation callbacks are invoked.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link is treated as an
	 * authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection alive for the disconnect timeout
		 * period after authentication completed.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5441
5442 static u8 hci_get_auth_req(struct hci_conn *conn)
5443 {
5444 #ifdef TIZEN_BT
5445         if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
5446                 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5447                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5448                         return HCI_AT_GENERAL_BONDING_MITM;
5449         }
5450 #endif
5451
5452         /* If remote requests no-bonding follow that lead */
5453         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5454             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5455                 return conn->remote_auth | (conn->auth_type & 0x01);
5456
5457         /* If both remote and local have enough IO capabilities, require
5458          * MITM protection
5459          */
5460         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5461             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5462                 return conn->remote_auth | 0x01;
5463
5464         /* No MITM protection possible so ignore remote requirement */
5465         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5466 }
5467
/* Determine the OOB_Data_Present value for an IO capability reply.
 *
 * Returns 0x00 when no usable OOB data is stored for the peer, 0x01
 * when P-192 data is available (legacy pairing) and 0x02 when P-256
 * data is available in Secure Connections Only mode. With Secure
 * Connections enabled (and not SC-only), the present value stored with
 * the OOB data is returned as-is.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
5509
/* HCI_IO_Capability_Request event.
 *
 * Answers the controller's request for our IO capabilities during SSP.
 * When pairing is allowed (bondable, we initiated, or the remote is not
 * requesting bonding), an IO capability reply with the computed
 * authentication requirement and OOB presence is sent; otherwise the
 * request is rejected with "pairing not allowed".
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Keep the connection alive while the SSP exchange is ongoing */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5579
5580 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5581                                   struct sk_buff *skb)
5582 {
5583         struct hci_ev_io_capa_reply *ev = data;
5584         struct hci_conn *conn;
5585
5586         bt_dev_dbg(hdev, "");
5587
5588         hci_dev_lock(hdev);
5589
5590         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5591         if (!conn)
5592                 goto unlock;
5593
5594         conn->remote_cap = ev->capability;
5595         conn->remote_auth = ev->authentication;
5596
5597 unlock:
5598         hci_dev_unlock(hdev);
5599 }
5600
5601 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5602                                          struct sk_buff *skb)
5603 {
5604         struct hci_ev_user_confirm_req *ev = data;
5605         int loc_mitm, rem_mitm, confirm_hint = 0;
5606         struct hci_conn *conn;
5607
5608         bt_dev_dbg(hdev, "");
5609
5610         hci_dev_lock(hdev);
5611
5612         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5613                 goto unlock;
5614
5615         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5616         if (!conn)
5617                 goto unlock;
5618
5619         loc_mitm = (conn->auth_type & 0x01);
5620         rem_mitm = (conn->remote_auth & 0x01);
5621
5622         /* If we require MITM but the remote device can't provide that
5623          * (it has NoInputNoOutput) then reject the confirmation
5624          * request. We check the security level here since it doesn't
5625          * necessarily match conn->auth_type.
5626          */
5627         if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5628             conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5629                 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5630                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5631                              sizeof(ev->bdaddr), &ev->bdaddr);
5632                 goto unlock;
5633         }
5634
5635         /* If no side requires MITM protection; auto-accept */
5636         if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5637             (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5638
5639                 /* If we're not the initiators request authorization to
5640                  * proceed from user space (mgmt_user_confirm with
5641                  * confirm_hint set to 1). The exception is if neither
5642                  * side had MITM or if the local IO capability is
5643                  * NoInputNoOutput, in which case we do auto-accept
5644                  */
5645                 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5646                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5647                     (loc_mitm || rem_mitm)) {
5648                         bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5649                         confirm_hint = 1;
5650                         goto confirm;
5651                 }
5652
5653                 /* If there already exists link key in local host, leave the
5654                  * decision to user space since the remote device could be
5655                  * legitimate or malicious.
5656                  */
5657                 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5658                         bt_dev_dbg(hdev, "Local host already has link key");
5659                         confirm_hint = 1;
5660                         goto confirm;
5661                 }
5662
5663                 BT_DBG("Auto-accept of user confirmation with %ums delay",
5664                        hdev->auto_accept_delay);
5665
5666                 if (hdev->auto_accept_delay > 0) {
5667                         int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5668                         queue_delayed_work(conn->hdev->workqueue,
5669                                            &conn->auto_accept_work, delay);
5670                         goto unlock;
5671                 }
5672
5673                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5674                              sizeof(ev->bdaddr), &ev->bdaddr);
5675                 goto unlock;
5676         }
5677
5678 confirm:
5679         mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5680                                   le32_to_cpu(ev->passkey), confirm_hint);
5681
5682 unlock:
5683         hci_dev_unlock(hdev);
5684 }
5685
5686 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5687                                          struct sk_buff *skb)
5688 {
5689         struct hci_ev_user_passkey_req *ev = data;
5690
5691         bt_dev_dbg(hdev, "");
5692
5693         if (hci_dev_test_flag(hdev, HCI_MGMT))
5694                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5695 }
5696
5697 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5698                                         struct sk_buff *skb)
5699 {
5700         struct hci_ev_user_passkey_notify *ev = data;
5701         struct hci_conn *conn;
5702
5703         bt_dev_dbg(hdev, "");
5704
5705         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5706         if (!conn)
5707                 return;
5708
5709         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5710         conn->passkey_entered = 0;
5711
5712         if (hci_dev_test_flag(hdev, HCI_MGMT))
5713                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5714                                          conn->dst_type, conn->passkey_notify,
5715                                          conn->passkey_entered);
5716 }
5717
5718 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5719                                     struct sk_buff *skb)
5720 {
5721         struct hci_ev_keypress_notify *ev = data;
5722         struct hci_conn *conn;
5723
5724         bt_dev_dbg(hdev, "");
5725
5726         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5727         if (!conn)
5728                 return;
5729
5730         switch (ev->type) {
5731         case HCI_KEYPRESS_STARTED:
5732                 conn->passkey_entered = 0;
5733                 return;
5734
5735         case HCI_KEYPRESS_ENTERED:
5736                 conn->passkey_entered++;
5737                 break;
5738
5739         case HCI_KEYPRESS_ERASED:
5740                 conn->passkey_entered--;
5741                 break;
5742
5743         case HCI_KEYPRESS_CLEARED:
5744                 conn->passkey_entered = 0;
5745                 break;
5746
5747         case HCI_KEYPRESS_COMPLETED:
5748                 return;
5749         }
5750
5751         if (hci_dev_test_flag(hdev, HCI_MGMT))
5752                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5753                                          conn->dst_type, conn->passkey_notify,
5754                                          conn->passkey_entered);
5755 }
5756
5757 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5758                                          struct sk_buff *skb)
5759 {
5760         struct hci_ev_simple_pair_complete *ev = data;
5761         struct hci_conn *conn;
5762
5763         bt_dev_dbg(hdev, "");
5764
5765         hci_dev_lock(hdev);
5766
5767         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5768         if (!conn || !hci_conn_ssp_enabled(conn))
5769                 goto unlock;
5770
5771         /* Reset the authentication requirement to unknown */
5772         conn->remote_auth = 0xff;
5773
5774         /* To avoid duplicate auth_failed events to user space we check
5775          * the HCI_CONN_AUTH_PEND flag which will be set if we
5776          * initiated the authentication. A traditional auth_complete
5777          * event gets always produced as initiator and is also mapped to
5778          * the mgmt_auth_failed event */
5779         if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5780                 mgmt_auth_failed(conn, ev->status);
5781
5782         hci_conn_drop(conn);
5783
5784 unlock:
5785         hci_dev_unlock(hdev);
5786 }
5787
5788 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5789                                          struct sk_buff *skb)
5790 {
5791         struct hci_ev_remote_host_features *ev = data;
5792         struct inquiry_entry *ie;
5793         struct hci_conn *conn;
5794
5795         bt_dev_dbg(hdev, "");
5796
5797         hci_dev_lock(hdev);
5798
5799         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5800         if (conn)
5801                 memcpy(conn->features[1], ev->features, 8);
5802
5803         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5804         if (ie)
5805                 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5806
5807         hci_dev_unlock(hdev);
5808 }
5809
/* HCI_Remote_OOB_Data_Request event.
 *
 * Answers the controller's request for the remote device's OOB data.
 * Without stored data a negative reply is sent. With Secure Connections
 * support the extended reply carrying both P-192 and P-256 values is
 * used (P-192 zeroed out in SC-only mode); otherwise the legacy P-192
 * reply is sent.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC-only mode only the P-256 values may be used, so
		 * send zeroed-out P-192 values.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5863
5864 #if IS_ENABLED(CONFIG_BT_HS)
5865 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5866                                   struct sk_buff *skb)
5867 {
5868         struct hci_ev_channel_selected *ev = data;
5869         struct hci_conn *hcon;
5870
5871         bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5872
5873         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5874         if (!hcon)
5875                 return;
5876
5877         amp_read_loc_assoc_final_data(hdev, hcon);
5878 }
5879
/* HCI_Physical_Link_Complete event (AMP).
 *
 * On success moves the AMP physical link to BT_CONNECTED, copies the
 * peer address from the underlying BR/EDR connection, registers the
 * link with debugfs/sysfs and confirms the physical link to the AMP
 * manager. On failure the connection object is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = data;
	struct hci_conn *hcon, *bredr_hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
		   ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only links managed by an AMP manager are handled here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* hold/drop pair arms the disconnect timeout for the link */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5920
/* HCI_Logical_Link_Complete event (AMP).
 *
 * Creates the AMP hci_chan for the completed logical link and, when an
 * AMP manager with a pending BR/EDR L2CAP channel exists, confirms the
 * logical link to L2CAP using the controller's block MTU.
 *
 * NOTE(review): unlike most event handlers here this one runs without
 * taking hci_dev_lock — confirm the lookups below are safe in this
 * context.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5959
5960 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5961                                              struct sk_buff *skb)
5962 {
5963         struct hci_ev_disconn_logical_link_complete *ev = data;
5964         struct hci_chan *hchan;
5965
5966         bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5967                    le16_to_cpu(ev->handle), ev->status);
5968
5969         if (ev->status)
5970                 return;
5971
5972         hci_dev_lock(hdev);
5973
5974         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5975         if (!hchan || !hchan->amp)
5976                 goto unlock;
5977
5978         amp_destroy_logical_link(hchan, ev->reason);
5979
5980 unlock:
5981         hci_dev_unlock(hdev);
5982 }
5983
5984 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5985                                              struct sk_buff *skb)
5986 {
5987         struct hci_ev_disconn_phy_link_complete *ev = data;
5988         struct hci_conn *hcon;
5989
5990         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5991
5992         if (ev->status)
5993                 return;
5994
5995         hci_dev_lock(hdev);
5996
5997         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5998         if (hcon && hcon->type == AMP_LINK) {
5999                 hcon->state = BT_CLOSED;
6000                 hci_disconn_cfm(hcon, ev->reason);
6001                 hci_conn_del(hcon);
6002         }
6003
6004         hci_dev_unlock(hdev);
6005 }
6006 #endif
6007
/* Fill in the initiator (init_addr) and responder (resp_addr) addresses
 * of a newly established LE connection, based on the connection role.
 *
 * @conn:        connection being set up
 * @bdaddr:      peer address reported in the connection complete event
 * @bdaddr_type: address type of @bdaddr
 * @local_rpa:   local resolvable private address reported by the
 *               controller (enhanced event only), or NULL/BDADDR_ANY
 *               when not available
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		/* Outgoing connection: the peer is the responder */
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		/* Incoming connection: the local device is the responder */
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
6060
/* Common handler for the legacy and enhanced LE Connection Complete
 * events.
 *
 * Looks up (or creates) the hci_conn for @bdaddr, fixes up the
 * initiator/responder addresses, resolves the peer's identity address,
 * assigns the connection handle and transitions the connection towards
 * the connected state. @local_rpa is NULL for the legacy event.
 *
 * Runs in event context; takes and releases hdev->lock.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
#ifdef TIZEN_BT
		/* LE auto connect */
		bacpy(&conn->dst, bdaddr);
#endif
		/* Connection attempt completed; stop the pending timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status || hci_conn_set_handle(conn, handle))
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Notify userspace only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Hold the connection until the features exchange completes */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending-connection entry triggered this connection; release
	 * its ownership of the conn object now that we are connected.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		hci_pend_le_list_del_init(params);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
6236
6237 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6238                                      struct sk_buff *skb)
6239 {
6240         struct hci_ev_le_conn_complete *ev = data;
6241
6242         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6243
6244         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6245                              NULL, ev->role, le16_to_cpu(ev->handle),
6246                              le16_to_cpu(ev->interval),
6247                              le16_to_cpu(ev->latency),
6248                              le16_to_cpu(ev->supervision_timeout));
6249 }
6250
6251 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6252                                          struct sk_buff *skb)
6253 {
6254         struct hci_ev_le_enh_conn_complete *ev = data;
6255
6256         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6257
6258         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6259                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6260                              le16_to_cpu(ev->interval),
6261                              le16_to_cpu(ev->latency),
6262                              le16_to_cpu(ev->supervision_timeout));
6263 }
6264
/* Handle the LE Advertising Set Terminated event.
 *
 * On an error status the advertising instance is removed; on success
 * the instance is marked disabled and the connection that terminated
 * the set (identified by ev->conn_handle) gets its advertising instance
 * and, when needed, its responder address recorded.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* If any other instance is still enabled, advertising as a
		 * whole remains active and HCI_LE_ADV must stay set.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	/* Successful termination: the set is no longer advertising */
	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* Only fill in resp_addr when advertising with a random
		 * address and no responder address was recorded yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 is the instance that uses hdev->random_addr */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6334
6335 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6336                                             struct sk_buff *skb)
6337 {
6338         struct hci_ev_le_conn_update_complete *ev = data;
6339         struct hci_conn *conn;
6340
6341         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6342
6343         if (ev->status)
6344                 return;
6345
6346         hci_dev_lock(hdev);
6347
6348         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6349         if (conn) {
6350 #ifdef TIZEN_BT
6351                 if (ev->status) {
6352                         hci_dev_unlock(hdev);
6353                         mgmt_le_conn_update_failed(hdev, &conn->dst,
6354                                 conn->type, conn->dst_type, ev->status);
6355                         return;
6356                 }
6357 #endif
6358                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6359                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6360                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6361         }
6362
6363         hci_dev_unlock(hdev);
6364
6365 #ifdef TIZEN_BT
6366         mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
6367                                 conn->dst_type, conn->le_conn_interval,
6368                                 conn->le_conn_latency, conn->le_supv_timeout);
6369 #endif
6370 }
6371
/* This function requires the caller holds hdev->lock
 *
 * Decide whether an incoming connectable advertising report should
 * trigger an LE connection attempt and, if so, initiate it via
 * hci_connect_le().
 *
 * Returns the resulting hci_conn, or NULL when no connection was (or
 * could be) initiated.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6463
/* Process a single advertising (or directed advertising) report.
 *
 * Validates the PDU type and data length, resolves the advertiser's
 * identity address, possibly triggers a pending LE connection, and
 * forwards the report to the management interface (merging ADV_IND /
 * SCAN_RSP pairs on non-Tizen builds).
 *
 * @direct_addr is only non-NULL for LE Direct Advertising Reports;
 * @instant is the timestamp the caller captured on event arrival.
 * @ctl_time is not referenced in this function body -- presumably
 * reserved for timestamp handling by related code; TODO confirm.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv, bool ctl_time, u64 instant)
{
#ifndef TIZEN_BT
	struct discovery_state *d = &hdev->discovery;
	bool match;
#endif
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	/* Only the five legacy advertising PDU types are valid here */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (len > max_adv_len(hdev)) {
		bt_dev_err_ratelimited(hdev,
				       "adv larger than maximum supported");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type);
	if (!ext_adv && conn && type == LE_ADV_IND &&
	    len <= max_adv_len(hdev)) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Non-connectable PDU types are flagged as such for userspace */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

#ifndef TIZEN_BT
		/* Handle all adv packet in platform */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;
#endif

#ifdef TIZEN_BT
		mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, type);
#else
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
#endif
		return;
	}

	/* When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the unlikely case that a controller just sends a scan
	 * response event that doesn't match the pending report, then
	 * it is marked as a standalone SCAN_RSP.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_SCAN_RSP;

#ifdef TIZEN_BT
	/* Disable adv ind and scan rsp merging */
	mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, type);
#else
	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
#endif
}
6689
/* Handle the LE Advertising Report event, which batches ev->num
 * reports in one packet. Each report is pulled and length-checked
 * before being handed to process_adv_report(); the RSSI byte trails
 * the variable-length advertising data.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		/* Pull the fixed-size per-report header */
		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the advertising data plus the trailing RSSI byte */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= max_adv_len(hdev)) {
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}
6728
6729 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6730 {
6731         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6732                 switch (evt_type) {
6733                 case LE_LEGACY_ADV_IND:
6734                         return LE_ADV_IND;
6735                 case LE_LEGACY_ADV_DIRECT_IND:
6736                         return LE_ADV_DIRECT_IND;
6737                 case LE_LEGACY_ADV_SCAN_IND:
6738                         return LE_ADV_SCAN_IND;
6739                 case LE_LEGACY_NONCONN_IND:
6740                         return LE_ADV_NONCONN_IND;
6741                 case LE_LEGACY_SCAN_RSP_ADV:
6742                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6743                         return LE_ADV_SCAN_RSP;
6744                 }
6745
6746                 goto invalid;
6747         }
6748
6749         if (evt_type & LE_EXT_ADV_CONN_IND) {
6750                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6751                         return LE_ADV_DIRECT_IND;
6752
6753                 return LE_ADV_IND;
6754         }
6755
6756         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6757                 return LE_ADV_SCAN_RSP;
6758
6759         if (evt_type & LE_EXT_ADV_SCAN_IND)
6760                 return LE_ADV_SCAN_IND;
6761
6762         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6763             evt_type & LE_EXT_ADV_DIRECT_IND)
6764                 return LE_ADV_NONCONN_IND;
6765
6766 invalid:
6767         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6768                                evt_type);
6769
6770         return LE_ADV_INVALID;
6771 }
6772
6773 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6774                                       struct sk_buff *skb)
6775 {
6776         struct hci_ev_le_ext_adv_report *ev = data;
6777         u64 instant = jiffies;
6778
6779         if (!ev->num)
6780                 return;
6781
6782         hci_dev_lock(hdev);
6783
6784         while (ev->num--) {
6785                 struct hci_ev_le_ext_adv_info *info;
6786                 u8 legacy_evt_type;
6787                 u16 evt_type;
6788
6789                 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6790                                           sizeof(*info));
6791                 if (!info)
6792                         break;
6793
6794                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6795                                         info->length))
6796                         break;
6797
6798                 evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6799                 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6800                 if (legacy_evt_type != LE_ADV_INVALID) {
6801                         process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6802                                            info->bdaddr_type, NULL, 0,
6803                                            info->rssi, info->data, info->length,
6804                                            !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6805                                            false, instant);
6806                 }
6807         }
6808
6809         hci_dev_unlock(hdev);
6810 }
6811
6812 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6813 {
6814         struct hci_cp_le_pa_term_sync cp;
6815
6816         memset(&cp, 0, sizeof(cp));
6817         cp.handle = handle;
6818
6819         return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6820 }
6821
/* Handle LE Periodic Advertising Sync Established event.
 *
 * Asks the ISO layer whether the sync should be accepted; if not, the
 * newly created sync is terminated again. When the setup is deferred
 * and the sync failed (non-zero status), a placeholder connection is
 * added so the failure can be reported to the ISO layer.
 */
static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	/* The PA sync request is no longer pending */
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	/* Check with the ISO layer whether this sync should be kept */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	if (ev->status) {
		/* Add connection to indicate the failed PA sync event */
		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
					     HCI_ROLE_SLAVE);

		if (!pa_sync)
			goto unlock;

		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}
6862
6863 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6864                                       struct sk_buff *skb)
6865 {
6866         struct hci_ev_le_per_adv_report *ev = data;
6867         int mask = hdev->link_mode;
6868         __u8 flags = 0;
6869
6870         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6871
6872         hci_dev_lock(hdev);
6873
6874         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6875         if (!(mask & HCI_LM_ACCEPT))
6876                 hci_le_pa_term_sync(hdev, ev->sync_handle);
6877
6878         hci_dev_unlock(hdev);
6879 }
6880
6881 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6882                                             struct sk_buff *skb)
6883 {
6884         struct hci_ev_le_remote_feat_complete *ev = data;
6885         struct hci_conn *conn;
6886
6887         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6888
6889         hci_dev_lock(hdev);
6890
6891         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6892         if (conn) {
6893                 if (!ev->status)
6894                         memcpy(conn->features[0], ev->features, 8);
6895
6896                 if (conn->state == BT_CONFIG) {
6897                         __u8 status;
6898
6899                         /* If the local controller supports peripheral-initiated
6900                          * features exchange, but the remote controller does
6901                          * not, then it is possible that the error code 0x1a
6902                          * for unsupported remote feature gets returned.
6903                          *
6904                          * In this specific case, allow the connection to
6905                          * transition into connected state and mark it as
6906                          * successful.
6907                          */
6908                         if (!conn->out && ev->status == 0x1a &&
6909                             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6910                                 status = 0x00;
6911                         else
6912                                 status = ev->status;
6913
6914                         conn->state = BT_CONNECTED;
6915                         hci_connect_cfm(conn, status);
6916                         hci_conn_drop(conn);
6917                 }
6918         }
6919
6920         hci_dev_unlock(hdev);
6921 }
6922
/* Handle LE Long Term Key Request event.
 *
 * Looks up a stored LTK matching the connection (and, for non-SC keys,
 * the EDiv/Rand values from the event) and replies with it. If no
 * matching key is found a negative reply is sent instead.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad it beyond the negotiated key size */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6987
6988 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6989                                       u8 reason)
6990 {
6991         struct hci_cp_le_conn_param_req_neg_reply cp;
6992
6993         cp.handle = cpu_to_le16(handle);
6994         cp.reason = reason;
6995
6996         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6997                      &cp);
6998 }
6999
/* Handle LE Remote Connection Parameter Request event.
 *
 * Validates the parameters proposed by the remote device: unknown
 * handles and out-of-range values are rejected with a negative reply,
 * otherwise the proposal is accepted unmodified with a positive reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject proposals outside the spec-valid parameter ranges */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* Remember the remote's preference for future connections
		 * and tell userspace (via mgmt) whether the new parameters
		 * should be stored persistently (store_hint).
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the parameters exactly as proposed by the remote */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
7059
7060 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
7061                                          struct sk_buff *skb)
7062 {
7063         struct hci_ev_le_direct_adv_report *ev = data;
7064         u64 instant = jiffies;
7065         int i;
7066
7067         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
7068                                 flex_array_size(ev, info, ev->num)))
7069                 return;
7070
7071         if (!ev->num)
7072                 return;
7073
7074         hci_dev_lock(hdev);
7075
7076         for (i = 0; i < ev->num; i++) {
7077                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
7078
7079                 process_adv_report(hdev, info->type, &info->bdaddr,
7080                                    info->bdaddr_type, &info->direct_addr,
7081                                    info->direct_addr_type, info->rssi, NULL, 0,
7082                                    false, false, instant);
7083         }
7084
7085         hci_dev_unlock(hdev);
7086 }
7087
7088 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
7089                                   struct sk_buff *skb)
7090 {
7091         struct hci_ev_le_phy_update_complete *ev = data;
7092         struct hci_conn *conn;
7093
7094         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7095
7096         if (ev->status)
7097                 return;
7098
7099         hci_dev_lock(hdev);
7100
7101         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
7102         if (!conn)
7103                 goto unlock;
7104
7105         conn->le_tx_phy = ev->tx_phy;
7106         conn->le_rx_phy = ev->rx_phy;
7107
7108 unlock:
7109         hci_dev_unlock(hdev);
7110 }
7111
/* Handle LE CIS Established event.
 *
 * Fills in the negotiated QoS parameters on the CIS connection and,
 * depending on the event status, either completes the setup (state
 * BT_CONNECTED, ISO data path configured) or tears the connection down.
 */
static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	bool pending = false;
	u16 handle = __le16_to_cpu(ev->handle);

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != ISO_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	qos = &conn->iso_qos;

	/* Whether a CIS creation was in flight for this connection */
	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
	qos->ucast.out.interval = qos->ucast.in.interval;

	/* The c_* event fields describe the Central-to-Peripheral direction
	 * and the p_* fields the opposite one, so map them to the in/out
	 * directions based on our role on this connection.
	 */
	switch (conn->role) {
	case HCI_ROLE_SLAVE:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.in.phy = ev->c_phy;
		qos->ucast.out.phy = ev->p_phy;
		break;
	case HCI_ROLE_MASTER:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.out.phy = ev->c_phy;
		qos->ucast.in.phy = ev->p_phy;
		break;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	/* Establishment failed: report the error and drop the connection */
	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	/* Kick off creation of any further CISes that were waiting */
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
7195
7196 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
7197 {
7198         struct hci_cp_le_reject_cis cp;
7199
7200         memset(&cp, 0, sizeof(cp));
7201         cp.handle = handle;
7202         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
7203         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
7204 }
7205
7206 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
7207 {
7208         struct hci_cp_le_accept_cis cp;
7209
7210         memset(&cp, 0, sizeof(cp));
7211         cp.handle = handle;
7212         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
7213 }
7214
/* Handle LE CIS Request event.
 *
 * A remote Central is requesting creation of a CIS on top of an
 * existing ACL connection. The request is accepted or rejected based
 * on whether the ISO layer has a matching listening socket; with a
 * deferred setup the acceptance is left to the ISO layer.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	/* The CIS must belong to a known ACL connection */
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	/* Check with the ISO layer whether the request can be accepted */
	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	/* Create a connection for the CIS handle if none exists yet */
	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
				   cis_handle);
		if (!cis) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
	}

	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;

	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		/* Deferred setup: let the ISO layer decide on acceptance */
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
7265
7266 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
7267 {
7268         u8 handle = PTR_UINT(data);
7269
7270         return hci_le_terminate_big_sync(hdev, handle,
7271                                          HCI_ERROR_LOCAL_HOST_TERM);
7272 }
7273
/* Handle LE Create BIG Complete event.
 *
 * Assigns the reported BIS handles to the bound BIS connections of the
 * BIG and completes (or fails) each of them. If no bound connection is
 * left for the BIG, it is terminated again.
 */
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	__u8 i = 0;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	/* Make sure the complete BIS handle array is present */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);
	rcu_read_lock();

	/* Connect all BISes that are bound to the BIG */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (bacmp(&conn->dst, BDADDR_ANY) ||
		    conn->type != ISO_LINK ||
		    conn->iso_qos.bcast.big != ev->handle)
			continue;

		/* Assign the next reported BIS handle to this connection */
		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
			continue;

		if (!ev->status) {
			conn->state = BT_CONNECTED;
			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
			/* NOTE(review): the RCU read side is dropped around
			 * the calls below, presumably because they may
			 * sleep — confirm the list iteration remains safe
			 * across this window.
			 */
			rcu_read_unlock();
			hci_debugfs_create_conn(conn);
			hci_conn_add_sysfs(conn);
			hci_iso_setup_path(conn);
			rcu_read_lock();
			continue;
		}

		hci_connect_cfm(conn, ev->status);
		rcu_read_unlock();
		hci_conn_del(conn);
		rcu_read_lock();
	}

	rcu_read_unlock();

	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * has completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}
7331
7332 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7333                                             struct sk_buff *skb)
7334 {
7335         struct hci_evt_le_big_sync_estabilished *ev = data;
7336         struct hci_conn *bis;
7337         struct hci_conn *pa_sync;
7338         int i;
7339
7340         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7341
7342         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7343                                 flex_array_size(ev, bis, ev->num_bis)))
7344                 return;
7345
7346         hci_dev_lock(hdev);
7347
7348         if (!ev->status) {
7349                 pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
7350                 if (pa_sync)
7351                         /* Also mark the BIG sync established event on the
7352                          * associated PA sync hcon
7353                          */
7354                         set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
7355         }
7356
7357         for (i = 0; i < ev->num_bis; i++) {
7358                 u16 handle = le16_to_cpu(ev->bis[i]);
7359                 __le32 interval;
7360
7361                 bis = hci_conn_hash_lookup_handle(hdev, handle);
7362                 if (!bis) {
7363                         bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7364                                            HCI_ROLE_SLAVE, handle);
7365                         if (!bis)
7366                                 continue;
7367                 }
7368
7369                 if (ev->status != 0x42)
7370                         /* Mark PA sync as established */
7371                         set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7372
7373                 bis->iso_qos.bcast.big = ev->handle;
7374                 memset(&interval, 0, sizeof(interval));
7375                 memcpy(&interval, ev->latency, sizeof(ev->latency));
7376                 bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7377                 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7378                 bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7379                 bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7380
7381                 if (!ev->status) {
7382                         set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7383                         hci_iso_setup_path(bis);
7384                 }
7385         }
7386
7387         /* In case BIG sync failed, notify each failed connection to
7388          * the user after all hci connections have been added
7389          */
7390         if (ev->status)
7391                 for (i = 0; i < ev->num_bis; i++) {
7392                         u16 handle = le16_to_cpu(ev->bis[i]);
7393
7394                         bis = hci_conn_hash_lookup_handle(hdev, handle);
7395
7396                         set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7397                         hci_connect_cfm(bis, ev->status);
7398                 }
7399
7400         hci_dev_unlock(hdev);
7401 }
7402
/* Handle LE BIGInfo Advertising Report event.
 *
 * If no ISO socket is willing to accept it, the PA sync is terminated.
 * With a deferred setup, a placeholder connection for the PA sync is
 * added (if not already present) and the ISO layer is notified.
 */
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	/* Check with the ISO layer whether this report is wanted */
	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->sync_handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Nothing to do if a connection for this sync already exists */
	pa_sync = hci_conn_hash_lookup_pa_sync_handle
			(hdev,
			le16_to_cpu(ev->sync_handle));

	if (pa_sync)
		goto unlock;

	/* Add connection to indicate the PA sync event */
	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (!pa_sync)
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0x00);

unlock:
	hci_dev_unlock(hdev);
}
7447
/* Declare an entry in hci_le_ev_table for a subevent with a variable
 * length payload: between _min_len and _max_len bytes are accepted.
 */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare an entry for a subevent with a fixed length payload */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Declare an entry for a subevent carrying only a status byte */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7460
/* Entries in this table shall have their position according to the subevent
 * opcode they handle so the use of the macros above is recommended since it
 * does attempt to initialize at its proper index using Designated
 * Initializers; that way events without a callback function can be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
				 hci_le_per_adv_report_evt,
				 sizeof(struct hci_ev_le_per_adv_report),
				 HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};
7542
7543 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7544                             struct sk_buff *skb, u16 *opcode, u8 *status,
7545                             hci_req_complete_t *req_complete,
7546                             hci_req_complete_skb_t *req_complete_skb)
7547 {
7548         struct hci_ev_le_meta *ev = data;
7549         const struct hci_le_ev *subev;
7550
7551         bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7552
7553         /* Only match event if command OGF is for LE */
7554         if (hdev->sent_cmd &&
7555             hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
7556             hci_skb_event(hdev->sent_cmd) == ev->subevent) {
7557                 *opcode = hci_skb_opcode(hdev->sent_cmd);
7558                 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7559                                      req_complete_skb);
7560         }
7561
7562         subev = &hci_le_ev_table[ev->subevent];
7563         if (!subev->func)
7564                 return;
7565
7566         if (skb->len < subev->min_len) {
7567                 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7568                            ev->subevent, skb->len, subev->min_len);
7569                 return;
7570         }
7571
7572         /* Just warn if the length is over max_len size it still be
7573          * possible to partially parse the event so leave to callback to
7574          * decide if that is acceptable.
7575          */
7576         if (skb->len > subev->max_len)
7577                 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7578                             ev->subevent, skb->len, subev->max_len);
7579         data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7580         if (!data)
7581                 return;
7582
7583         subev->func(hdev, data, skb);
7584 }
7585
7586 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7587                                  u8 event, struct sk_buff *skb)
7588 {
7589         struct hci_ev_cmd_complete *ev;
7590         struct hci_event_hdr *hdr;
7591
7592         if (!skb)
7593                 return false;
7594
7595         hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7596         if (!hdr)
7597                 return false;
7598
7599         if (event) {
7600                 if (hdr->evt != event)
7601                         return false;
7602                 return true;
7603         }
7604
7605         /* Check if request ended in Command Status - no way to retrieve
7606          * any extra parameters in this case.
7607          */
7608         if (hdr->evt == HCI_EV_CMD_STATUS)
7609                 return false;
7610
7611         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7612                 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7613                            hdr->evt);
7614                 return false;
7615         }
7616
7617         ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7618         if (!ev)
7619                 return false;
7620
7621         if (opcode != __le16_to_cpu(ev->opcode)) {
7622                 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7623                        __le16_to_cpu(ev->opcode));
7624                 return false;
7625         }
7626
7627         return true;
7628 }
7629
7630 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7631                                   struct sk_buff *skb)
7632 {
7633         struct hci_ev_le_advertising_info *adv;
7634         struct hci_ev_le_direct_adv_info *direct_adv;
7635         struct hci_ev_le_ext_adv_info *ext_adv;
7636         const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7637         const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7638
7639         hci_dev_lock(hdev);
7640
7641         /* If we are currently suspended and this is the first BT event seen,
7642          * save the wake reason associated with the event.
7643          */
7644         if (!hdev->suspended || hdev->wake_reason)
7645                 goto unlock;
7646
7647         /* Default to remote wake. Values for wake_reason are documented in the
7648          * Bluez mgmt api docs.
7649          */
7650         hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7651
7652         /* Once configured for remote wakeup, we should only wake up for
7653          * reconnections. It's useful to see which device is waking us up so
7654          * keep track of the bdaddr of the connection event that woke us up.
7655          */
7656         if (event == HCI_EV_CONN_REQUEST) {
7657                 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7658                 hdev->wake_addr_type = BDADDR_BREDR;
7659         } else if (event == HCI_EV_CONN_COMPLETE) {
7660                 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7661                 hdev->wake_addr_type = BDADDR_BREDR;
7662         } else if (event == HCI_EV_LE_META) {
7663                 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7664                 u8 subevent = le_ev->subevent;
7665                 u8 *ptr = &skb->data[sizeof(*le_ev)];
7666                 u8 num_reports = *ptr;
7667
7668                 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7669                      subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7670                      subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7671                     num_reports) {
7672                         adv = (void *)(ptr + 1);
7673                         direct_adv = (void *)(ptr + 1);
7674                         ext_adv = (void *)(ptr + 1);
7675
7676                         switch (subevent) {
7677                         case HCI_EV_LE_ADVERTISING_REPORT:
7678                                 bacpy(&hdev->wake_addr, &adv->bdaddr);
7679                                 hdev->wake_addr_type = adv->bdaddr_type;
7680                                 break;
7681                         case HCI_EV_LE_DIRECT_ADV_REPORT:
7682                                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7683                                 hdev->wake_addr_type = direct_adv->bdaddr_type;
7684                                 break;
7685                         case HCI_EV_LE_EXT_ADV_REPORT:
7686                                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7687                                 hdev->wake_addr_type = ext_adv->bdaddr_type;
7688                                 break;
7689                         }
7690                 }
7691         } else {
7692                 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7693         }
7694
7695 unlock:
7696         hci_dev_unlock(hdev);
7697 }
7698
/* Declare an hci_ev_table entry at index _op for a variable-length event:
 * payloads between _min_len and _max_len are accepted (oversized ones
 * only warn, see hci_event_func()).  Handled by a plain callback (func).
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length event: min_len == max_len == _len */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Event whose payload is a bare status byte (struct hci_ev_status) */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Variable-length event handled by a request-aware callback (func_req),
 * which may complete a pending HCI request.
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length request-aware event */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
7723
/* Entries in this table shall have their position according to the event opcode
 * they handle so the use of the macros above is recommended since it does
 * attempt to initialize at its proper index using Designated Initializers,
 * that way events without a callback function don't have to be entered.
 */
static const struct hci_ev {
	bool req;	/* selects which union member is valid below */
	union {
		/* plain handler (req == false) */
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		/* request-aware handler (req == true) */
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;	/* shorter payloads are rejected */
	u16  max_len;	/* longer payloads only warn */
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
#ifdef TIZEN_BT
	/* [0xFF = HCI_EV_VENDOR_SPECIFIC] */
	HCI_EV(HCI_EV_VENDOR_SPECIFIC, hci_vendor_specific_evt,
	       sizeof(struct hci_ev_vendor_specific)),
#else
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
#endif
};
7887
7888 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7889                            u16 *opcode, u8 *status,
7890                            hci_req_complete_t *req_complete,
7891                            hci_req_complete_skb_t *req_complete_skb)
7892 {
7893         const struct hci_ev *ev = &hci_ev_table[event];
7894         void *data;
7895
7896         if (!ev->func)
7897                 return;
7898
7899         if (skb->len < ev->min_len) {
7900                 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7901                            event, skb->len, ev->min_len);
7902                 return;
7903         }
7904
7905         /* Just warn if the length is over max_len size it still be
7906          * possible to partially parse the event so leave to callback to
7907          * decide if that is acceptable.
7908          */
7909         if (skb->len > ev->max_len)
7910                 bt_dev_warn_ratelimited(hdev,
7911                                         "unexpected event 0x%2.2x length: %u > %u",
7912                                         event, skb->len, ev->max_len);
7913
7914         data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7915         if (!data)
7916                 return;
7917
7918         if (ev->req)
7919                 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7920                              req_complete_skb);
7921         else
7922                 ev->func(hdev, data, skb);
7923 }
7924
/* Entry point for a complete HCI Event packet from the controller.
 *
 * Matches the event against any pending non-LE command, records the
 * wake reason while suspended, dispatches to the per-event handler
 * table and finally runs any request-completion callback.  Consumes
 * @skb in all paths.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Cache a clone of this event on hdev, releasing the previous one.
	 * NOTE(review): presumably consumed by hci_recv_event_data() - the
	 * consumer is outside this file, confirm.
	 */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	/* Event code 0x00 is not a valid HCI event */
	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	/* Strip the event header; handlers see only the payload */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	/* The handler may have filled in a completion callback; the skb
	 * variant only receives orig_skb when it really is the matching
	 * Command Complete (or awaited event) for the pending request.
	 */
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}