Bluetooth: Add Advertising Packet Configuration
[platform/kernel/linux-starfive.git] / net / bluetooth / hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26 #include <linux/compat.h>
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36 #ifdef TIZEN_BT
37 #include <net/bluetooth/mgmt_tizen.h>
38 #endif
39
40 #include "mgmt_util.h"
41
/* Registered HCI management channels, protected by mgmt_chan_list_lock */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Allocator for per-socket monitor tracing cookies (see hci_sock_gen_cookie) */
static DEFINE_IDA(sock_cookie_ida);

/* Number of monitor sockets currently in promiscuous mode */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
48
49 /* ----- HCI socket interface ----- */
50
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;	/* must stay first: hci_pi() casts sock* */
	struct hci_dev    *hdev;	/* bound controller, NULL when unbound */
	struct hci_filter filter;	/* per-socket RAW packet/event filter */
	__u8              cmsg_mask;	/* cmsg delivery mask — semantics set by setsockopt, outside this chunk */
	unsigned short    channel;	/* HCI_CHANNEL_* the socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* bits (see hci_sock_set_flag) */
	__u32             cookie;	/* monitor tracing cookie, 0 = not assigned */
	char              comm[TASK_COMM_LEN];	/* opener's task name, for tracing */
	__u16             mtu;	/* NOTE(review): presumably max accepted packet size — users outside this chunk */
};
65
66 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
67 {
68         struct hci_dev *hdev = hci_pi(sk)->hdev;
69
70         if (!hdev)
71                 return ERR_PTR(-EBADFD);
72         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
73                 return ERR_PTR(-EPIPE);
74         return hdev;
75 }
76
77 void hci_sock_set_flag(struct sock *sk, int nr)
78 {
79         set_bit(nr, &hci_pi(sk)->flags);
80 }
81
82 void hci_sock_clear_flag(struct sock *sk, int nr)
83 {
84         clear_bit(nr, &hci_pi(sk)->flags);
85 }
86
87 int hci_sock_test_flag(struct sock *sk, int nr)
88 {
89         return test_bit(nr, &hci_pi(sk)->flags);
90 }
91
92 unsigned short hci_sock_get_channel(struct sock *sk)
93 {
94         return hci_pi(sk)->channel;
95 }
96
97 u32 hci_sock_get_cookie(struct sock *sk)
98 {
99         return hci_pi(sk)->cookie;
100 }
101
102 static bool hci_sock_gen_cookie(struct sock *sk)
103 {
104         int id = hci_pi(sk)->cookie;
105
106         if (!id) {
107                 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
108                 if (id < 0)
109                         id = 0xffffffff;
110
111                 hci_pi(sk)->cookie = id;
112                 get_task_comm(hci_pi(sk)->comm, current);
113                 return true;
114         }
115
116         return false;
117 }
118
119 static void hci_sock_free_cookie(struct sock *sk)
120 {
121         int id = hci_pi(sk)->cookie;
122
123         if (id) {
124                 hci_pi(sk)->cookie = 0xffffffff;
125                 ida_simple_remove(&sock_cookie_ida, id);
126         }
127 }
128
129 static inline int hci_test_bit(int nr, const void *addr)
130 {
131         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
132 }
133
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmaps of which packet types, events and commands are permitted;
 * ocf_mask is indexed by OGF, each entry a 128-bit OCF bitmap.
 * NOTE(review): presumably enforced for unprivileged RAW sockets —
 * the enforcement site is outside this chunk, confirm against users.
 */
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
163
/* Global list of all HCI sockets, guarded by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
167
/* Apply the per-socket HCI filter to @skb.  Returns true when the
 * packet must be withheld from @sk, false when it may be delivered.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	/* Packet type not enabled in the type mask: drop */
	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First payload byte of an HCI event is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Command Complete carries its opcode at payload offset 3,
	 * Command Status at offset 4; drop events for other opcodes.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
204
/* Send frame to RAW socket.
 *
 * Walks every HCI socket bound to @hdev and queues a clone of @skb on
 * those that may see it.  A single private copy with the packet-type
 * byte pushed in front is created lazily on the first match and shared
 * (via skb_clone) by all recipients.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sockets only see the core packet types and
			 * must additionally pass the per-socket filter.
			 */
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel only mirrors incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drop our reference to the shared copy (NULL-safe) */
	kfree_skb(skb_copy);
}
269
270 /* Send frame to sockets with specific channel */
271 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
272                                   int flag, struct sock *skip_sk)
273 {
274         struct sock *sk;
275
276         BT_DBG("channel %u len %d", channel, skb->len);
277
278         sk_for_each(sk, &hci_sk_list.head) {
279                 struct sk_buff *nskb;
280
281                 /* Ignore socket without the flag set */
282                 if (!hci_sock_test_flag(sk, flag))
283                         continue;
284
285                 /* Skip the original socket */
286                 if (sk == skip_sk)
287                         continue;
288
289                 if (sk->sk_state != BT_BOUND)
290                         continue;
291
292                 if (hci_pi(sk)->channel != channel)
293                         continue;
294
295                 nskb = skb_clone(skb, GFP_ATOMIC);
296                 if (!nskb)
297                         continue;
298
299                 if (sock_queue_rcv_skb(sk, nskb))
300                         kfree_skb(nskb);
301         }
302
303 }
304
/* Locked wrapper: broadcast @skb to @channel under hci_sk_list.lock */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
312
/* Send frame to monitor socket.
 *
 * Mirrors HCI traffic of @hdev to all monitor-channel sockets,
 * prefixed with a hci_mon_hdr carrying opcode, index and length.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Fast path out when no monitor is in promiscuous mode */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map HCI packet type (and direction for data packets) onto the
	 * monitor opcode; anything else is not traced.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
372
/* Trace a management control event to the monitor channel.
 *
 * For every CONTROL-channel socket that has @flag set (except
 * @skip_sk), build a CTRL_EVENT message carrying that socket's cookie,
 * the event code and @data, stamped with @tstamp, and broadcast it to
 * all trusted monitor sockets.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	/* Without a device, attribute the event to "no index" */
	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* 4 bytes cookie + 2 bytes event code + payload */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* We already hold hci_sk_list.lock: use lockless variant */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
426
/* Build a monitor message describing a device lifecycle @event for
 * @hdev (new/del index, index info, open/close index).  Returns NULL
 * on allocation failure or for events that are not traced.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* No index info until the manufacturer is known */
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the common monitor header */
	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
504
/* Build a monitor CTRL_OPEN message for @sk: cookie, channel format,
 * version, trust flags and the opener's task name.  Returns NULL when
 * the socket has no cookie, sits on an untraced channel, or allocation
 * fails.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	/* Wire format: cookie, format, version, flags, name-len, name */
	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
562
563 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
564 {
565         struct hci_mon_hdr *hdr;
566         struct sk_buff *skb;
567
568         /* No message needed when cookie is not present */
569         if (!hci_pi(sk)->cookie)
570                 return NULL;
571
572         switch (hci_pi(sk)->channel) {
573         case HCI_CHANNEL_RAW:
574         case HCI_CHANNEL_USER:
575         case HCI_CHANNEL_CONTROL:
576                 break;
577         default:
578                 /* No message for unsupported format */
579                 return NULL;
580         }
581
582         skb = bt_skb_alloc(4, GFP_ATOMIC);
583         if (!skb)
584                 return NULL;
585
586         put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
587
588         __net_timestamp(skb);
589
590         hdr = skb_push(skb, HCI_MON_HDR_SIZE);
591         hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
592         if (hci_pi(sk)->hdev)
593                 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
594         else
595                 hdr->index = cpu_to_le16(HCI_DEV_NONE);
596         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
597
598         return skb;
599 }
600
601 static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
602                                                    u16 opcode, u16 len,
603                                                    const void *buf)
604 {
605         struct hci_mon_hdr *hdr;
606         struct sk_buff *skb;
607
608         skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
609         if (!skb)
610                 return NULL;
611
612         put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
613         put_unaligned_le16(opcode, skb_put(skb, 2));
614
615         if (buf)
616                 skb_put_data(skb, buf, len);
617
618         __net_timestamp(skb);
619
620         hdr = skb_push(skb, HCI_MON_HDR_SIZE);
621         hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
622         hdr->index = cpu_to_le16(index);
623         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
624
625         return skb;
626 }
627
/* Queue a free-form SYSTEM_NOTE message on monitor socket @sk.  The
 * format string is rendered twice: once with vsnprintf to size the
 * buffer, then with vsprintf to fill it (safe: same args, same fmt).
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass: measure the formatted length */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	/* Second pass: render into the skb, then NUL-terminate */
	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
659
/* Replay the current state of every registered controller (register,
 * open, up/setup) to a freshly attached monitor socket @sk so it
 * starts with a consistent view of all indexes.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Devices that were never opened have no further state */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Report index info for devices that are up or in setup */
		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
701
702 static void send_monitor_control_replay(struct sock *mon_sk)
703 {
704         struct sock *sk;
705
706         read_lock(&hci_sk_list.lock);
707
708         sk_for_each(sk, &hci_sk_list.head) {
709                 struct sk_buff *skb;
710
711                 skb = create_monitor_ctrl_open(sk);
712                 if (!skb)
713                         continue;
714
715                 if (sock_queue_rcv_skb(mon_sk, skb))
716                         kfree_skb(skb);
717         }
718
719         read_unlock(&hci_sk_list.lock);
720 }
721
/* Generate internal stack event.
 *
 * Wraps @data in a synthetic HCI_EV_STACK_INTERNAL event packet marked
 * as incoming and delivers it to RAW sockets via hci_send_to_sock().
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present the synthetic event as incoming controller traffic */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
748
/* Propagate a device lifecycle @event for @hdev: trace it on the
 * monitor channel, raise a stack-internal event for RAW sockets, and
 * on unregister wake every socket still bound to the dead device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
788
789 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
790 {
791         struct hci_mgmt_chan *c;
792
793         list_for_each_entry(c, &mgmt_chan_list, list) {
794                 if (c->channel == channel)
795                         return c;
796         }
797
798         return NULL;
799 }
800
801 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
802 {
803         struct hci_mgmt_chan *c;
804
805         mutex_lock(&mgmt_chan_list_lock);
806         c = __hci_mgmt_chan_find(channel);
807         mutex_unlock(&mgmt_chan_list_lock);
808
809         return c;
810 }
811
812 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
813 {
814         if (c->channel < HCI_CHANNEL_CONTROL)
815                 return -EINVAL;
816
817         mutex_lock(&mgmt_chan_list_lock);
818         if (__hci_mgmt_chan_find(c->channel)) {
819                 mutex_unlock(&mgmt_chan_list_lock);
820                 return -EALREADY;
821         }
822
823         list_add_tail(&c->list, &mgmt_chan_list);
824
825         mutex_unlock(&mgmt_chan_list_lock);
826
827         return 0;
828 }
829 EXPORT_SYMBOL(hci_mgmt_chan_register);
830
/* Unregister a management channel, serialised against lookups and
 * registrations via mgmt_chan_list_lock.
 */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
838
/* Release an HCI socket: emit the monitor close trace, free the
 * tracing cookie, unlink from the global socket list, and drop the
 * bound device (shutting it down first for USER channel sockets).
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		/* One less promiscuous monitor */
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been complete and hdev will get released when we put
			 * below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		/* Drop promiscuous count and our device reference */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
905
906 static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
907 {
908         bdaddr_t bdaddr;
909         int err;
910
911         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
912                 return -EFAULT;
913
914         hci_dev_lock(hdev);
915
916         err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
917
918         hci_dev_unlock(hdev);
919
920         return err;
921 }
922
923 static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
924 {
925         bdaddr_t bdaddr;
926         int err;
927
928         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
929                 return -EFAULT;
930
931         hci_dev_lock(hdev);
932
933         err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
934
935         hci_dev_unlock(hdev);
936
937         return err;
938 }
939
/* Ioctls that require bound socket.
 *
 * Resolves the bound device and rejects requests when the device is in
 * exclusive user-channel mode, unconfigured, or not a primary
 * controller, then dispatches the per-command handlers.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Privilege-checked but deliberately unsupported */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
983
/* Top-level ioctl handler for HCI sockets.
 *
 * Only raw channel sockets accept ioctls. Device-table commands run
 * without the socket lock; anything left over is forwarded to
 * hci_sock_bound_ioctl() with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	/* ioctls are only meaningful on the raw channel */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	/* The commands below operate on the device table or on a device
	 * looked up by index, and run without the socket lock.
	 */
	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Everything else needs a bound socket; re-take the lock */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1112
1113 #ifdef CONFIG_COMPAT
1114 static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1115                                  unsigned long arg)
1116 {
1117         switch (cmd) {
1118         case HCIDEVUP:
1119         case HCIDEVDOWN:
1120         case HCIDEVRESET:
1121         case HCIDEVRESTAT:
1122                 return hci_sock_ioctl(sock, cmd, arg);
1123         }
1124
1125         return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1126 }
1127 #endif
1128
/* Bind an HCI socket to a channel (raw, user, monitor, logging or a
 * registered mgmt channel) and, for raw/user channels, optionally to a
 * specific controller identified by haddr.hci_dev.
 *
 * Each channel case enforces its own capability requirements and sends
 * the appropriate open/close notifications to the monitor channel.
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy only as many bytes as the caller provided; the remainder
	 * of haddr stays zeroed from the memset.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* Raw sockets may bind to one device or to none at all */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* The user channel requires an explicit device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* The device must not be in use by the stack, except for the
		 * HCI_AUTO_OFF grace period where it is up but idle.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* NOTE(review): mirrored by mgmt_index_added() on the error
		 * path below and in the release path of the user channel.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		/* The monitor channel never binds to a specific device */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay current state so the new monitor starts with a
		 * complete picture before receiving live events.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		/* Anything else must be a registered mgmt channel */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1428
1429 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1430                             int peer)
1431 {
1432         struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1433         struct sock *sk = sock->sk;
1434         struct hci_dev *hdev;
1435         int err = 0;
1436
1437         BT_DBG("sock %p sk %p", sock, sk);
1438
1439         if (peer)
1440                 return -EOPNOTSUPP;
1441
1442         lock_sock(sk);
1443
1444         hdev = hci_hdev_from_sock(sk);
1445         if (IS_ERR(hdev)) {
1446                 err = PTR_ERR(hdev);
1447                 goto done;
1448         }
1449
1450         haddr->hci_family = AF_BLUETOOTH;
1451         haddr->hci_dev    = hdev->id;
1452         haddr->hci_channel= hci_pi(sk)->channel;
1453         err = sizeof(*haddr);
1454
1455 done:
1456         release_sock(sk);
1457         return err;
1458 }
1459
/* Attach requested ancillary data (direction and/or timestamp) to a
 * frame delivered on a raw channel socket.
 *
 * NOTE(review): cmsg_mask is presumably configured through HCI socket
 * options (HCI_CMSG_DIR / HCI_CMSG_TSTAMP) — confirm in setsockopt.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	/* Frame direction: incoming from or outgoing to the controller */
	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit compat tasks expect the old 32-bit timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1496
1497 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1498                             size_t len, int flags)
1499 {
1500         struct sock *sk = sock->sk;
1501         struct sk_buff *skb;
1502         int copied, err;
1503         unsigned int skblen;
1504
1505         BT_DBG("sock %p, sk %p", sock, sk);
1506
1507         if (flags & MSG_OOB)
1508                 return -EOPNOTSUPP;
1509
1510         if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1511                 return -EOPNOTSUPP;
1512
1513         if (sk->sk_state == BT_CLOSED)
1514                 return 0;
1515
1516         skb = skb_recv_datagram(sk, flags, &err);
1517         if (!skb)
1518                 return err;
1519
1520         skblen = skb->len;
1521         copied = skb->len;
1522         if (len < copied) {
1523                 msg->msg_flags |= MSG_TRUNC;
1524                 copied = len;
1525         }
1526
1527         skb_reset_transport_header(skb);
1528         err = skb_copy_datagram_msg(skb, 0, msg, copied);
1529
1530         switch (hci_pi(sk)->channel) {
1531         case HCI_CHANNEL_RAW:
1532                 hci_sock_cmsg(sk, msg, skb);
1533                 break;
1534         case HCI_CHANNEL_USER:
1535         case HCI_CHANNEL_MONITOR:
1536                 sock_recv_timestamp(msg, sk, skb);
1537                 break;
1538         default:
1539                 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1540                         sock_recv_timestamp(msg, sk, skb);
1541                 break;
1542         }
1543
1544         skb_free_datagram(sk, skb);
1545
1546         if (flags & MSG_TRUNC)
1547                 copied = skblen;
1548
1549         return err ? : copied;
1550 }
1551
/* Dispatch one management command received on a mgmt channel socket.
 *
 * Validates the mgmt header, resolves the handler (Tizen-specific
 * opcodes above TIZEN_OP_CODE_BASE use a separate handler table when
 * TIZEN_BT is enabled), enforces trust, index and length constraints,
 * then invokes the handler. Returns skb->len on success or a negative
 * errno; protocol-level failures are additionally reported back to
 * the socket via mgmt_cmd_status().
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length must match the actual payload size */
	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

#ifdef TIZEN_BT
   if (opcode >= TIZEN_OP_CODE_BASE) {
       u16 tizen_opcode_index = opcode - TIZEN_OP_CODE_BASE;
       if (tizen_opcode_index >= chan->tizen_handler_count ||
           chan->tizen_handlers[tizen_opcode_index].func == NULL) {
           BT_DBG("Unknown op %u", opcode);
           err = mgmt_cmd_status(sk, index, opcode,
                         MGMT_STATUS_UNKNOWN_COMMAND);
           goto done;
       }

       handler = &chan->tizen_handlers[tizen_opcode_index];

   } else {
#endif

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];
#ifdef TIZEN_BT
	}
#endif

	/* Untrusted sockets may only invoke explicitly untrusted-safe
	 * commands.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	/* Resolve the target controller, if the command addresses one */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONF_INDEX) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Unless the handler accepts both cases, the presence of an hdev
	 * must match the handler's HCI_MGMT_NO_HDEV expectation.
	 */
	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Fixed-length commands must match exactly; variable-length
	 * commands set a minimum size.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}
1685
/* Validate a userspace logging frame received on the logging channel
 * and forward it to the monitor channel as HCI_MON_USER_LOGGING.
 *
 * The flags argument is currently unused. Returns the frame length on
 * success or a negative errno on malformed packets.
 */
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	/* Only opcode 0x0000 is defined for logging frames */
	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	/* Verify the index refers to an existing device; the reference is
	 * held only to validate existence and is dropped below.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
1751
/* Transmit path for HCI sockets.  Depending on the channel the socket is
 * bound to, a frame is either queued raw towards the controller
 * (RAW/USER), handed to a registered management channel handler, or
 * forwarded to the logging handler.  Returns the number of bytes
 * consumed on success or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Reject any message flag this path does not implement */
	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Lower bound of 4 bytes: presumably the packet type indicator
	 * octet plus the smallest HCI header — TODO confirm; upper bound
	 * is the per-socket MTU.
	 */
	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		/* Frame goes towards the controller; handled below */
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are receive-only */
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		/* Any other channel must be a registered mgmt channel */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	/* The first byte of the frame is the packet type indicator;
	 * strip it off and remember it in the skb control block.
	 */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require
		 * CAP_NET_RAW on a raw socket.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* Vendor-specific OGF bypasses the command queue
			 * and goes out via the raw queue.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data frames on the RAW channel need CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	/* On success report the number of bytes consumed */
	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1888
1889 static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
1890                                    sockptr_t optval, unsigned int len)
1891 {
1892         struct hci_ufilter uf = { .opcode = 0 };
1893         struct sock *sk = sock->sk;
1894         int err = 0, opt = 0;
1895
1896         BT_DBG("sk %p, opt %d", sk, optname);
1897
1898         lock_sock(sk);
1899
1900         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1901                 err = -EBADFD;
1902                 goto done;
1903         }
1904
1905         switch (optname) {
1906         case HCI_DATA_DIR:
1907                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1908                         err = -EFAULT;
1909                         break;
1910                 }
1911
1912                 if (opt)
1913                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1914                 else
1915                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1916                 break;
1917
1918         case HCI_TIME_STAMP:
1919                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1920                         err = -EFAULT;
1921                         break;
1922                 }
1923
1924                 if (opt)
1925                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1926                 else
1927                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1928                 break;
1929
1930         case HCI_FILTER:
1931                 {
1932                         struct hci_filter *f = &hci_pi(sk)->filter;
1933
1934                         uf.type_mask = f->type_mask;
1935                         uf.opcode    = f->opcode;
1936                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1937                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1938                 }
1939
1940                 len = min_t(unsigned int, len, sizeof(uf));
1941                 if (copy_from_sockptr(&uf, optval, len)) {
1942                         err = -EFAULT;
1943                         break;
1944                 }
1945
1946                 if (!capable(CAP_NET_RAW)) {
1947                         uf.type_mask &= hci_sec_filter.type_mask;
1948                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1949                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1950                 }
1951
1952                 {
1953                         struct hci_filter *f = &hci_pi(sk)->filter;
1954
1955                         f->type_mask = uf.type_mask;
1956                         f->opcode    = uf.opcode;
1957                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1958                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1959                 }
1960                 break;
1961
1962         default:
1963                 err = -ENOPROTOOPT;
1964                 break;
1965         }
1966
1967 done:
1968         release_sock(sk);
1969         return err;
1970 }
1971
1972 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1973                                sockptr_t optval, unsigned int len)
1974 {
1975         struct sock *sk = sock->sk;
1976         int err = 0;
1977         u16 opt;
1978
1979         BT_DBG("sk %p, opt %d", sk, optname);
1980
1981         if (level == SOL_HCI)
1982                 return hci_sock_setsockopt_old(sock, level, optname, optval,
1983                                                len);
1984
1985         if (level != SOL_BLUETOOTH)
1986                 return -ENOPROTOOPT;
1987
1988         lock_sock(sk);
1989
1990         switch (optname) {
1991         case BT_SNDMTU:
1992         case BT_RCVMTU:
1993                 switch (hci_pi(sk)->channel) {
1994                 /* Don't allow changing MTU for channels that are meant for HCI
1995                  * traffic only.
1996                  */
1997                 case HCI_CHANNEL_RAW:
1998                 case HCI_CHANNEL_USER:
1999                         err = -ENOPROTOOPT;
2000                         goto done;
2001                 }
2002
2003                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
2004                         err = -EFAULT;
2005                         break;
2006                 }
2007
2008                 hci_pi(sk)->mtu = opt;
2009                 break;
2010
2011         default:
2012                 err = -ENOPROTOOPT;
2013                 break;
2014         }
2015
2016 done:
2017         release_sock(sk);
2018         return err;
2019 }
2020
2021 static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
2022                                    char __user *optval, int __user *optlen)
2023 {
2024         struct hci_ufilter uf;
2025         struct sock *sk = sock->sk;
2026         int len, opt, err = 0;
2027
2028         BT_DBG("sk %p, opt %d", sk, optname);
2029
2030         if (get_user(len, optlen))
2031                 return -EFAULT;
2032
2033         lock_sock(sk);
2034
2035         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
2036                 err = -EBADFD;
2037                 goto done;
2038         }
2039
2040         switch (optname) {
2041         case HCI_DATA_DIR:
2042                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
2043                         opt = 1;
2044                 else
2045                         opt = 0;
2046
2047                 if (put_user(opt, optval))
2048                         err = -EFAULT;
2049                 break;
2050
2051         case HCI_TIME_STAMP:
2052                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2053                         opt = 1;
2054                 else
2055                         opt = 0;
2056
2057                 if (put_user(opt, optval))
2058                         err = -EFAULT;
2059                 break;
2060
2061         case HCI_FILTER:
2062                 {
2063                         struct hci_filter *f = &hci_pi(sk)->filter;
2064
2065                         memset(&uf, 0, sizeof(uf));
2066                         uf.type_mask = f->type_mask;
2067                         uf.opcode    = f->opcode;
2068                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2069                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2070                 }
2071
2072                 len = min_t(unsigned int, len, sizeof(uf));
2073                 if (copy_to_user(optval, &uf, len))
2074                         err = -EFAULT;
2075                 break;
2076
2077         default:
2078                 err = -ENOPROTOOPT;
2079                 break;
2080         }
2081
2082 done:
2083         release_sock(sk);
2084         return err;
2085 }
2086
2087 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2088                                char __user *optval, int __user *optlen)
2089 {
2090         struct sock *sk = sock->sk;
2091         int err = 0;
2092
2093         BT_DBG("sk %p, opt %d", sk, optname);
2094
2095         if (level == SOL_HCI)
2096                 return hci_sock_getsockopt_old(sock, level, optname, optval,
2097                                                optlen);
2098
2099         if (level != SOL_BLUETOOTH)
2100                 return -ENOPROTOOPT;
2101
2102         lock_sock(sk);
2103
2104         switch (optname) {
2105         case BT_SNDMTU:
2106         case BT_RCVMTU:
2107                 if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2108                         err = -EFAULT;
2109                 break;
2110
2111         default:
2112                 err = -ENOPROTOOPT;
2113                 break;
2114         }
2115
2116         release_sock(sk);
2117         return err;
2118 }
2119
/* Socket destructor: release mgmt state and drop any still-queued
 * frames before the socket memory itself is freed.
 */
static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
2126
/* Operations table for PF_BLUETOOTH/BTPROTO_HCI sockets.  HCI sockets
 * are datagram-like; connection-oriented operations (listen, connect,
 * accept, ...) are rejected via the sock_no_* stubs.
 */
static const struct proto_ops hci_sock_ops = {
	.family         = PF_BLUETOOTH,
	.owner          = THIS_MODULE,
	.release        = hci_sock_release,
	.bind           = hci_sock_bind,
	.getname        = hci_sock_getname,
	.sendmsg        = hci_sock_sendmsg,
	.recvmsg        = hci_sock_recvmsg,
	.ioctl          = hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = hci_sock_compat_ioctl,
#endif
	.poll           = datagram_poll,
	.listen         = sock_no_listen,
	.shutdown       = sock_no_shutdown,
	.setsockopt     = hci_sock_setsockopt,
	.getsockopt     = hci_sock_getsockopt,
	.connect        = sock_no_connect,
	.socketpair     = sock_no_socketpair,
	.accept         = sock_no_accept,
	.mmap           = sock_no_mmap
};
2149
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket hci_pinfo state accessed via hci_pi().
 */
static struct proto hci_sk_proto = {
	.name           = "HCI",
	.owner          = THIS_MODULE,
	.obj_size       = sizeof(struct hci_pinfo)
};
2155
2156 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2157                            int kern)
2158 {
2159         struct sock *sk;
2160
2161         BT_DBG("sock %p", sock);
2162
2163         if (sock->type != SOCK_RAW)
2164                 return -ESOCKTNOSUPPORT;
2165
2166         sock->ops = &hci_sock_ops;
2167
2168         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2169         if (!sk)
2170                 return -ENOMEM;
2171
2172         sock_init_data(sock, sk);
2173
2174         sock_reset_flag(sk, SOCK_ZAPPED);
2175
2176         sk->sk_protocol = protocol;
2177
2178         sock->state = SS_UNCONNECTED;
2179         sk->sk_state = BT_OPEN;
2180         sk->sk_destruct = hci_sock_destruct;
2181
2182         bt_sock_link(&hci_sk_list, sk);
2183         return 0;
2184 }
2185
/* PF_BLUETOOTH family hook used by the socket core to create HCI
 * sockets via hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner  = THIS_MODULE,
	.create = hci_sock_create,
};
2191
/* Register the HCI socket layer: the protocol, the PF_BLUETOOTH
 * sub-protocol and the /proc/net entry.  Returns 0 on success or a
 * negative errno, with everything already registered unwound.
 */
int __init hci_sock_init(void)
{
	int err;

	/* sockaddr_hci must fit in the generic sockaddr storage */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* bt_sock_register() succeeded, so undo it here */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	/* Unwind the proto registration on any later failure */
	proto_unregister(&hci_sk_proto);
	return err;
}
2223
/* Tear down everything hci_sock_init() registered, in reverse order. */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}