Bluetooth: Cancel the Sniff timer
[platform/kernel/linux-rpi.git] / net / bluetooth / hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26 #include <linux/compat.h>
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36 #ifdef TIZEN_BT
37 #include <net/bluetooth/mgmt_tizen.h>
38 #endif
39
40 #include "mgmt_util.h"
41
static LIST_HEAD(mgmt_chan_list);		/* registered mgmt channel handlers */
static DEFINE_MUTEX(mgmt_chan_list_lock);	/* protects mgmt_chan_list */

/* IDA pool handing out the per-socket monitor cookies */
static DEFINE_IDA(sock_cookie_ida);

/* Count of monitor sockets; tested before building monitor copies */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
48
49 /* ----- HCI socket interface ----- */
50
51 /* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state for HCI sockets */
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* bound device, NULL when unbound */
	struct hci_filter filter;	/* RAW channel receive filter */
	__u8              cmsg_mask;	/* requested ancillary data items */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket bound to */
	unsigned long     flags;	/* HCI_SOCK_* flag bits */
	__u32             cookie;	/* monitor cookie, 0 until generated */
	char              comm[TASK_COMM_LEN];	/* opener's task name */
};
64
/* Return the hci_dev bound to @sk, or an ERR_PTR when the socket is
 * unbound (-EBADFD) or the device is in the middle of unregistering
 * (-EPIPE).  Callers must check with IS_ERR() before use.
 */
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
75
/* Atomically set HCI_SOCK_* flag bit @nr on socket @sk */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
80
/* Atomically clear HCI_SOCK_* flag bit @nr on socket @sk */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
85
/* Test HCI_SOCK_* flag bit @nr on socket @sk; returns non-zero if set */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
90
/* Return the HCI_CHANNEL_* value the socket was bound with */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
95
/* Return the socket's monitor cookie (0 when none was generated yet) */
u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
100
101 static bool hci_sock_gen_cookie(struct sock *sk)
102 {
103         int id = hci_pi(sk)->cookie;
104
105         if (!id) {
106                 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
107                 if (id < 0)
108                         id = 0xffffffff;
109
110                 hci_pi(sk)->cookie = id;
111                 get_task_comm(hci_pi(sk)->comm, current);
112                 return true;
113         }
114
115         return false;
116 }
117
118 static void hci_sock_free_cookie(struct sock *sk)
119 {
120         int id = hci_pi(sk)->cookie;
121
122         if (id) {
123                 hci_pi(sk)->cookie = 0xffffffff;
124                 ida_simple_remove(&sock_cookie_ida, id);
125         }
126 }
127
128 static inline int hci_test_bit(int nr, const void *addr)
129 {
130         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
131 }
132
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmap description of what an unprivileged RAW socket may send and
 * receive: allowed packet types, allowed events and, per OGF, which
 * command OCFs are permitted.
 */
struct hci_sec_filter {
	__u32 type_mask;			/* allowed packet types */
	__u32 event_mask[2];			/* allowed events (64-bit map) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];/* allowed OCFs per OGF */
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

/* Global list of all HCI sockets, guarded by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
166
/* Apply the per-socket HCI filter of RAW socket @sk to @skb.
 * Returns true when the packet is filtered out, i.e. must NOT be
 * delivered to this socket.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* The answered command opcode lives at offset 3 of a Command
	 * Complete event and offset 4 of a Command Status event.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
203
204 /* Send frame to RAW socket */
/* Deliver @skb to every bound RAW/USER socket attached to @hdev,
 * except the socket the frame originated from.  The private copy with
 * the prepended type byte is created lazily, at most once, and shared
 * between all receivers via skb_clone().
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sockets see all well-known packet types,
			 * subject to their per-socket filter.
			 */
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel sockets only see incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() takes its own reference on
		 * success; on failure we must drop the clone ourselves.
		 */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
268
269 /* Send frame to sockets with specific channel */
270 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
271                                   int flag, struct sock *skip_sk)
272 {
273         struct sock *sk;
274
275         BT_DBG("channel %u len %d", channel, skb->len);
276
277         sk_for_each(sk, &hci_sk_list.head) {
278                 struct sk_buff *nskb;
279
280                 /* Ignore socket without the flag set */
281                 if (!hci_sock_test_flag(sk, flag))
282                         continue;
283
284                 /* Skip the original socket */
285                 if (sk == skip_sk)
286                         continue;
287
288                 if (sk->sk_state != BT_BOUND)
289                         continue;
290
291                 if (hci_pi(sk)->channel != channel)
292                         continue;
293
294                 nskb = skb_clone(skb, GFP_ATOMIC);
295                 if (!nskb)
296                         continue;
297
298                 if (sock_queue_rcv_skb(sk, nskb))
299                         kfree_skb(nskb);
300         }
301
302 }
303
/* Send frame to sockets with a specific channel; takes the socket
 * list lock around __hci_send_to_channel().
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
311
312 /* Send frame to monitor socket */
/* Mirror @skb to all monitor sockets, wrapped in a hci_mon_hdr whose
 * opcode encodes packet type and direction.  Cheap no-op while no
 * monitor is listening.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and, for data, direction) to monitor opcode */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		/* Unknown packet types are not mirrored */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
371
/* For every CONTROL channel socket carrying @flag (except @skip_sk),
 * build a HCI_MON_CTRL_EVENT frame tagged with that socket's cookie
 * and forward it to all monitor sockets.  @tstamp is copied into each
 * frame so all mirrored events share one timestamp.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* 6 = 4 bytes cookie + 2 bytes event code */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* Lockless variant: we already hold hci_sk_list.lock */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
425
/* Build the monitor frame describing device lifecycle @event for
 * @hdev (new/del index, index info, open/close).  Returns NULL for
 * unsupported events, for HCI_DEV_SETUP on controllers without a
 * valid manufacturer, or on allocation failure.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Name field of the record is fixed at 8 bytes */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* 0xffff means the manufacturer is not yet known */
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
503
/* Build the HCI_MON_CTRL_OPEN frame announcing socket @sk to monitor
 * listeners: cookie, channel format, version, trust flag and opener's
 * task name.  Returns NULL when the socket has no cookie yet, is on an
 * unsupported channel, or on allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	/* 14 = cookie(4) + format(2) + version(3) + flags(4) + comm len(1) */
	skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
561
/* Build the HCI_MON_CTRL_CLOSE frame for socket @sk, carrying just the
 * socket's cookie.  Returns NULL when the socket never had a cookie,
 * is on an unsupported channel, or on allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
599
/* Build a HCI_MON_CTRL_COMMAND frame mirroring a management command:
 * socket cookie, mgmt opcode and the raw command payload @buf/@len.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* 6 = cookie(4) + opcode(2) */
	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
626
627 static void __printf(2, 3)
628 send_monitor_note(struct sock *sk, const char *fmt, ...)
629 {
630         size_t len;
631         struct hci_mon_hdr *hdr;
632         struct sk_buff *skb;
633         va_list args;
634
635         va_start(args, fmt);
636         len = vsnprintf(NULL, 0, fmt, args);
637         va_end(args);
638
639         skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
640         if (!skb)
641                 return;
642
643         va_start(args, fmt);
644         vsprintf(skb_put(skb, len), fmt, args);
645         *(u8 *)skb_put(skb, 1) = 0;
646         va_end(args);
647
648         __net_timestamp(skb);
649
650         hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
651         hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
652         hdr->index = cpu_to_le16(HCI_DEV_NONE);
653         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
654
655         if (sock_queue_rcv_skb(sk, skb))
656                 kfree_skb(skb);
657 }
658
/* Replay the current state of all registered controllers to a freshly
 * opened monitor socket @sk: a NEW_INDEX event per device, plus OPEN
 * and UP/SETUP events matching each device's current flags.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Only running devices also get an OPEN event */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
700
701 static void send_monitor_control_replay(struct sock *mon_sk)
702 {
703         struct sock *sk;
704
705         read_lock(&hci_sk_list.lock);
706
707         sk_for_each(sk, &hci_sk_list.head) {
708                 struct sk_buff *skb;
709
710                 skb = create_monitor_ctrl_open(sk);
711                 if (!skb)
712                         continue;
713
714                 if (sock_queue_rcv_skb(mon_sk, skb))
715                         kfree_skb(skb);
716         }
717
718         read_unlock(&hci_sk_list.lock);
719 }
720
721 /* Generate internal stack event */
/* Generate internal stack event: wrap @data in an HCI_EV_STACK_INTERNAL
 * event packet, mark it incoming, and deliver it to RAW sockets via
 * hci_send_to_sock().
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present the synthetic event as incoming controller traffic */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
747
/* Notify interested parties about a device lifecycle @event on @hdev:
 * mirror it to monitor sockets, raise a stack-internal event for
 * register/unregister/up/down, and on unregister wake any socket still
 * bound to the dying device with EPIPE.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
787
/* Look up the handler registered for @channel; caller must hold
 * mgmt_chan_list_lock.  Returns NULL when no handler is registered.
 */
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}
799
/* Locked wrapper around __hci_mgmt_chan_find() */
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
810
/* Register handler @c for its management channel.  Returns -EINVAL for
 * channel numbers below HCI_CHANNEL_CONTROL, -EALREADY when the channel
 * already has a handler, 0 on success.
 */
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
829
/* Remove handler @c from the management channel list */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
837
/* Release an HCI socket: emit the monitor close event, unlink from the
 * socket list, drop the device binding (shutting the controller down
 * for USER channel sockets) and free queued skbs.  Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		/* Drop the promiscuity and device references the socket
		 * took when it bound to hdev.
		 */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);
	sock_put(sk);
	return 0;
}
902
903 static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
904 {
905         bdaddr_t bdaddr;
906         int err;
907
908         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
909                 return -EFAULT;
910
911         hci_dev_lock(hdev);
912
913         err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
914
915         hci_dev_unlock(hdev);
916
917         return err;
918 }
919
920 static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
921 {
922         bdaddr_t bdaddr;
923         int err;
924
925         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
926                 return -EFAULT;
927
928         hci_dev_lock(hdev);
929
930         err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
931
932         hci_dev_unlock(hdev);
933
934         return err;
935 }
936
937 /* Ioctls that require bound socket */
/* Ioctls that require bound socket.  Rejects devices under exclusive
 * user channel access (-EBUSY), unconfigured devices and non-primary
 * controllers (-EOPNOTSUPP); unhandled commands fall through to
 * -ENOIOCTLCMD so the caller can continue dispatching.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but keep the
		 * capability check so unprivileged callers get -EPERM.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
980
/* Top-level ioctl handler for HCI sockets. Only raw channel sockets
 * accept ioctls. Device-independent commands are dispatched with the
 * socket unlocked; anything not handled in the switch below falls
 * through to hci_sock_bound_ioctl() under the socket lock.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* Ioctls are only valid on the raw channel */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* The cases below return directly and so must run without the
	 * socket lock held.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Re-acquire the lock for the bound-socket ioctls; "done" below
	 * releases it again.
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1074
#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	/* The device up/down/reset/stat ioctls carry a plain device
	 * index, which needs no translation. Everything else carries a
	 * user pointer that must go through compat_ptr() first.
	 */
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	default:
		return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
	}
}
#endif
1090
/* Bind an HCI socket to a channel (raw, user, monitor, logging or one
 * of the registered management channels) and, where the channel allows
 * it, to a specific controller. On success the socket transitions to
 * BT_BOUND. Returns 0 or a negative errno.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); the memset ensures any fields the
	 * caller did not provide read as zero.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* Binding to a specific device is optional for raw sockets */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			/* Count this socket as a promiscuous listener */
			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires an explicit device, admin
		 * privileges, and exclusive access to the controller.
		 */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse takeover while the device is initializing, being
		 * set up/configured, or already up (unless only kept up by
		 * the auto-off grace period).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel socket per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the device from the management interface while it
		 * is under exclusive user control.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is never bound to a device */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay current state (version notes, existing devices,
		 * open control sockets) to the new monitor listener.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		/* The logging channel is never bound to a device */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		/* Anything else must be a registered management channel */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1386
1387 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1388                             int peer)
1389 {
1390         struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1391         struct sock *sk = sock->sk;
1392         struct hci_dev *hdev;
1393         int err = 0;
1394
1395         BT_DBG("sock %p sk %p", sock, sk);
1396
1397         if (peer)
1398                 return -EOPNOTSUPP;
1399
1400         lock_sock(sk);
1401
1402         hdev = hci_hdev_from_sock(sk);
1403         if (IS_ERR(hdev)) {
1404                 err = PTR_ERR(hdev);
1405                 goto done;
1406         }
1407
1408         haddr->hci_family = AF_BLUETOOTH;
1409         haddr->hci_dev    = hdev->id;
1410         haddr->hci_channel= hci_pi(sk)->channel;
1411         err = sizeof(*haddr);
1412
1413 done:
1414         release_sock(sk);
1415         return err;
1416 }
1417
/* Attach ancillary data for a received skb to the message: packet
 * direction and/or receive timestamp, depending on the per-socket
 * cmsg mask.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit compat tasks expect the 32-bit timeval layout,
		 * so convert before copying the timestamp out.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1454
/* Receive one queued packet on an HCI socket. Truncates to the caller's
 * buffer length (setting MSG_TRUNC) and attaches channel-appropriate
 * ancillary data. Returns the number of bytes copied (or the full skb
 * length when MSG_TRUNC was requested) or a negative errno.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* The logging channel is write-only */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Remember the full length for the MSG_TRUNC return value below */
	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Ancillary data depends on the channel type */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1510
/* Parse and dispatch one management command from a sendmsg() on a
 * management channel socket. Validates the header, permissions, the
 * target device state and the payload length before invoking the
 * handler. Returns msglen on success or a negative errno; command
 * status replies for protocol-level errors are sent to the socket.
 *
 * Called with mgmt_chan_list_lock held by hci_sock_sendmsg().
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must match what was received */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

#ifdef TIZEN_BT
	/* Tizen vendor commands live above TIZEN_OP_CODE_BASE and use a
	 * separate handler table; they skip the monitor notification.
	 */
	if (opcode >= TIZEN_OP_CODE_BASE) {
		u16 tizen_opcode_index = opcode - TIZEN_OP_CODE_BASE;
		if (tizen_opcode_index >= chan->tizen_handler_count ||
		    chan->tizen_handlers[tizen_opcode_index].func == NULL) {
			BT_DBG("Unknown op %u", opcode);
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_UNKNOWN_COMMAND);
			goto done;
		}

		handler = &chan->tizen_handlers[tizen_opcode_index];

	} else {
#endif
	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];
#ifdef TIZEN_BT
	}
#endif

	/* Untrusted sockets may only issue commands explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still being set up/configured or claimed by a
		 * user channel are not addressable via management.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Unless the handler accepts both, the index presence must match
	 * the handler's HCI_MGMT_NO_HDEV expectation.
	 */
	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Variable-length commands give a minimum size; fixed-length
	 * commands must match exactly.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1654
/* Handle a sendmsg() on an HCI_CHANNEL_LOGGING socket: validate the
 * user-supplied logging packet and forward it to all monitor listeners
 * as an HCI_MON_USER_LOGGING event. Returns len on success or a
 * negative errno.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	/* Only opcode 0x0000 is accepted from user space */
	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* If an index is given it must name an existing controller; the
	 * reference is only held for validation.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

	/* We still own the skb after hci_send_to_channel(), so the
	 * success path deliberately falls through to the free below.
	 */
drop:
	kfree_skb(skb);
	return err;
}
1738
/* Transmit handler for HCI sockets.
 *
 * The first byte of the user payload is the HCI packet type indicator;
 * the remainder is the HCI packet itself.  Routing depends on the
 * channel the socket is bound to:
 *   - RAW/USER:   frame is queued towards the controller
 *   - MONITOR:    transmit is not supported (read-only channel)
 *   - LOGGING:    payload is handed to hci_logging_frame()
 *   - otherwise:  treated as a management-channel command
 *
 * Returns the number of bytes consumed (len) on success or a negative
 * errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Only this subset of message flags is meaningful here */
	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum 4 bytes: the packet type indicator plus what is
	 * presumably the smallest HCI header — TODO confirm against the
	 * frame formats.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	/* Dispatch on the bound channel; mgmt channels are handled
	 * entirely here and return without touching hdev.
	 */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* Management channel: look up the handler under the
		 * channel-list lock and run the command through it.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	/* Frames can only be sent while the controller is up */
	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	/* On failure err has already been set through the &err argument */
	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* Strip the leading packet type indicator into the skb metadata */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		/* User channel bypasses the command queue entirely */
		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Raw-channel commands outside the security filter need
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* OGF 0x3f (vendor-specific) commands go straight
			 * to the raw queue instead of the command queue.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw-channel data packets always require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	/* Success: report the full length as consumed */
	err = len;

done:
	release_sock(sk);
	return err;

drop:
	/* skb was not queued, so it is still owned here */
	kfree_skb(skb);
	goto done;
}
1880
1881 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1882                                sockptr_t optval, unsigned int len)
1883 {
1884         struct hci_ufilter uf = { .opcode = 0 };
1885         struct sock *sk = sock->sk;
1886         int err = 0, opt = 0;
1887
1888         BT_DBG("sk %p, opt %d", sk, optname);
1889
1890         if (level != SOL_HCI)
1891                 return -ENOPROTOOPT;
1892
1893         lock_sock(sk);
1894
1895         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1896                 err = -EBADFD;
1897                 goto done;
1898         }
1899
1900         switch (optname) {
1901         case HCI_DATA_DIR:
1902                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1903                         err = -EFAULT;
1904                         break;
1905                 }
1906
1907                 if (opt)
1908                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1909                 else
1910                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1911                 break;
1912
1913         case HCI_TIME_STAMP:
1914                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1915                         err = -EFAULT;
1916                         break;
1917                 }
1918
1919                 if (opt)
1920                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1921                 else
1922                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1923                 break;
1924
1925         case HCI_FILTER:
1926                 {
1927                         struct hci_filter *f = &hci_pi(sk)->filter;
1928
1929                         uf.type_mask = f->type_mask;
1930                         uf.opcode    = f->opcode;
1931                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1932                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1933                 }
1934
1935                 len = min_t(unsigned int, len, sizeof(uf));
1936                 if (copy_from_sockptr(&uf, optval, len)) {
1937                         err = -EFAULT;
1938                         break;
1939                 }
1940
1941                 if (!capable(CAP_NET_RAW)) {
1942                         uf.type_mask &= hci_sec_filter.type_mask;
1943                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1944                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1945                 }
1946
1947                 {
1948                         struct hci_filter *f = &hci_pi(sk)->filter;
1949
1950                         f->type_mask = uf.type_mask;
1951                         f->opcode    = uf.opcode;
1952                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1953                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1954                 }
1955                 break;
1956
1957         default:
1958                 err = -ENOPROTOOPT;
1959                 break;
1960         }
1961
1962 done:
1963         release_sock(sk);
1964         return err;
1965 }
1966
/* Get a SOL_HCI socket option from a raw-channel HCI socket.
 *
 * Supported options mirror hci_sock_setsockopt(): HCI_DATA_DIR and
 * HCI_TIME_STAMP report the cmsg mask bits as 0/1, HCI_FILTER copies
 * out the currently installed filter.
 *
 * NOTE(review): *optlen is read but never written back, so callers do
 * not learn how many bytes were actually copied — presumably legacy
 * ABI behaviour; confirm before changing.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	/* These options only exist on the raw channel */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		/* Snapshot the filter into the user-visible layout */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		/* min_t() on unsigned int also clamps a negative len
		 * (it wraps to a huge value and sizeof(uf) wins).
		 */
		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
2035
2036 static const struct proto_ops hci_sock_ops = {
2037         .family         = PF_BLUETOOTH,
2038         .owner          = THIS_MODULE,
2039         .release        = hci_sock_release,
2040         .bind           = hci_sock_bind,
2041         .getname        = hci_sock_getname,
2042         .sendmsg        = hci_sock_sendmsg,
2043         .recvmsg        = hci_sock_recvmsg,
2044         .ioctl          = hci_sock_ioctl,
2045 #ifdef CONFIG_COMPAT
2046         .compat_ioctl   = hci_sock_compat_ioctl,
2047 #endif
2048         .poll           = datagram_poll,
2049         .listen         = sock_no_listen,
2050         .shutdown       = sock_no_shutdown,
2051         .setsockopt     = hci_sock_setsockopt,
2052         .getsockopt     = hci_sock_getsockopt,
2053         .connect        = sock_no_connect,
2054         .socketpair     = sock_no_socketpair,
2055         .accept         = sock_no_accept,
2056         .mmap           = sock_no_mmap
2057 };
2058
/* Protocol descriptor for HCI sockets; obj_size makes sk_alloc()
 * allocate room for the per-socket struct hci_pinfo.
 */
static struct proto hci_sk_proto = {
	.name           = "HCI",
	.owner          = THIS_MODULE,
	.obj_size       = sizeof(struct hci_pinfo)
};
2064
/* Create a new HCI socket (PF_BLUETOOTH/BTPROTO_HCI).
 *
 * Only SOCK_RAW is supported.  Allocates the sock, initializes it in
 * BT_OPEN state, and links it into hci_sk_list so it is visible to the
 * rest of the HCI socket layer.
 *
 * Returns 0 on success, -ESOCKTNOSUPPORT for a wrong socket type or
 * -ENOMEM if allocation fails.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	/* NOTE(review): presumably clears the "dead" marker set by
	 * default for raw sockets — confirm SOCK_ZAPPED semantics.
	 */
	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	/* Make the socket visible on the global HCI socket list */
	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
2093
/* Family handler registered for BTPROTO_HCI; routes socket(2) calls
 * to hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner  = THIS_MODULE,
	.create = hci_sock_create,
};
2099
/* Register the HCI socket layer: proto, socket family and procfs entry,
 * in that order.  On failure everything registered so far is unwound.
 *
 * Returns 0 on success or a negative errno.
 */
int __init hci_sock_init(void)
{
	int err;

	/* sockaddr_hci must fit in a generic sockaddr for the uapi */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* Unwind the family registration before the common path */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
2131
/* Tear down the HCI socket layer in the reverse order of
 * hci_sock_init(): procfs entry, socket family, then proto.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}