Bluetooth: Set link Supervision timeout for a connection
[platform/kernel/linux-rpi.git] / net / bluetooth / hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26 #include <linux/compat.h>
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36 #ifdef TIZEN_BT
37 #include <net/bluetooth/mgmt_tizen.h>
38 #endif
39
40 #include "mgmt_util.h"
41
/* Registered management channels (HCI_CHANNEL_CONTROL and above),
 * protected by mgmt_chan_list_lock.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Allocator for per-socket monitor tracing cookies */
static DEFINE_IDA(sock_cookie_ida);

/* Count of monitor sockets in promiscuous mode; decremented in
 * hci_sock_release(), checked before building monitor traffic.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
48
49 /* ----- HCI socket interface ----- */
50
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state; embeds struct bt_sock first so a struct sock
 * pointer can be cast directly via hci_pi().
 */
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* bound controller, NULL when unbound */
	struct hci_filter filter;	/* RAW-channel packet/event filter */
	__u8              cmsg_mask;	/* control message mask */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket uses */
	unsigned long     flags;	/* HCI_SOCK_* flag bits */
	__u32             cookie;	/* monitor tracing cookie, 0 = unset */
	char              comm[TASK_COMM_LEN];	/* task name captured with cookie */
	__u16             mtu;
};
65
66 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
67 {
68         struct hci_dev *hdev = hci_pi(sk)->hdev;
69
70         if (!hdev)
71                 return ERR_PTR(-EBADFD);
72         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
73                 return ERR_PTR(-EPIPE);
74         return hdev;
75 }
76
/* Atomically set HCI_SOCK_* flag bit @nr on @sk */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
81
/* Atomically clear HCI_SOCK_* flag bit @nr on @sk */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
86
/* Test HCI_SOCK_* flag bit @nr on @sk; returns non-zero when set */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
91
/* Return the HCI_CHANNEL_* this socket is bound to */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
96
/* Return the monitor tracing cookie of @sk (0 when none was assigned) */
u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
101
102 static bool hci_sock_gen_cookie(struct sock *sk)
103 {
104         int id = hci_pi(sk)->cookie;
105
106         if (!id) {
107                 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
108                 if (id < 0)
109                         id = 0xffffffff;
110
111                 hci_pi(sk)->cookie = id;
112                 get_task_comm(hci_pi(sk)->comm, current);
113                 return true;
114         }
115
116         return false;
117 }
118
119 static void hci_sock_free_cookie(struct sock *sk)
120 {
121         int id = hci_pi(sk)->cookie;
122
123         if (id) {
124                 hci_pi(sk)->cookie = 0xffffffff;
125                 ida_simple_remove(&sock_cookie_ida, id);
126         }
127 }
128
/* Test bit @nr in an array of __u32 words.  Open-coded instead of
 * test_bit() because the filter masks have a fixed __u32 layout
 * (shared with userspace) rather than unsigned long granularity.
 */
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
133
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmaps of packet types, events and per-OGF command opcodes that are
 * permitted through the security filter (applied to unprivileged
 * sockets elsewhere in this file's send/filter paths).
 */
struct hci_sec_filter {
	__u32 type_mask;			/* allowed HCI packet types */
	__u32 event_mask[2];			/* allowed HCI events, bit = event code */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs, indexed by OGF */
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
163
/* Global list of all open HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
167
/* Decide whether @skb must be withheld from RAW socket @sk according to
 * the socket's filter.  Returns true when the packet is filtered out,
 * false when it may be delivered.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	/* Packet type not enabled in the socket's type mask */
	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Command Complete: opcode lives at offset 3 of the event packet */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	/* Command Status: opcode lives at offset 4 of the event packet */
	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
204
205 /* Send frame to RAW socket */
/* Deliver @skb from controller @hdev to every matching RAW/USER socket.
 * A single private copy (with the packet-type byte pushed in front) is
 * created lazily and then cloned per receiver, so nothing is allocated
 * when no socket wants the frame.  Runs under the socket list read lock.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this controller */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sockets see the core packet types only and
			 * are further subject to the per-socket filter.
			 */
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel sockets only receive incoming
			 * traffic and never command packets.
			 */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Queue to the receiver; on failure the clone is ours to free */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
269
270 static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
271 {
272         struct scm_creds *creds;
273
274         if (!sk || WARN_ON(!skb))
275                 return;
276
277         creds = &bt_cb(skb)->creds;
278
279         /* Check if peer credentials is set */
280         if (!sk->sk_peer_pid) {
281                 /* Check if parent peer credentials is set */
282                 if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
283                         sk = bt_sk(sk)->parent;
284                 else
285                         return;
286         }
287
288         /* Check if scm_creds already set */
289         if (creds->pid == pid_vnr(sk->sk_peer_pid))
290                 return;
291
292         memset(creds, 0, sizeof(*creds));
293
294         creds->pid = pid_vnr(sk->sk_peer_pid);
295         if (sk->sk_peer_cred) {
296                 creds->uid = sk->sk_peer_cred->uid;
297                 creds->gid = sk->sk_peer_cred->gid;
298         }
299 }
300
301 static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
302 {
303         struct sk_buff *nskb;
304
305         if (!skb)
306                 return NULL;
307
308         nskb = skb_clone(skb, GFP_ATOMIC);
309         if (!nskb)
310                 return NULL;
311
312         hci_sock_copy_creds(skb->sk, nskb);
313
314         return nskb;
315 }
316
317 /* Send frame to sockets with specific channel */
318 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
319                                   int flag, struct sock *skip_sk)
320 {
321         struct sock *sk;
322
323         BT_DBG("channel %u len %d", channel, skb->len);
324
325         sk_for_each(sk, &hci_sk_list.head) {
326                 struct sk_buff *nskb;
327
328                 /* Ignore socket without the flag set */
329                 if (!hci_sock_test_flag(sk, flag))
330                         continue;
331
332                 /* Skip the original socket */
333                 if (sk == skip_sk)
334                         continue;
335
336                 if (sk->sk_state != BT_BOUND)
337                         continue;
338
339                 if (hci_pi(sk)->channel != channel)
340                         continue;
341
342                 nskb = hci_skb_clone(skb);
343                 if (!nskb)
344                         continue;
345
346                 if (sock_queue_rcv_skb(sk, nskb))
347                         kfree_skb(nskb);
348         }
349
350 }
351
/* Locked wrapper around __hci_send_to_channel(): takes the socket list
 * read lock for the duration of the broadcast.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
359
360 /* Send frame to monitor socket */
/* Mirror @skb to the monitor channel: map the HCI packet type (and, for
 * data packets, its direction) to a HCI_MON_* opcode, prepend a monitor
 * header and broadcast to trusted monitor sockets.  Cheap no-op while
 * no monitor socket is in promiscuous mode.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		/* Unknown packet types are not traced */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	hci_sock_copy_creds(skb->sk, skb_copy);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
421
/* For every control-channel socket carrying @flag (except @skip_sk),
 * emit a HCI_MON_CTRL_EVENT to the monitor channel, tagged with that
 * socket's cookie plus @event and @data.  @index names the controller,
 * or MGMT_INDEX_NONE when @hdev is NULL; @tstamp is copied verbatim
 * into each generated skb.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* Payload: 4-byte cookie, 2-byte event code, @data */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* Socket list read lock is already held here */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
475
/* Build the monitor message describing device lifecycle @event for
 * @hdev (new/del index, index info, open/close).  Returns a freshly
 * allocated, timestamped skb with the monitor header pushed, or NULL
 * for unsupported events or on allocation failure.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Bounded copy with zero padding so no stack/heap bytes
		 * beyond the name can leak into the message.
		 */
		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
			       strnlen(hdev->name, sizeof(ni->name)), '\0');

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* No index info while the manufacturer is still the
		 * 0xffff placeholder.
		 */
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
554
/* Build a HCI_MON_CTRL_OPEN message announcing socket @sk to monitors.
 * Payload: cookie, per-channel format code (0 RAW, 1 USER, 2 CONTROL),
 * 3-byte version, flags (bit 0 = trusted), and the opener's task name.
 * Returns NULL when @sk has no cookie or uses an unsupported channel.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	/* Index is the bound controller, or HCI_DEV_NONE when unbound */
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
614
615 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
616 {
617         struct hci_mon_hdr *hdr;
618         struct sk_buff *skb;
619
620         /* No message needed when cookie is not present */
621         if (!hci_pi(sk)->cookie)
622                 return NULL;
623
624         switch (hci_pi(sk)->channel) {
625         case HCI_CHANNEL_RAW:
626         case HCI_CHANNEL_USER:
627         case HCI_CHANNEL_CONTROL:
628                 break;
629         default:
630                 /* No message for unsupported format */
631                 return NULL;
632         }
633
634         skb = bt_skb_alloc(4, GFP_ATOMIC);
635         if (!skb)
636                 return NULL;
637
638         hci_sock_copy_creds(sk, skb);
639
640         put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
641
642         __net_timestamp(skb);
643
644         hdr = skb_push(skb, HCI_MON_HDR_SIZE);
645         hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
646         if (hci_pi(sk)->hdev)
647                 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
648         else
649                 hdr->index = cpu_to_le16(HCI_DEV_NONE);
650         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
651
652         return skb;
653 }
654
/* Build a HCI_MON_CTRL_COMMAND message recording a management command
 * (@opcode with @len bytes of @buf) issued by socket @sk against
 * controller @index.  Returns NULL on allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* Payload: 4-byte cookie, 2-byte opcode, command parameters */
	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
683
/* Queue a free-form HCI_MON_SYSTEM_NOTE text message on monitor socket
 * @sk.  The format string is expanded twice: once with a NULL buffer to
 * size the allocation, then into the skb itself.
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass: measure the formatted length */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	/* len text bytes plus one explicit NUL terminator */
	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	hci_sock_copy_creds(sk, skb);

	/* Second pass: format into the skb and terminate */
	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
717
/* Replay the current state of every registered controller to a newly
 * opened monitor socket @sk: a NEW_INDEX event per device, followed by
 * OPEN and UP/SETUP info for devices that are running.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Devices that are not running get only the REG event */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Pick the most specific state event, if any applies */
		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
759
760 static void send_monitor_control_replay(struct sock *mon_sk)
761 {
762         struct sock *sk;
763
764         read_lock(&hci_sk_list.lock);
765
766         sk_for_each(sk, &hci_sk_list.head) {
767                 struct sk_buff *skb;
768
769                 skb = create_monitor_ctrl_open(sk);
770                 if (!skb)
771                         continue;
772
773                 if (sock_queue_rcv_skb(mon_sk, skb))
774                         kfree_skb(skb);
775         }
776
777         read_unlock(&hci_sk_list.lock);
778 }
779
780 /* Generate internal stack event */
/* Fabricate a HCI_EV_STACK_INTERNAL event packet carrying @type and
 * @dlen bytes of @data, mark it incoming, and deliver it through
 * hci_send_to_sock().  @hdev may be NULL for device-independent events.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present the synthetic event as incoming controller traffic */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
806
/* Handle a device lifecycle event: mirror it to monitor sockets, fan
 * out a stack-internal event for the low-numbered device events, and on
 * unregistration flag every socket bound to the device with EPIPE so
 * blocked readers wake up.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
846
847 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
848 {
849         struct hci_mgmt_chan *c;
850
851         list_for_each_entry(c, &mgmt_chan_list, list) {
852                 if (c->channel == channel)
853                         return c;
854         }
855
856         return NULL;
857 }
858
/* Locked wrapper around __hci_mgmt_chan_find().  Note the returned
 * pointer is used after the lock is dropped; channel lifetime is
 * managed by the register/unregister callers.
 */
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
869
870 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
871 {
872         if (c->channel < HCI_CHANNEL_CONTROL)
873                 return -EINVAL;
874
875         mutex_lock(&mgmt_chan_list_lock);
876         if (__hci_mgmt_chan_find(c->channel)) {
877                 mutex_unlock(&mgmt_chan_list_lock);
878                 return -EALREADY;
879         }
880
881         list_add_tail(&c->list, &mgmt_chan_list);
882
883         mutex_unlock(&mgmt_chan_list_lock);
884
885         return 0;
886 }
887 EXPORT_SYMBOL(hci_mgmt_chan_register);
888
/* Unregister management channel @c, removing it from the global list
 * under mgmt_chan_list_lock.
 */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
896
/* Release an HCI socket: announce the close to monitors, free the
 * tracing cookie, unlink from the global socket list, drop the device
 * binding (shutting the controller down again for USER channel
 * sockets), then orphan and put the sock.  Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		/* One less promiscuous monitor socket */
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been complete and hdev will get released when we put
			 * below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		/* Drop the promiscuous count and device reference taken
		 * when the socket bound to this hdev.
		 */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
963
964 static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
965 {
966         bdaddr_t bdaddr;
967         int err;
968
969         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
970                 return -EFAULT;
971
972         hci_dev_lock(hdev);
973
974         err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
975
976         hci_dev_unlock(hdev);
977
978         return err;
979 }
980
981 static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
982 {
983         bdaddr_t bdaddr;
984         int err;
985
986         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
987                 return -EFAULT;
988
989         hci_dev_lock(hdev);
990
991         err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
992
993         hci_dev_unlock(hdev);
994
995         return err;
996 }
997
998 /* Ioctls that require bound socket */
999 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
1000                                 unsigned long arg)
1001 {
1002         struct hci_dev *hdev = hci_hdev_from_sock(sk);
1003
1004         if (IS_ERR(hdev))
1005                 return PTR_ERR(hdev);
1006
1007         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1008                 return -EBUSY;
1009
1010         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1011                 return -EOPNOTSUPP;
1012
1013         if (hdev->dev_type != HCI_PRIMARY)
1014                 return -EOPNOTSUPP;
1015
1016         switch (cmd) {
1017         case HCISETRAW:
1018                 if (!capable(CAP_NET_ADMIN))
1019                         return -EPERM;
1020                 return -EOPNOTSUPP;
1021
1022         case HCIGETCONNINFO:
1023                 return hci_get_conn_info(hdev, (void __user *)arg);
1024
1025         case HCIGETAUTHINFO:
1026                 return hci_get_auth_info(hdev, (void __user *)arg);
1027
1028         case HCIBLOCKADDR:
1029                 if (!capable(CAP_NET_ADMIN))
1030                         return -EPERM;
1031                 return hci_sock_reject_list_add(hdev, (void __user *)arg);
1032
1033         case HCIUNBLOCKADDR:
1034                 if (!capable(CAP_NET_ADMIN))
1035                         return -EPERM;
1036                 return hci_sock_reject_list_del(hdev, (void __user *)arg);
1037         }
1038
1039         return -ENOIOCTLCMD;
1040 }
1041
/* Main ioctl entry point for HCI sockets. Only raw channel sockets
 * may issue ioctls. Device-global commands (dev list/info, up/down,
 * reset, inquiry, ...) are handled here without the socket lock;
 * everything else falls through to hci_sock_bound_ioctl() with the
 * lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	/* Only raw channel sockets accept the classic ioctl interface */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* The device-global commands below may sleep (e.g. powering a
	 * controller up or down), so they run without the socket lock.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need the socket bound to a device and are
	 * dispatched with the socket lock held again.
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1170
#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl entry point. The dev up/down/reset commands
 * pass the device index by value, so they need no pointer
 * translation; every other command carries a user pointer that must
 * go through compat_ptr() before reaching the native handler.
 */
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		/* Argument is a plain device index, not a pointer */
		return hci_sock_ioctl(sock, cmd, arg);
	default:
		return hci_sock_ioctl(sock, cmd,
				      (unsigned long)compat_ptr(arg));
	}
}
#endif
1186
/* Bind an HCI socket to a channel and, for raw/user channels, to a
 * specific controller. The channel requested in the sockaddr selects
 * the personality of the socket:
 *
 *   HCI_CHANNEL_RAW     - classic HCI access, optionally per device
 *   HCI_CHANNEL_USER    - exclusive userspace control of one device
 *   HCI_CHANNEL_MONITOR - read-only tracing of all HCI traffic
 *   HCI_CHANNEL_LOGGING - injection of userspace log messages
 *   otherwise           - a registered management channel
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); a short sockaddr leaves the rest
	 * zeroed by the memset above it.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE binds the raw socket to all controllers */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires a concrete device index */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Exclusive access is only granted while the device is not
		 * being set up, configured or already in active use (the
		 * HCI_AUTO_OFF grace period being the exception).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the index from the management interface while it is
		 * under exclusive userspace control.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is always device-independent */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay current state (version notes, registered devices,
		 * open control sockets) so a new monitor starts in sync.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		/* Any other channel number must be a registered management
		 * channel (e.g. HCI_CHANNEL_CONTROL).
		 */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1486
1487 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1488                             int peer)
1489 {
1490         struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1491         struct sock *sk = sock->sk;
1492         struct hci_dev *hdev;
1493         int err = 0;
1494
1495         BT_DBG("sock %p sk %p", sock, sk);
1496
1497         if (peer)
1498                 return -EOPNOTSUPP;
1499
1500         lock_sock(sk);
1501
1502         hdev = hci_hdev_from_sock(sk);
1503         if (IS_ERR(hdev)) {
1504                 err = PTR_ERR(hdev);
1505                 goto done;
1506         }
1507
1508         haddr->hci_family = AF_BLUETOOTH;
1509         haddr->hci_dev    = hdev->id;
1510         haddr->hci_channel= hci_pi(sk)->channel;
1511         err = sizeof(*haddr);
1512
1513 done:
1514         release_sock(sk);
1515         return err;
1516 }
1517
/* Attach the ancillary data requested via the socket's cmsg mask
 * (packet direction and/or receive timestamp) to a message being
 * delivered on a raw channel socket.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks using the compat ABI expect the old 32-bit
		 * timeval layout instead of the native one.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1554
/* Receive one queued packet on an HCI socket. Logging channel
 * sockets are write-only and refuse reads. Per-channel ancillary
 * data (direction/timestamp cmsgs for raw, timestamps for user,
 * monitor and mgmt channels) is attached before the skb is freed.
 *
 * Returns the number of bytes copied (the full packet length when
 * MSG_TRUNC is set), 0 on a closed socket, or a negative errno.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* The logging channel is write-only */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	/* Remember the full packet length for MSG_TRUNC reporting */
	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	/* Pass the sender's credentials along as SCM data */
	memset(&scm, 0, sizeof(scm));
	scm.creds = bt_cb(skb)->creds;

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	scm_recv(sock, msg, &scm, flags);

	return err ? : copied;
}
1615
/* Parse and dispatch one management command received on a mgmt
 * channel socket. Validates the header, looks up the handler
 * (including Tizen vendor handlers when TIZEN_BT is set), enforces
 * trust and device-state restrictions, checks the payload length
 * and finally invokes the handler.
 *
 * Returns the consumed length (skb->len) on success or a negative
 * errno. Command failures visible to userspace are reported via
 * mgmt_cmd_status() events rather than the return value.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must match the actual payload size */
	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

#ifdef TIZEN_BT
	/* Tizen vendor opcodes live above TIZEN_OP_CODE_BASE and use a
	 * separate handler table.
	 */
	if (opcode >= TIZEN_OP_CODE_BASE) {
		u16 tizen_opcode_index = opcode - TIZEN_OP_CODE_BASE;
		if (tizen_opcode_index >= chan->tizen_handler_count ||
		    chan->tizen_handlers[tizen_opcode_index].func == NULL) {
			BT_DBG("Unknown op %u", opcode);
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_UNKNOWN_COMMAND);
			goto done;
		}

		handler = &chan->tizen_handlers[tizen_opcode_index];

	} else {
#endif

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];
#ifdef TIZEN_BT
	}
#endif

	/* Untrusted sockets may only issue explicitly untrusted-safe
	 * commands.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still setting up, being configured, or claimed
		 * exclusively via the user channel are not addressable.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Unless optional, the presence of a device index must match the
	 * handler's expectation.
	 */
	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Variable-length commands give a minimum size, fixed-length
	 * commands an exact one.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}
1749
/* Validate a userspace logging frame sent on the logging channel and
 * forward it to the monitor channel as an HCI_MON_USER_LOGGING
 * packet. The optional device index is only looked up to verify that
 * the referenced controller exists.
 *
 * Returns the frame length on success, -EINVAL for a malformed
 * frame, or -ENODEV for an unknown device index.
 */
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		/* Only opcode 0x0000 is accepted from userspace */
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	/* MGMT_INDEX_NONE means the log entry is not tied to a device;
	 * otherwise the referenced controller must exist.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	/* Rewrite the opcode before handing the frame to the monitor */
	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
1815
/* sendmsg() handler for HCI sockets. Dispatches the frame based on the
 * channel the socket is bound to:
 *  - RAW/USER: deliver to the controller's transmit queues,
 *  - MONITOR: read-only channel, writing is refused,
 *  - LOGGING: validated and broadcast via hci_logging_frame(),
 *  - anything else: treated as a registered mgmt channel command.
 *
 * Returns the number of bytes sent or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Reject any flag outside the supported set. */
	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum is packet-type byte plus a 3-byte HCI command header;
	 * maximum is bounded by the per-socket MTU.
	 */
	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor channel is receive-only. */
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		/* hci_logging_frame() does not consume skb; fall through to
		 * drop so it is freed here.
		 */
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		/* Any other channel number is looked up as a mgmt channel. */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	/* RAW/USER channels need a bound, running controller. */
	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	/* First byte of the payload is the HCI packet type indicator. */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require CAP_NET_RAW. */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		/* OGF 0x3f (vendor specific) bypasses the command queue. */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets require CAP_NET_RAW and a valid type. */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1952
1953 static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
1954                                    sockptr_t optval, unsigned int len)
1955 {
1956         struct hci_ufilter uf = { .opcode = 0 };
1957         struct sock *sk = sock->sk;
1958         int err = 0, opt = 0;
1959
1960         BT_DBG("sk %p, opt %d", sk, optname);
1961
1962         lock_sock(sk);
1963
1964         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1965                 err = -EBADFD;
1966                 goto done;
1967         }
1968
1969         switch (optname) {
1970         case HCI_DATA_DIR:
1971                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1972                         err = -EFAULT;
1973                         break;
1974                 }
1975
1976                 if (opt)
1977                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1978                 else
1979                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1980                 break;
1981
1982         case HCI_TIME_STAMP:
1983                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1984                         err = -EFAULT;
1985                         break;
1986                 }
1987
1988                 if (opt)
1989                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1990                 else
1991                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1992                 break;
1993
1994         case HCI_FILTER:
1995                 {
1996                         struct hci_filter *f = &hci_pi(sk)->filter;
1997
1998                         uf.type_mask = f->type_mask;
1999                         uf.opcode    = f->opcode;
2000                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2001                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2002                 }
2003
2004                 len = min_t(unsigned int, len, sizeof(uf));
2005                 if (copy_from_sockptr(&uf, optval, len)) {
2006                         err = -EFAULT;
2007                         break;
2008                 }
2009
2010                 if (!capable(CAP_NET_RAW)) {
2011                         uf.type_mask &= hci_sec_filter.type_mask;
2012                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
2013                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
2014                 }
2015
2016                 {
2017                         struct hci_filter *f = &hci_pi(sk)->filter;
2018
2019                         f->type_mask = uf.type_mask;
2020                         f->opcode    = uf.opcode;
2021                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
2022                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
2023                 }
2024                 break;
2025
2026         default:
2027                 err = -ENOPROTOOPT;
2028                 break;
2029         }
2030
2031 done:
2032         release_sock(sk);
2033         return err;
2034 }
2035
2036 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
2037                                sockptr_t optval, unsigned int len)
2038 {
2039         struct sock *sk = sock->sk;
2040         int err = 0;
2041         u16 opt;
2042
2043         BT_DBG("sk %p, opt %d", sk, optname);
2044
2045         if (level == SOL_HCI)
2046                 return hci_sock_setsockopt_old(sock, level, optname, optval,
2047                                                len);
2048
2049         if (level != SOL_BLUETOOTH)
2050                 return -ENOPROTOOPT;
2051
2052         lock_sock(sk);
2053
2054         switch (optname) {
2055         case BT_SNDMTU:
2056         case BT_RCVMTU:
2057                 switch (hci_pi(sk)->channel) {
2058                 /* Don't allow changing MTU for channels that are meant for HCI
2059                  * traffic only.
2060                  */
2061                 case HCI_CHANNEL_RAW:
2062                 case HCI_CHANNEL_USER:
2063                         err = -ENOPROTOOPT;
2064                         goto done;
2065                 }
2066
2067                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
2068                         err = -EFAULT;
2069                         break;
2070                 }
2071
2072                 hci_pi(sk)->mtu = opt;
2073                 break;
2074
2075         default:
2076                 err = -ENOPROTOOPT;
2077                 break;
2078         }
2079
2080 done:
2081         release_sock(sk);
2082         return err;
2083 }
2084
2085 static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
2086                                    char __user *optval, int __user *optlen)
2087 {
2088         struct hci_ufilter uf;
2089         struct sock *sk = sock->sk;
2090         int len, opt, err = 0;
2091
2092         BT_DBG("sk %p, opt %d", sk, optname);
2093
2094         if (get_user(len, optlen))
2095                 return -EFAULT;
2096
2097         lock_sock(sk);
2098
2099         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
2100                 err = -EBADFD;
2101                 goto done;
2102         }
2103
2104         switch (optname) {
2105         case HCI_DATA_DIR:
2106                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
2107                         opt = 1;
2108                 else
2109                         opt = 0;
2110
2111                 if (put_user(opt, optval))
2112                         err = -EFAULT;
2113                 break;
2114
2115         case HCI_TIME_STAMP:
2116                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2117                         opt = 1;
2118                 else
2119                         opt = 0;
2120
2121                 if (put_user(opt, optval))
2122                         err = -EFAULT;
2123                 break;
2124
2125         case HCI_FILTER:
2126                 {
2127                         struct hci_filter *f = &hci_pi(sk)->filter;
2128
2129                         memset(&uf, 0, sizeof(uf));
2130                         uf.type_mask = f->type_mask;
2131                         uf.opcode    = f->opcode;
2132                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2133                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2134                 }
2135
2136                 len = min_t(unsigned int, len, sizeof(uf));
2137                 if (copy_to_user(optval, &uf, len))
2138                         err = -EFAULT;
2139                 break;
2140
2141         default:
2142                 err = -ENOPROTOOPT;
2143                 break;
2144         }
2145
2146 done:
2147         release_sock(sk);
2148         return err;
2149 }
2150
2151 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2152                                char __user *optval, int __user *optlen)
2153 {
2154         struct sock *sk = sock->sk;
2155         int err = 0;
2156
2157         BT_DBG("sk %p, opt %d", sk, optname);
2158
2159         if (level == SOL_HCI)
2160                 return hci_sock_getsockopt_old(sock, level, optname, optval,
2161                                                optlen);
2162
2163         if (level != SOL_BLUETOOTH)
2164                 return -ENOPROTOOPT;
2165
2166         lock_sock(sk);
2167
2168         switch (optname) {
2169         case BT_SNDMTU:
2170         case BT_RCVMTU:
2171                 if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2172                         err = -EFAULT;
2173                 break;
2174
2175         default:
2176                 err = -ENOPROTOOPT;
2177                 break;
2178         }
2179
2180         release_sock(sk);
2181         return err;
2182 }
2183
/* Socket destructor: let mgmt release per-socket state first, then drop
 * any packets still queued on the socket before its memory is freed.
 */
static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
2190
/* proto_ops for PF_BLUETOOTH HCI sockets. These are raw datagram-style
 * sockets: all connection-oriented operations are stubbed out with the
 * sock_no_* helpers.
 */
static const struct proto_ops hci_sock_ops = {
	.family         = PF_BLUETOOTH,
	.owner          = THIS_MODULE,
	.release        = hci_sock_release,
	.bind           = hci_sock_bind,
	.getname        = hci_sock_getname,
	.sendmsg        = hci_sock_sendmsg,
	.recvmsg        = hci_sock_recvmsg,
	.ioctl          = hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = hci_sock_compat_ioctl,
#endif
	.poll           = datagram_poll,
	.listen         = sock_no_listen,
	.shutdown       = sock_no_shutdown,
	.setsockopt     = hci_sock_setsockopt,
	.getsockopt     = hci_sock_getsockopt,
	.connect        = sock_no_connect,
	.socketpair     = sock_no_socketpair,
	.accept         = sock_no_accept,
	.mmap           = sock_no_mmap
};
2213
/* Protocol descriptor; obj_size makes sock allocation large enough to
 * hold the HCI-specific struct hci_pinfo (see the hci_pi() accessor).
 */
static struct proto hci_sk_proto = {
	.name           = "HCI",
	.owner          = THIS_MODULE,
	.obj_size       = sizeof(struct hci_pinfo)
};
2219
2220 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2221                            int kern)
2222 {
2223         struct sock *sk;
2224
2225         BT_DBG("sock %p", sock);
2226
2227         if (sock->type != SOCK_RAW)
2228                 return -ESOCKTNOSUPPORT;
2229
2230         sock->ops = &hci_sock_ops;
2231
2232         sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
2233                            kern);
2234         if (!sk)
2235                 return -ENOMEM;
2236
2237         sock->state = SS_UNCONNECTED;
2238         sk->sk_destruct = hci_sock_destruct;
2239
2240         bt_sock_link(&hci_sk_list, sk);
2241         return 0;
2242 }
2243
/* Family descriptor registered with the Bluetooth core for BTPROTO_HCI. */
static const struct net_proto_family hci_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner  = THIS_MODULE,
	.create = hci_sock_create,
};
2249
2250 int __init hci_sock_init(void)
2251 {
2252         int err;
2253
2254         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2255
2256         err = proto_register(&hci_sk_proto, 0);
2257         if (err < 0)
2258                 return err;
2259
2260         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2261         if (err < 0) {
2262                 BT_ERR("HCI socket registration failed");
2263                 goto error;
2264         }
2265
2266         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2267         if (err < 0) {
2268                 BT_ERR("Failed to create HCI proc file");
2269                 bt_sock_unregister(BTPROTO_HCI);
2270                 goto error;
2271         }
2272
2273         BT_INFO("HCI socket layer initialized");
2274
2275         return 0;
2276
2277 error:
2278         proto_unregister(&hci_sk_proto);
2279         return err;
2280 }
2281
/* Tear down the HCI socket layer in the reverse order of
 * hci_sock_init(): /proc entry, socket family, then the protocol.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}