linux-3.10-sc7730: net/bluetooth/hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
35
36 /* ----- HCI socket interface ----- */
37
38 /* Socket info */
39 #define hci_pi(sk) ((struct hci_pinfo *) sk)
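/* The cast works because struct hci_pinfo below starts with struct bt_sock,
 * which in turn starts with struct sock, so the protocol-private data can be
 * reached directly from the struct sock pointer handed in by the socket core.
 */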
40
41 struct hci_pinfo {
42         struct bt_sock    bt;
43         struct hci_dev    *hdev;
44         struct hci_filter filter;
45         __u32             cmsg_mask;
46         unsigned short    channel;
47 };
48
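/* Test bit @nr in a bitmap of 32-bit words: nr >> 5 selects the word and
 * nr & 31 the bit within it, e.g. nr == 40 tests bit 8 of the second word.
 */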
49 static inline int hci_test_bit(int nr, void *addr)
50 {
51         return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
52 }
53
54 /* Security filter */
55 #define HCI_SFLT_MAX_OGF  5
56
57 struct hci_sec_filter {
58         __u32 type_mask;
59         __u32 event_mask[2];
60         __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
61 };
62
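/* These masks bound what a raw socket without CAP_NET_RAW may do: type_mask
 * and event_mask cap the packet types and events it can request through
 * HCI_FILTER, and ocf_mask[ogf] is a 128-bit bitmap of command OCFs it may
 * send for each OGF. For example, type_mask 0x10 sets only bit 4, so only
 * HCI_EVENT_PKT (0x04) may be received.
 */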
63 static const struct hci_sec_filter hci_sec_filter = {
64         /* Packet types */
65         0x10,
66         /* Events */
67         { 0x1000d9fe, 0x0000b00c },
68         /* Commands */
69         {
70                 { 0x0 },
71                 /* OGF_LINK_CTL */
72                 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
73                 /* OGF_LINK_POLICY */
74                 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
75                 /* OGF_HOST_CTL */
76                 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
77                 /* OGF_INFO_PARAM */
78                 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
79                 /* OGF_STATUS_PARAM */
80                 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
81         }
82 };
83
84 static struct bt_sock_list hci_sk_list = {
85         .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
86 };
87
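/* Return true when @skb must not be delivered to @sk, i.e. the socket's
 * filter rejects the packet type, the event code or, for command
 * complete/status events, the command opcode.
 */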
88 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
89 {
90         struct hci_filter *flt;
91         int flt_type, flt_event;
92
93         /* Apply filter */
94         flt = &hci_pi(sk)->filter;
95
96         if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
97                 flt_type = 0;
98         else
99                 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
100
101         if (!test_bit(flt_type, &flt->type_mask))
102                 return true;
103
104         /* Extra filter for event packets only */
105         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
106                 return false;
107
108         flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
109
110         if (!hci_test_bit(flt_event, &flt->event_mask))
111                 return true;
112
113         /* Check filter only when opcode is set */
114         if (!flt->opcode)
115                 return false;
116
117         if (flt_event == HCI_EV_CMD_COMPLETE &&
118             flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
119                 return true;
120
121         if (flt_event == HCI_EV_CMD_STATUS &&
122             flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
123                 return true;
124
125         return false;
126 }
127
128 /* Send frame to RAW socket */
129 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
130 {
131         struct sock *sk;
132         struct sk_buff *skb_copy = NULL;
133
134         BT_DBG("hdev %p len %d", hdev, skb->len);
135
136         read_lock(&hci_sk_list.lock);
137
138         sk_for_each(sk, &hci_sk_list.head) {
139                 struct sk_buff *nskb;
140
141                 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
142                         continue;
143
144                 /* Don't send frame to the socket it came from */
145                 if (skb->sk == sk)
146                         continue;
147
148                 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
149                         if (is_filtered_packet(sk, skb))
150                                 continue;
151                 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
152                         if (!bt_cb(skb)->incoming)
153                                 continue;
154                         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
155                             bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
156                             bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
157                                 continue;
158                 } else {
159                         /* Don't send frame to other channel types */
160                         continue;
161                 }
162
163                 if (!skb_copy) {
164                         /* Create a private copy with headroom */
165 #ifdef CONFIG_TIZEN_WIP
166                         skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
167 #else
168                         /* __pskb_copy_fclone() is defined in newer kernels;
169                          * enable this code once the kernel is migrated.
170                          */
171                         skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
172 #endif
173                         if (!skb_copy)
174                                 continue;
175
176                         /* Put type byte before the data */
177                         memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
178                 }
179
180                 nskb = skb_clone(skb_copy, GFP_ATOMIC);
181                 if (!nskb)
182                         continue;
183
184                 if (sock_queue_rcv_skb(sk, nskb))
185                         kfree_skb(nskb);
186         }
187
188         read_unlock(&hci_sk_list.lock);
189
190         kfree_skb(skb_copy);
191 }
192
193 /* Send frame to control socket */
194 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
195 {
196         struct sock *sk;
197
198         BT_DBG("len %d", skb->len);
199
200         read_lock(&hci_sk_list.lock);
201
202         sk_for_each(sk, &hci_sk_list.head) {
203                 struct sk_buff *nskb;
204
205                 /* Skip the original socket */
206                 if (sk == skip_sk)
207                         continue;
208
209                 if (sk->sk_state != BT_BOUND)
210                         continue;
211
212                 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
213                         continue;
214
215                 nskb = skb_clone(skb, GFP_ATOMIC);
216                 if (!nskb)
217                         continue;
218
219                 if (sock_queue_rcv_skb(sk, nskb))
220                         kfree_skb(nskb);
221         }
222
223         read_unlock(&hci_sk_list.lock);
224 }
225
226 static void queue_monitor_skb(struct sk_buff *skb)
227 {
228         struct sock *sk;
229
230         BT_DBG("len %d", skb->len);
231
232         read_lock(&hci_sk_list.lock);
233
234         sk_for_each(sk, &hci_sk_list.head) {
235                 struct sk_buff *nskb;
236
237                 if (sk->sk_state != BT_BOUND)
238                         continue;
239
240                 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
241                         continue;
242
243                 nskb = skb_clone(skb, GFP_ATOMIC);
244                 if (!nskb)
245                         continue;
246
247                 if (sock_queue_rcv_skb(sk, nskb))
248                         kfree_skb(nskb);
249         }
250
251         read_unlock(&hci_sk_list.lock);
252 }
253
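/* Frames queued to monitor sockets carry a struct hci_mon_hdr in front of
 * the raw HCI payload:
 *
 *   +----------+----------+----------+----------------------+
 *   |  opcode  |  index   |   len    |  HCI packet payload  |
 *   | (__le16) | (__le16) | (__le16) |                      |
 *   +----------+----------+----------+----------------------+
 *
 * opcode encodes packet type and direction, index is the controller id and
 * len the payload length.
 */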
254 /* Send frame to monitor socket */
255 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
256 {
257         struct sk_buff *skb_copy = NULL;
258         struct hci_mon_hdr *hdr;
259         __le16 opcode;
260
261         if (!atomic_read(&monitor_promisc))
262                 return;
263
264         BT_DBG("hdev %p len %d", hdev, skb->len);
265
266         switch (bt_cb(skb)->pkt_type) {
267         case HCI_COMMAND_PKT:
268                 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
269                 break;
270         case HCI_EVENT_PKT:
271                 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
272                 break;
273         case HCI_ACLDATA_PKT:
274                 if (bt_cb(skb)->incoming)
275                         opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
276                 else
277                         opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
278                 break;
279         case HCI_SCODATA_PKT:
280                 if (bt_cb(skb)->incoming)
281                         opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
282                 else
283                         opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
284                 break;
285         default:
286                 return;
287         }
288
289
290         /* Create a private copy with headroom */
291 #ifdef CONFIG_TIZEN_WIP
292         skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
293 #else
294         skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
295 #endif
296         if (!skb_copy)
297                 return;
298
299         /* Put header before the data */
300         hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
301         hdr->opcode = opcode;
302         hdr->index = cpu_to_le16(hdev->id);
303         hdr->len = cpu_to_le16(skb->len);
304
305         queue_monitor_skb(skb_copy);
306         kfree_skb(skb_copy);
307 }
308
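/* Build an HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX record when a controller
 * is registered or removed, so monitor sockets can track the set of indexes.
 * send_monitor_replay() below reuses this to replay a NEW_INDEX record for
 * every already-registered controller when a monitor socket binds.
 */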
309 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
310 {
311         struct hci_mon_hdr *hdr;
312         struct hci_mon_new_index *ni;
313         struct sk_buff *skb;
314         __le16 opcode;
315
316         switch (event) {
317         case HCI_DEV_REG:
318                 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
319                 if (!skb)
320                         return NULL;
321
322                 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
323                 ni->type = hdev->dev_type;
324                 ni->bus = hdev->bus;
325                 bacpy(&ni->bdaddr, &hdev->bdaddr);
326                 memcpy(ni->name, hdev->name, 8);
327
328                 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
329                 break;
330
331         case HCI_DEV_UNREG:
332                 skb = bt_skb_alloc(0, GFP_ATOMIC);
333                 if (!skb)
334                         return NULL;
335
336                 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
337                 break;
338
339         default:
340                 return NULL;
341         }
342
343         __net_timestamp(skb);
344
345         hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
346         hdr->opcode = opcode;
347         hdr->index = cpu_to_le16(hdev->id);
348         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
349
350         return skb;
351 }
352
353 static void send_monitor_replay(struct sock *sk)
354 {
355         struct hci_dev *hdev;
356
357         read_lock(&hci_dev_list_lock);
358
359         list_for_each_entry(hdev, &hci_dev_list, list) {
360                 struct sk_buff *skb;
361
362                 skb = create_monitor_event(hdev, HCI_DEV_REG);
363                 if (!skb)
364                         continue;
365
366                 if (sock_queue_rcv_skb(sk, skb))
367                         kfree_skb(skb);
368         }
369
370         read_unlock(&hci_dev_list_lock);
371 }
372
373 /* Generate internal stack event */
374 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
375 {
376         struct hci_event_hdr *hdr;
377         struct hci_ev_stack_internal *ev;
378         struct sk_buff *skb;
379
380         skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
381         if (!skb)
382                 return;
383
384         hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
385         hdr->evt  = HCI_EV_STACK_INTERNAL;
386         hdr->plen = sizeof(*ev) + dlen;
387
388         ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
389         ev->type = type;
390         memcpy(ev->data, data, dlen);
391
392         bt_cb(skb)->incoming = 1;
393         __net_timestamp(skb);
394
395         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
396         hci_send_to_sock(hdev, skb);
397         kfree_skb(skb);
398 }
399
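/* Propagate a device event: as a monitor record when monitor sockets are
 * listening, and as an HCI_EV_SI_DEVICE stack-internal event to bound
 * sockets. On HCI_DEV_UNREG any socket still attached to the device is
 * detached and woken up with sk_err set to EPIPE.
 */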
400 void hci_sock_dev_event(struct hci_dev *hdev, int event)
401 {
402         struct hci_ev_si_device ev;
403
404         BT_DBG("hdev %s event %d", hdev->name, event);
405
406         /* Send event to monitor */
407         if (atomic_read(&monitor_promisc)) {
408                 struct sk_buff *skb;
409
410                 skb = create_monitor_event(hdev, event);
411                 if (skb) {
412                         queue_monitor_skb(skb);
413                         kfree_skb(skb);
414                 }
415         }
416
417         /* Send event to sockets */
418         ev.event  = event;
419         ev.dev_id = hdev->id;
420         hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
421
422         if (event == HCI_DEV_UNREG) {
423                 struct sock *sk;
424
425                 /* Detach sockets from device */
426                 read_lock(&hci_sk_list.lock);
427                 sk_for_each(sk, &hci_sk_list.head) {
428                         bh_lock_sock_nested(sk);
429                         if (hci_pi(sk)->hdev == hdev) {
430                                 hci_pi(sk)->hdev = NULL;
431                                 sk->sk_err = EPIPE;
432                                 sk->sk_state = BT_OPEN;
433                                 sk->sk_state_change(sk);
434
435                                 hci_dev_put(hdev);
436                         }
437                         bh_unlock_sock(sk);
438                 }
439                 read_unlock(&hci_sk_list.lock);
440         }
441 }
442
443 static int hci_sock_release(struct socket *sock)
444 {
445         struct sock *sk = sock->sk;
446         struct hci_dev *hdev;
447
448         BT_DBG("sock %p sk %p", sock, sk);
449
450         if (!sk)
451                 return 0;
452
453         hdev = hci_pi(sk)->hdev;
454
455         if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
456                 atomic_dec(&monitor_promisc);
457
458         bt_sock_unlink(&hci_sk_list, sk);
459
460         if (hdev) {
461                 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
462                         mgmt_index_added(hdev);
463                         clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
464                         hci_dev_close(hdev->id);
465                 }
466
467                 atomic_dec(&hdev->promisc);
468                 hci_dev_put(hdev);
469         }
470
471         sock_orphan(sk);
472
473         skb_queue_purge(&sk->sk_receive_queue);
474         skb_queue_purge(&sk->sk_write_queue);
475
476         sock_put(sk);
477         return 0;
478 }
479
480 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
481 {
482         bdaddr_t bdaddr;
483         int err;
484
485         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
486                 return -EFAULT;
487
488         hci_dev_lock(hdev);
489
490         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
491
492         hci_dev_unlock(hdev);
493
494         return err;
495 }
496
497 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
498 {
499         bdaddr_t bdaddr;
500         int err;
501
502         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
503                 return -EFAULT;
504
505         hci_dev_lock(hdev);
506
507         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
508
509         hci_dev_unlock(hdev);
510
511         return err;
512 }
513
514 /* Ioctls that require bound socket */
515 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
516                                 unsigned long arg)
517 {
518         struct hci_dev *hdev = hci_pi(sk)->hdev;
519
520         if (!hdev)
521                 return -EBADFD;
522
523         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
524                 return -EBUSY;
525
526         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
527                 return -EOPNOTSUPP;
528
529         if (hdev->dev_type != HCI_BREDR)
530                 return -EOPNOTSUPP;
531
532         switch (cmd) {
533         case HCISETRAW:
534                 if (!capable(CAP_NET_ADMIN))
535                         return -EPERM;
536                 return -EOPNOTSUPP;
537
538         case HCIGETCONNINFO:
539                 return hci_get_conn_info(hdev, (void __user *) arg);
540
541         case HCIGETAUTHINFO:
542                 return hci_get_auth_info(hdev, (void __user *) arg);
543
544         case HCIBLOCKADDR:
545                 if (!capable(CAP_NET_ADMIN))
546                         return -EPERM;
547                 return hci_sock_blacklist_add(hdev, (void __user *) arg);
548
549         case HCIUNBLOCKADDR:
550                 if (!capable(CAP_NET_ADMIN))
551                         return -EPERM;
552                 return hci_sock_blacklist_del(hdev, (void __user *) arg);
553         }
554
555         return -ENOIOCTLCMD;
556 }
557
558 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
559                           unsigned long arg)
560 {
561         void __user *argp = (void __user *) arg;
562         struct sock *sk = sock->sk;
563         int err;
564
565         BT_DBG("cmd %x arg %lx", cmd, arg);
566
567         lock_sock(sk);
568
569         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
570                 err = -EBADFD;
571                 goto done;
572         }
573
574         release_sock(sk);
575
576         switch (cmd) {
577         case HCIGETDEVLIST:
578                 return hci_get_dev_list(argp);
579
580         case HCIGETDEVINFO:
581                 return hci_get_dev_info(argp);
582
583         case HCIGETCONNLIST:
584                 return hci_get_conn_list(argp);
585
586         case HCIDEVUP:
587                 if (!capable(CAP_NET_ADMIN))
588                         return -EPERM;
589                 return hci_dev_open(arg);
590
591         case HCIDEVDOWN:
592                 if (!capable(CAP_NET_ADMIN))
593                         return -EPERM;
594                 return hci_dev_close(arg);
595
596         case HCIDEVRESET:
597                 if (!capable(CAP_NET_ADMIN))
598                         return -EPERM;
599                 return hci_dev_reset(arg);
600
601         case HCIDEVRESTAT:
602                 if (!capable(CAP_NET_ADMIN))
603                         return -EPERM;
604                 return hci_dev_reset_stat(arg);
605
606         case HCISETSCAN:
607         case HCISETAUTH:
608         case HCISETENCRYPT:
609         case HCISETPTYPE:
610         case HCISETLINKPOL:
611         case HCISETLINKMODE:
612         case HCISETACLMTU:
613         case HCISETSCOMTU:
614                 if (!capable(CAP_NET_ADMIN))
615                         return -EPERM;
616                 return hci_dev_cmd(cmd, argp);
617
618         case HCIINQUIRY:
619                 return hci_inquiry(argp);
620         }
621
622         lock_sock(sk);
623
624         err = hci_sock_bound_ioctl(sk, cmd, arg);
625
626 done:
627         release_sock(sk);
628         return err;
629 }
630
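/* Bind a socket to one of the HCI channels:
 *
 *   HCI_CHANNEL_RAW     - legacy raw/ioctl access, optionally tied to one
 *                         device.
 *   HCI_CHANNEL_USER    - exclusive access to one device; needs CAP_NET_ADMIN
 *                         and a device that is not already up or in setup.
 *   HCI_CHANNEL_CONTROL - management (mgmt) interface; needs CAP_NET_ADMIN.
 *   HCI_CHANNEL_MONITOR - read-only trace of all devices; needs CAP_NET_RAW.
 */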
631 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
632                          int addr_len)
633 {
634         struct sockaddr_hci haddr;
635         struct sock *sk = sock->sk;
636         struct hci_dev *hdev = NULL;
637         int len, err = 0;
638
639         BT_DBG("sock %p sk %p", sock, sk);
640
641         if (!addr)
642                 return -EINVAL;
643
644         memset(&haddr, 0, sizeof(haddr));
645         len = min_t(unsigned int, sizeof(haddr), addr_len);
646         memcpy(&haddr, addr, len);
647
648         if (haddr.hci_family != AF_BLUETOOTH)
649                 return -EINVAL;
650
651         lock_sock(sk);
652
653         if (sk->sk_state == BT_BOUND) {
654                 err = -EALREADY;
655                 goto done;
656         }
657
658         switch (haddr.hci_channel) {
659         case HCI_CHANNEL_RAW:
660                 if (hci_pi(sk)->hdev) {
661                         err = -EALREADY;
662                         goto done;
663                 }
664
665                 if (haddr.hci_dev != HCI_DEV_NONE) {
666                         hdev = hci_dev_get(haddr.hci_dev);
667                         if (!hdev) {
668                                 err = -ENODEV;
669                                 goto done;
670                         }
671
672                         atomic_inc(&hdev->promisc);
673                 }
674
675                 hci_pi(sk)->hdev = hdev;
676                 break;
677
678         case HCI_CHANNEL_USER:
679                 if (hci_pi(sk)->hdev) {
680                         err = -EALREADY;
681                         goto done;
682                 }
683
684                 if (haddr.hci_dev == HCI_DEV_NONE) {
685                         err = -EINVAL;
686                         goto done;
687                 }
688
689                 if (!capable(CAP_NET_ADMIN)) {
690                         err = -EPERM;
691                         goto done;
692                 }
693
694                 hdev = hci_dev_get(haddr.hci_dev);
695                 if (!hdev) {
696                         err = -ENODEV;
697                         goto done;
698                 }
699
700                 if (test_bit(HCI_UP, &hdev->flags) ||
701                     test_bit(HCI_INIT, &hdev->flags) ||
702                     test_bit(HCI_SETUP, &hdev->dev_flags) ||
703                     test_bit(HCI_CONFIG, &hdev->dev_flags)) {
704                         err = -EBUSY;
705                         hci_dev_put(hdev);
706                         goto done;
707                 }
708
709                 if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
710                         err = -EUSERS;
711                         hci_dev_put(hdev);
712                         goto done;
713                 }
714
715                 mgmt_index_removed(hdev);
716
717                 err = hci_dev_open(hdev->id);
718                 if (err) {
719                         clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
720                         mgmt_index_added(hdev);
721                         hci_dev_put(hdev);
722                         goto done;
723                 }
724
725                 atomic_inc(&hdev->promisc);
726
727                 hci_pi(sk)->hdev = hdev;
728                 break;
729
730         case HCI_CHANNEL_CONTROL:
731                 if (haddr.hci_dev != HCI_DEV_NONE) {
732                         err = -EINVAL;
733                         goto done;
734                 }
735
736                 if (!capable(CAP_NET_ADMIN)) {
737                         err = -EPERM;
738                         goto done;
739                 }
740
741                 break;
742
743         case HCI_CHANNEL_MONITOR:
744                 if (haddr.hci_dev != HCI_DEV_NONE) {
745                         err = -EINVAL;
746                         goto done;
747                 }
748
749                 if (!capable(CAP_NET_RAW)) {
750                         err = -EPERM;
751                         goto done;
752                 }
753
754                 send_monitor_replay(sk);
755
756                 atomic_inc(&monitor_promisc);
757                 break;
758
759         default:
760                 err = -EINVAL;
761                 goto done;
762         }
763
764
765         hci_pi(sk)->channel = haddr.hci_channel;
766         sk->sk_state = BT_BOUND;
767
768 done:
769         release_sock(sk);
770         return err;
771 }
772
773 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
774                             int *addr_len, int peer)
775 {
776         struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
777         struct sock *sk = sock->sk;
778         struct hci_dev *hdev;
779         int err = 0;
780
781         BT_DBG("sock %p sk %p", sock, sk);
782
783         if (peer)
784                 return -EOPNOTSUPP;
785
786         lock_sock(sk);
787
788         hdev = hci_pi(sk)->hdev;
789         if (!hdev) {
790                 err = -EBADFD;
791                 goto done;
792         }
793
794         *addr_len = sizeof(*haddr);
795         haddr->hci_family = AF_BLUETOOTH;
796         haddr->hci_dev    = hdev->id;
797         haddr->hci_channel = hci_pi(sk)->channel;
798
799 done:
800         release_sock(sk);
801         return err;
802 }
803
804 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
805                           struct sk_buff *skb)
806 {
807         __u32 mask = hci_pi(sk)->cmsg_mask;
808
809         if (mask & HCI_CMSG_DIR) {
810                 int incoming = bt_cb(skb)->incoming;
811                 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
812                          &incoming);
813         }
814
815         if (mask & HCI_CMSG_TSTAMP) {
816 #ifdef CONFIG_COMPAT
817                 struct compat_timeval ctv;
818 #endif
819                 struct timeval tv;
820                 void *data;
821                 int len;
822
823                 skb_get_timestamp(skb, &tv);
824
825                 data = &tv;
826                 len = sizeof(tv);
827 #ifdef CONFIG_COMPAT
828                 if (!COMPAT_USE_64BIT_TIME &&
829                     (msg->msg_flags & MSG_CMSG_COMPAT)) {
830                         ctv.tv_sec = tv.tv_sec;
831                         ctv.tv_usec = tv.tv_usec;
832                         data = &ctv;
833                         len = sizeof(ctv);
834                 }
835 #endif
836
837                 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
838         }
839 }
840
841 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
842                             struct msghdr *msg, size_t len, int flags)
843 {
844         int noblock = flags & MSG_DONTWAIT;
845         struct sock *sk = sock->sk;
846         struct sk_buff *skb;
847         int copied, err;
848
849         BT_DBG("sock %p, sk %p", sock, sk);
850
851         if (flags & (MSG_OOB))
852                 return -EOPNOTSUPP;
853
854         if (sk->sk_state == BT_CLOSED)
855                 return 0;
856
857         skb = skb_recv_datagram(sk, flags, noblock, &err);
858         if (!skb)
859                 return err;
860
861         copied = skb->len;
862         if (len < copied) {
863                 msg->msg_flags |= MSG_TRUNC;
864                 copied = len;
865         }
866
867         skb_reset_transport_header(skb);
868 #ifdef CONFIG_TIZEN_WIP
869         err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
870 #else
871         err = skb_copy_datagram_msg(skb, 0, msg, copied);
872 #endif
873
874         switch (hci_pi(sk)->channel) {
875         case HCI_CHANNEL_RAW:
876                 hci_sock_cmsg(sk, msg, skb);
877                 break;
878         case HCI_CHANNEL_USER:
879         case HCI_CHANNEL_CONTROL:
880         case HCI_CHANNEL_MONITOR:
881                 sock_recv_timestamp(msg, sk, skb);
882                 break;
883         }
884
885         skb_free_datagram(sk, skb);
886
887         return err ? : copied;
888 }
889
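/* Frames written to an HCI socket start with a one-byte packet type
 * indicator followed by the HCI packet itself, e.g. a command frame is
 *
 *   [HCI_COMMAND_PKT][opcode lo][opcode hi][plen][parameters ...]
 *
 * The type byte is stripped below and kept in bt_cb(skb)->pkt_type.
 */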
890 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
891                             struct msghdr *msg, size_t len)
892 {
893         struct sock *sk = sock->sk;
894         struct hci_dev *hdev;
895         struct sk_buff *skb;
896         int err;
897
898         BT_DBG("sock %p sk %p", sock, sk);
899
900         if (msg->msg_flags & MSG_OOB)
901                 return -EOPNOTSUPP;
902
903         if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
904                 return -EINVAL;
905
906         if (len < 4 || len > HCI_MAX_FRAME_SIZE)
907                 return -EINVAL;
908
909         lock_sock(sk);
910
911         switch (hci_pi(sk)->channel) {
912         case HCI_CHANNEL_RAW:
913         case HCI_CHANNEL_USER:
914                 break;
915         case HCI_CHANNEL_CONTROL:
916                 err = mgmt_control(sk, msg, len);
917                 goto done;
918         case HCI_CHANNEL_MONITOR:
919                 err = -EOPNOTSUPP;
920                 goto done;
921         default:
922                 err = -EINVAL;
923                 goto done;
924         }
925
926         hdev = hci_pi(sk)->hdev;
927         if (!hdev) {
928                 err = -EBADFD;
929                 goto done;
930         }
931
932         if (!test_bit(HCI_UP, &hdev->flags)) {
933                 err = -ENETDOWN;
934                 goto done;
935         }
936
937         skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
938         if (!skb)
939                 goto done;
940 #ifdef CONFIG_TIZEN_WIP
941         if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
942 #else
943         if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
944 #endif
945                 err = -EFAULT;
946                 goto drop;
947         }
948
949         bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
950         skb_pull(skb, 1);
951
952         if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
953                 /* No permission check is needed for user channel
954                  * since that gets enforced when binding the socket.
955                  *
956                  * However, check that the packet type is valid.
957                  */
958                 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
959                     bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
960                     bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
961                         err = -EINVAL;
962                         goto drop;
963                 }
964
965                 skb_queue_tail(&hdev->raw_q, skb);
966                 queue_work(hdev->workqueue, &hdev->tx_work);
967         } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
968                 u16 opcode = get_unaligned_le16(skb->data);
969                 u16 ogf = hci_opcode_ogf(opcode);
970                 u16 ocf = hci_opcode_ocf(opcode);
971
972                 if (((ogf > HCI_SFLT_MAX_OGF) ||
973                      !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
974                                    &hci_sec_filter.ocf_mask[ogf])) &&
975                     !capable(CAP_NET_RAW)) {
976                         err = -EPERM;
977                         goto drop;
978                 }
979
980                 if (ogf == 0x3f) {
981                         skb_queue_tail(&hdev->raw_q, skb);
982                         queue_work(hdev->workqueue, &hdev->tx_work);
983                 } else {
984                         /* Stand-alone HCI commands must be flagged as
985                          * single-command requests.
986                          */
987                         bt_cb(skb)->req.start = true;
988
989                         skb_queue_tail(&hdev->cmd_q, skb);
990                         queue_work(hdev->workqueue, &hdev->cmd_work);
991                 }
992         } else {
993                 if (!capable(CAP_NET_RAW)) {
994                         err = -EPERM;
995                         goto drop;
996                 }
997
998                 skb_queue_tail(&hdev->raw_q, skb);
999                 queue_work(hdev->workqueue, &hdev->tx_work);
1000         }
1001
1002         err = len;
1003
1004 done:
1005         release_sock(sk);
1006         return err;
1007
1008 drop:
1009         kfree_skb(skb);
1010         goto done;
1011 }
1012
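/* Socket options are only accepted on the raw channel. For HCI_FILTER the
 * requested masks are clamped against hci_sec_filter unless the caller has
 * CAP_NET_RAW, so an unprivileged socket cannot widen its filter beyond the
 * security filter defined above.
 */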
1013 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1014                                char __user *optval, unsigned int len)
1015 {
1016         struct hci_ufilter uf = { .opcode = 0 };
1017         struct sock *sk = sock->sk;
1018         int err = 0, opt = 0;
1019
1020         BT_DBG("sk %p, opt %d", sk, optname);
1021
1022         lock_sock(sk);
1023
1024         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1025                 err = -EBADFD;
1026                 goto done;
1027         }
1028
1029         switch (optname) {
1030         case HCI_DATA_DIR:
1031                 if (get_user(opt, (int __user *)optval)) {
1032                         err = -EFAULT;
1033                         break;
1034                 }
1035
1036                 if (opt)
1037                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1038                 else
1039                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1040                 break;
1041
1042         case HCI_TIME_STAMP:
1043                 if (get_user(opt, (int __user *)optval)) {
1044                         err = -EFAULT;
1045                         break;
1046                 }
1047
1048                 if (opt)
1049                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1050                 else
1051                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1052                 break;
1053
1054         case HCI_FILTER:
1055                 {
1056                         struct hci_filter *f = &hci_pi(sk)->filter;
1057
1058                         uf.type_mask = f->type_mask;
1059                         uf.opcode    = f->opcode;
1060                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1061                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1062                 }
1063
1064                 len = min_t(unsigned int, len, sizeof(uf));
1065                 if (copy_from_user(&uf, optval, len)) {
1066                         err = -EFAULT;
1067                         break;
1068                 }
1069
1070                 if (!capable(CAP_NET_RAW)) {
1071                         uf.type_mask &= hci_sec_filter.type_mask;
1072                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1073                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1074                 }
1075
1076                 {
1077                         struct hci_filter *f = &hci_pi(sk)->filter;
1078
1079                         f->type_mask = uf.type_mask;
1080                         f->opcode    = uf.opcode;
1081                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1082                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1083                 }
1084                 break;
1085
1086         default:
1087                 err = -ENOPROTOOPT;
1088                 break;
1089         }
1090
1091 done:
1092         release_sock(sk);
1093         return err;
1094 }
1095
1096 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1097                                char __user *optval, int __user *optlen)
1098 {
1099         struct hci_ufilter uf;
1100         struct sock *sk = sock->sk;
1101         int len, opt, err = 0;
1102
1103         BT_DBG("sk %p, opt %d", sk, optname);
1104
1105         if (get_user(len, optlen))
1106                 return -EFAULT;
1107
1108         lock_sock(sk);
1109
1110         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1111                 err = -EBADFD;
1112                 goto done;
1113         }
1114
1115         switch (optname) {
1116         case HCI_DATA_DIR:
1117                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1118                         opt = 1;
1119                 else
1120                         opt = 0;
1121
1122                 if (put_user(opt, optval))
1123                         err = -EFAULT;
1124                 break;
1125
1126         case HCI_TIME_STAMP:
1127                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1128                         opt = 1;
1129                 else
1130                         opt = 0;
1131
1132                 if (put_user(opt, optval))
1133                         err = -EFAULT;
1134                 break;
1135
1136         case HCI_FILTER:
1137                 {
1138                         struct hci_filter *f = &hci_pi(sk)->filter;
1139
1140                         memset(&uf, 0, sizeof(uf));
1141                         uf.type_mask = f->type_mask;
1142                         uf.opcode    = f->opcode;
1143                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1144                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1145                 }
1146
1147                 len = min_t(unsigned int, len, sizeof(uf));
1148                 if (copy_to_user(optval, &uf, len))
1149                         err = -EFAULT;
1150                 break;
1151
1152         default:
1153                 err = -ENOPROTOOPT;
1154                 break;
1155         }
1156
1157 done:
1158         release_sock(sk);
1159         return err;
1160 }
1161
1162 static const struct proto_ops hci_sock_ops = {
1163         .family         = PF_BLUETOOTH,
1164         .owner          = THIS_MODULE,
1165         .release        = hci_sock_release,
1166         .bind           = hci_sock_bind,
1167         .getname        = hci_sock_getname,
1168         .sendmsg        = hci_sock_sendmsg,
1169         .recvmsg        = hci_sock_recvmsg,
1170         .ioctl          = hci_sock_ioctl,
1171         .poll           = datagram_poll,
1172         .listen         = sock_no_listen,
1173         .shutdown       = sock_no_shutdown,
1174         .setsockopt     = hci_sock_setsockopt,
1175         .getsockopt     = hci_sock_getsockopt,
1176         .connect        = sock_no_connect,
1177         .socketpair     = sock_no_socketpair,
1178         .accept         = sock_no_accept,
1179         .mmap           = sock_no_mmap
1180 };
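/* A minimal userspace sketch of this interface (illustrative only; it assumes
 * the usual AF_BLUETOOTH/BTPROTO_HCI, SOL_HCI/HCI_FILTER constants and a
 * filter struct with the hci_ufilter layout, which BlueZ exposes as
 * struct hci_filter): open a raw HCI socket, bind it to controller 0 on the
 * raw channel and install an event filter. Each frame read back starts with
 * the packet type byte.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	struct hci_ufilter f = {
 *		.type_mask  = 1 << HCI_EVENT_PKT,
 *		.event_mask = { ~0U, ~0U },
 *	};
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &f, sizeof(f));
 *
 *	read(fd, buf, sizeof(buf));
 */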
1181
1182 static struct proto hci_sk_proto = {
1183         .name           = "HCI",
1184         .owner          = THIS_MODULE,
1185         .obj_size       = sizeof(struct hci_pinfo)
1186 };
1187
1188 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1189                            int kern)
1190 {
1191         struct sock *sk;
1192
1193         BT_DBG("sock %p", sock);
1194
1195         if (sock->type != SOCK_RAW)
1196                 return -ESOCKTNOSUPPORT;
1197
1198         sock->ops = &hci_sock_ops;
1199
1200         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1201         if (!sk)
1202                 return -ENOMEM;
1203
1204         sock_init_data(sock, sk);
1205
1206         sock_reset_flag(sk, SOCK_ZAPPED);
1207
1208         sk->sk_protocol = protocol;
1209
1210         sock->state = SS_UNCONNECTED;
1211         sk->sk_state = BT_OPEN;
1212
1213         bt_sock_link(&hci_sk_list, sk);
1214         return 0;
1215 }
1216
1217 static const struct net_proto_family hci_sock_family_ops = {
1218         .family = PF_BLUETOOTH,
1219         .owner  = THIS_MODULE,
1220         .create = hci_sock_create,
1221 };
1222
1223 int __init hci_sock_init(void)
1224 {
1225         int err;
1226
1227         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1228
1229         err = proto_register(&hci_sk_proto, 0);
1230         if (err < 0)
1231                 return err;
1232
1233         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1234         if (err < 0) {
1235                 BT_ERR("HCI socket registration failed");
1236                 goto error;
1237         }
1238
1239         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1240         if (err < 0) {
1241                 BT_ERR("Failed to create HCI proc file");
1242                 bt_sock_unregister(BTPROTO_HCI);
1243                 goto error;
1244         }
1245
1246         BT_INFO("HCI socket layer initialized");
1247
1248         return 0;
1249
1250 error:
1251         proto_unregister(&hci_sk_proto);
1252         return err;
1253 }
1254
1255 void hci_sock_cleanup(void)
1256 {
1257         bt_procfs_cleanup(&init_net, "hci");
1258         bt_sock_unregister(BTPROTO_HCI);
1259         proto_unregister(&hci_sk_proto);
1260 }