/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
36 /* ----- HCI socket interface ----- */
38 static inline int hci_test_bit(int nr, void *addr)
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
44 static struct hci_sec_filter hci_sec_filter = {
48 { 0x1000d9fe, 0x0000b00c },
53 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
55 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
57 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
59 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
60 /* OGF_STATUS_PARAM */
61 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
65 static struct bt_sock_list hci_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
69 /* Send frame to RAW socket */
70 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
73 struct sk_buff *skb_copy = NULL;
75 BT_DBG("hdev %p len %d", hdev, skb->len);
77 read_lock(&hci_sk_list.lock);
79 sk_for_each(sk, &hci_sk_list.head) {
80 struct hci_filter *flt;
83 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
86 /* Don't send frame to the socket it came from */
90 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
94 flt = &hci_pi(sk)->filter;
96 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
97 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
101 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
102 int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
104 if (!hci_test_bit(evt, &flt->event_mask))
108 ((evt == HCI_EV_CMD_COMPLETE &&
110 get_unaligned((__le16 *)(skb->data + 3))) ||
111 (evt == HCI_EV_CMD_STATUS &&
113 get_unaligned((__le16 *)(skb->data + 4)))))
118 /* Create a private copy with headroom */
119 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
123 /* Put type byte before the data */
124 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
127 nskb = skb_clone(skb_copy, GFP_ATOMIC);
131 if (sock_queue_rcv_skb(sk, nskb))
135 read_unlock(&hci_sk_list.lock);
140 /* Send frame to control socket */
141 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
145 BT_DBG("len %d", skb->len);
147 read_lock(&hci_sk_list.lock);
149 sk_for_each(sk, &hci_sk_list.head) {
150 struct sk_buff *nskb;
152 /* Skip the original socket */
156 if (sk->sk_state != BT_BOUND)
159 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
162 nskb = skb_clone(skb, GFP_ATOMIC);
166 if (sock_queue_rcv_skb(sk, nskb))
170 read_unlock(&hci_sk_list.lock);
173 /* Send frame to monitor socket */
174 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
177 struct sk_buff *skb_copy = NULL;
180 if (!atomic_read(&monitor_promisc))
183 BT_DBG("hdev %p len %d", hdev, skb->len);
185 switch (bt_cb(skb)->pkt_type) {
186 case HCI_COMMAND_PKT:
187 opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
190 opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
192 case HCI_ACLDATA_PKT:
193 if (bt_cb(skb)->incoming)
194 opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
196 opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
198 case HCI_SCODATA_PKT:
199 if (bt_cb(skb)->incoming)
200 opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
202 opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
208 read_lock(&hci_sk_list.lock);
210 sk_for_each(sk, &hci_sk_list.head) {
211 struct sk_buff *nskb;
213 if (sk->sk_state != BT_BOUND)
216 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
220 struct hci_mon_hdr *hdr;
222 /* Create a private copy with headroom */
223 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
228 /* Put header before the data */
229 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
230 hdr->opcode = opcode;
231 hdr->index = cpu_to_le16(hdev->id);
232 hdr->len = cpu_to_le16(skb->len);
235 nskb = skb_clone(skb_copy, GFP_ATOMIC);
239 if (sock_queue_rcv_skb(sk, nskb))
243 read_unlock(&hci_sk_list.lock);
248 static void send_monitor_event(struct sk_buff *skb)
252 BT_DBG("len %d", skb->len);
254 read_lock(&hci_sk_list.lock);
256 sk_for_each(sk, &hci_sk_list.head) {
257 struct sk_buff *nskb;
259 if (sk->sk_state != BT_BOUND)
262 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
265 nskb = skb_clone(skb, GFP_ATOMIC);
269 if (sock_queue_rcv_skb(sk, nskb))
273 read_unlock(&hci_sk_list.lock);
276 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
278 struct hci_mon_hdr *hdr;
279 struct hci_mon_new_index *ni;
285 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
289 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
290 ni->type = hdev->dev_type;
292 bacpy(&ni->bdaddr, &hdev->bdaddr);
293 memcpy(ni->name, hdev->name, 8);
295 opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
299 skb = bt_skb_alloc(0, GFP_ATOMIC);
303 opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
310 __net_timestamp(skb);
312 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
313 hdr->opcode = opcode;
314 hdr->index = cpu_to_le16(hdev->id);
315 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
320 static void send_monitor_replay(struct sock *sk)
322 struct hci_dev *hdev;
324 read_lock(&hci_dev_list_lock);
326 list_for_each_entry(hdev, &hci_dev_list, list) {
329 skb = create_monitor_event(hdev, HCI_DEV_REG);
333 if (sock_queue_rcv_skb(sk, skb))
337 read_unlock(&hci_dev_list_lock);
340 /* Generate internal stack event */
341 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
343 struct hci_event_hdr *hdr;
344 struct hci_ev_stack_internal *ev;
347 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
351 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
352 hdr->evt = HCI_EV_STACK_INTERNAL;
353 hdr->plen = sizeof(*ev) + dlen;
355 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
357 memcpy(ev->data, data, dlen);
359 bt_cb(skb)->incoming = 1;
360 __net_timestamp(skb);
362 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
363 skb->dev = (void *) hdev;
364 hci_send_to_sock(hdev, skb);
368 void hci_sock_dev_event(struct hci_dev *hdev, int event)
370 struct hci_ev_si_device ev;
372 BT_DBG("hdev %s event %d", hdev->name, event);
374 /* Send event to monitor */
375 if (atomic_read(&monitor_promisc)) {
378 skb = create_monitor_event(hdev, event);
380 send_monitor_event(skb);
385 /* Send event to sockets */
387 ev.dev_id = hdev->id;
388 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
390 if (event == HCI_DEV_UNREG) {
393 /* Detach sockets from device */
394 read_lock(&hci_sk_list.lock);
395 sk_for_each(sk, &hci_sk_list.head) {
396 bh_lock_sock_nested(sk);
397 if (hci_pi(sk)->hdev == hdev) {
398 hci_pi(sk)->hdev = NULL;
400 sk->sk_state = BT_OPEN;
401 sk->sk_state_change(sk);
407 read_unlock(&hci_sk_list.lock);
411 static int hci_sock_release(struct socket *sock)
413 struct sock *sk = sock->sk;
414 struct hci_dev *hdev;
416 BT_DBG("sock %p sk %p", sock, sk);
421 hdev = hci_pi(sk)->hdev;
423 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
424 atomic_dec(&monitor_promisc);
426 bt_sock_unlink(&hci_sk_list, sk);
429 atomic_dec(&hdev->promisc);
435 skb_queue_purge(&sk->sk_receive_queue);
436 skb_queue_purge(&sk->sk_write_queue);
442 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
447 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
452 err = hci_blacklist_add(hdev, &bdaddr, 0);
454 hci_dev_unlock(hdev);
459 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
464 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
469 err = hci_blacklist_del(hdev, &bdaddr, 0);
471 hci_dev_unlock(hdev);
476 /* Ioctls that require bound socket */
477 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
480 struct hci_dev *hdev = hci_pi(sk)->hdev;
487 if (!capable(CAP_NET_ADMIN))
490 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
494 set_bit(HCI_RAW, &hdev->flags);
496 clear_bit(HCI_RAW, &hdev->flags);
501 return hci_get_conn_info(hdev, (void __user *) arg);
504 return hci_get_auth_info(hdev, (void __user *) arg);
507 if (!capable(CAP_NET_ADMIN))
509 return hci_sock_blacklist_add(hdev, (void __user *) arg);
512 if (!capable(CAP_NET_ADMIN))
514 return hci_sock_blacklist_del(hdev, (void __user *) arg);
518 return hdev->ioctl(hdev, cmd, arg);
523 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
526 struct sock *sk = sock->sk;
527 void __user *argp = (void __user *) arg;
530 BT_DBG("cmd %x arg %lx", cmd, arg);
534 return hci_get_dev_list(argp);
537 return hci_get_dev_info(argp);
540 return hci_get_conn_list(argp);
543 if (!capable(CAP_NET_ADMIN))
545 return hci_dev_open(arg);
548 if (!capable(CAP_NET_ADMIN))
550 return hci_dev_close(arg);
553 if (!capable(CAP_NET_ADMIN))
555 return hci_dev_reset(arg);
558 if (!capable(CAP_NET_ADMIN))
560 return hci_dev_reset_stat(arg);
570 if (!capable(CAP_NET_ADMIN))
572 return hci_dev_cmd(cmd, argp);
575 return hci_inquiry(argp);
579 err = hci_sock_bound_ioctl(sk, cmd, arg);
585 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
588 struct sockaddr_hci haddr;
589 struct sock *sk = sock->sk;
590 struct hci_dev *hdev = NULL;
593 BT_DBG("sock %p sk %p", sock, sk);
598 memset(&haddr, 0, sizeof(haddr));
599 len = min_t(unsigned int, sizeof(haddr), addr_len);
600 memcpy(&haddr, addr, len);
602 if (haddr.hci_family != AF_BLUETOOTH)
607 if (sk->sk_state == BT_BOUND) {
612 switch (haddr.hci_channel) {
613 case HCI_CHANNEL_RAW:
614 if (hci_pi(sk)->hdev) {
619 if (haddr.hci_dev != HCI_DEV_NONE) {
620 hdev = hci_dev_get(haddr.hci_dev);
626 atomic_inc(&hdev->promisc);
629 hci_pi(sk)->hdev = hdev;
632 case HCI_CHANNEL_CONTROL:
633 if (haddr.hci_dev != HCI_DEV_NONE) {
638 if (!capable(CAP_NET_ADMIN)) {
645 case HCI_CHANNEL_MONITOR:
646 if (haddr.hci_dev != HCI_DEV_NONE) {
651 if (!capable(CAP_NET_RAW)) {
656 send_monitor_replay(sk);
658 atomic_inc(&monitor_promisc);
667 hci_pi(sk)->channel = haddr.hci_channel;
668 sk->sk_state = BT_BOUND;
675 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
676 int *addr_len, int peer)
678 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
679 struct sock *sk = sock->sk;
680 struct hci_dev *hdev = hci_pi(sk)->hdev;
682 BT_DBG("sock %p sk %p", sock, sk);
689 *addr_len = sizeof(*haddr);
690 haddr->hci_family = AF_BLUETOOTH;
691 haddr->hci_dev = hdev->id;
692 haddr->hci_channel= 0;
698 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
701 __u32 mask = hci_pi(sk)->cmsg_mask;
703 if (mask & HCI_CMSG_DIR) {
704 int incoming = bt_cb(skb)->incoming;
705 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
709 if (mask & HCI_CMSG_TSTAMP) {
711 struct compat_timeval ctv;
717 skb_get_timestamp(skb, &tv);
722 if (!COMPAT_USE_64BIT_TIME &&
723 (msg->msg_flags & MSG_CMSG_COMPAT)) {
724 ctv.tv_sec = tv.tv_sec;
725 ctv.tv_usec = tv.tv_usec;
731 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
735 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
736 struct msghdr *msg, size_t len, int flags)
738 int noblock = flags & MSG_DONTWAIT;
739 struct sock *sk = sock->sk;
743 BT_DBG("sock %p, sk %p", sock, sk);
745 if (flags & (MSG_OOB))
748 if (sk->sk_state == BT_CLOSED)
751 skb = skb_recv_datagram(sk, flags, noblock, &err);
755 msg->msg_namelen = 0;
759 msg->msg_flags |= MSG_TRUNC;
763 skb_reset_transport_header(skb);
764 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
766 switch (hci_pi(sk)->channel) {
767 case HCI_CHANNEL_RAW:
768 hci_sock_cmsg(sk, msg, skb);
770 case HCI_CHANNEL_CONTROL:
771 case HCI_CHANNEL_MONITOR:
772 sock_recv_timestamp(msg, sk, skb);
776 skb_free_datagram(sk, skb);
778 return err ? : copied;
781 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
782 struct msghdr *msg, size_t len)
784 struct sock *sk = sock->sk;
785 struct hci_dev *hdev;
789 BT_DBG("sock %p sk %p", sock, sk);
791 if (msg->msg_flags & MSG_OOB)
794 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
797 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
802 switch (hci_pi(sk)->channel) {
803 case HCI_CHANNEL_RAW:
805 case HCI_CHANNEL_CONTROL:
806 err = mgmt_control(sk, msg, len);
808 case HCI_CHANNEL_MONITOR:
816 hdev = hci_pi(sk)->hdev;
822 if (!test_bit(HCI_UP, &hdev->flags)) {
827 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
831 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
836 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
838 skb->dev = (void *) hdev;
840 if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
841 u16 opcode = get_unaligned_le16(skb->data);
842 u16 ogf = hci_opcode_ogf(opcode);
843 u16 ocf = hci_opcode_ocf(opcode);
845 if (((ogf > HCI_SFLT_MAX_OGF) ||
846 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
847 &hci_sec_filter.ocf_mask[ogf])) &&
848 !capable(CAP_NET_RAW)) {
853 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
854 skb_queue_tail(&hdev->raw_q, skb);
855 queue_work(hdev->workqueue, &hdev->tx_work);
857 /* Stand-alone HCI commands must be flaged as
858 * single-command requests.
860 bt_cb(skb)->req.start = true;
862 skb_queue_tail(&hdev->cmd_q, skb);
863 queue_work(hdev->workqueue, &hdev->cmd_work);
866 if (!capable(CAP_NET_RAW)) {
871 skb_queue_tail(&hdev->raw_q, skb);
872 queue_work(hdev->workqueue, &hdev->tx_work);
886 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
887 char __user *optval, unsigned int len)
889 struct hci_ufilter uf = { .opcode = 0 };
890 struct sock *sk = sock->sk;
891 int err = 0, opt = 0;
893 BT_DBG("sk %p, opt %d", sk, optname);
897 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
904 if (get_user(opt, (int __user *)optval)) {
910 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
912 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
916 if (get_user(opt, (int __user *)optval)) {
922 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
924 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
929 struct hci_filter *f = &hci_pi(sk)->filter;
931 uf.type_mask = f->type_mask;
932 uf.opcode = f->opcode;
933 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
934 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
937 len = min_t(unsigned int, len, sizeof(uf));
938 if (copy_from_user(&uf, optval, len)) {
943 if (!capable(CAP_NET_RAW)) {
944 uf.type_mask &= hci_sec_filter.type_mask;
945 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
946 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
950 struct hci_filter *f = &hci_pi(sk)->filter;
952 f->type_mask = uf.type_mask;
953 f->opcode = uf.opcode;
954 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
955 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
969 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
970 char __user *optval, int __user *optlen)
972 struct hci_ufilter uf;
973 struct sock *sk = sock->sk;
974 int len, opt, err = 0;
976 BT_DBG("sk %p, opt %d", sk, optname);
978 if (get_user(len, optlen))
983 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
990 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
995 if (put_user(opt, optval))
1000 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1005 if (put_user(opt, optval))
1011 struct hci_filter *f = &hci_pi(sk)->filter;
1013 memset(&uf, 0, sizeof(uf));
1014 uf.type_mask = f->type_mask;
1015 uf.opcode = f->opcode;
1016 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1017 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1020 len = min_t(unsigned int, len, sizeof(uf));
1021 if (copy_to_user(optval, &uf, len))
1035 static const struct proto_ops hci_sock_ops = {
1036 .family = PF_BLUETOOTH,
1037 .owner = THIS_MODULE,
1038 .release = hci_sock_release,
1039 .bind = hci_sock_bind,
1040 .getname = hci_sock_getname,
1041 .sendmsg = hci_sock_sendmsg,
1042 .recvmsg = hci_sock_recvmsg,
1043 .ioctl = hci_sock_ioctl,
1044 .poll = datagram_poll,
1045 .listen = sock_no_listen,
1046 .shutdown = sock_no_shutdown,
1047 .setsockopt = hci_sock_setsockopt,
1048 .getsockopt = hci_sock_getsockopt,
1049 .connect = sock_no_connect,
1050 .socketpair = sock_no_socketpair,
1051 .accept = sock_no_accept,
1052 .mmap = sock_no_mmap
1055 static struct proto hci_sk_proto = {
1057 .owner = THIS_MODULE,
1058 .obj_size = sizeof(struct hci_pinfo)
1061 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1066 BT_DBG("sock %p", sock);
1068 if (sock->type != SOCK_RAW)
1069 return -ESOCKTNOSUPPORT;
1071 sock->ops = &hci_sock_ops;
1073 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1077 sock_init_data(sock, sk);
1079 sock_reset_flag(sk, SOCK_ZAPPED);
1081 sk->sk_protocol = protocol;
1083 sock->state = SS_UNCONNECTED;
1084 sk->sk_state = BT_OPEN;
1086 bt_sock_link(&hci_sk_list, sk);
1090 static const struct net_proto_family hci_sock_family_ops = {
1091 .family = PF_BLUETOOTH,
1092 .owner = THIS_MODULE,
1093 .create = hci_sock_create,
1096 int __init hci_sock_init(void)
1100 err = proto_register(&hci_sk_proto, 0);
1104 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1106 BT_ERR("HCI socket registration failed");
1110 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1112 BT_ERR("Failed to create HCI proc file");
1113 bt_sock_unregister(BTPROTO_HCI);
1117 BT_INFO("HCI socket layer initialized");
1122 proto_unregister(&hci_sk_proto);
1126 void hci_sock_cleanup(void)
1128 bt_procfs_cleanup(&init_net, "hci");
1129 bt_sock_unregister(BTPROTO_HCI);
1130 proto_unregister(&hci_sk_proto);