/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

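/* For illustration only (not part of this file): hci_test_bit() indexes a
 * multi-word mask; nr >> 5 picks the 32-bit word, nr & 31 the bit within
 * it. A sketch using the event mask values from hci_sec_filter below:
 */
#if 0
__u32 mask[2] = { 0x1000d9fe, 0x0000b00c };

/* Event 0x0e (HCI_EV_CMD_COMPLETE) maps to word 0, bit 14; that bit is
 * set in 0x1000d9fe, so the test returns non-zero.
 */
int set = hci_test_bit(0x0e, mask);
#endif
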
/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

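/* For illustration only (not part of this file): a sketch of how
 * hci_sock_sendmsg() below consults this table for a sender without
 * CAP_NET_RAW. The opcode splits into OGF (top 6 bits) and OCF (low
 * 10 bits), and the OCF indexes the per-OGF bitmask.
 */
#if 0
u16 opcode = 0x0c14;			/* Read Local Name: OGF 0x03, OCF 0x14 */
u16 ogf = hci_opcode_ogf(opcode);	/* opcode >> 10 */
u16 ocf = hci_opcode_ocf(opcode);	/* opcode & 0x03ff */

/* ocf_mask[3] is the OGF_HOST_CTL row; bit 0x14 of 0xaab00200 is set,
 * so this command passes the filter for unprivileged sockets.
 */
int permitted = ogf <= HCI_SFLT_MAX_OGF &&
		hci_test_bit(ocf & HCI_FLT_OCF_BITS,
			     &hci_sec_filter.ocf_mask[ogf]);
#endif
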
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
			      &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

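/* Usage sketch (userspace, not part of this file): a raw-channel reader
 * receives frames exactly as queued above, i.e. prefixed with the
 * one-byte packet type. A minimal sketch, assuming the BlueZ userspace
 * headers and an existing controller hci0:
 */
#if 0
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
struct sockaddr_hci a = { .hci_family = AF_BLUETOOTH, .hci_dev = 0 };
unsigned char buf[HCI_MAX_FRAME_SIZE];

bind(dd, (struct sockaddr *) &a, sizeof(a));
ssize_t n = read(dd, buf, sizeof(buf));
/* buf[0] is HCI_EVENT_PKT, HCI_ACLDATA_PKT, ...; buf[1..n-1] the frame */
#endif
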
/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

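/* Usage sketch (userspace, not part of this file): each frame queued to a
 * monitor socket carries the 6-byte little-endian header built above. A
 * fragment, assuming dd is already bound to HCI_CHANNEL_MONITOR:
 */
#if 0
unsigned char buf[HCI_MAX_FRAME_SIZE + 6];
ssize_t n = read(dd, buf, sizeof(buf));

uint16_t opcode = buf[0] | (buf[1] << 8);	/* HCI_MON_* opcode */
uint16_t index  = buf[2] | (buf[3] << 8);	/* controller id (hdev->id) */
uint16_t plen   = buf[4] | (buf[5] << 8);	/* bytes following the header */
#endif
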
static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

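/* Usage sketch (userspace, not part of this file): binding a raw channel
 * to one controller. Passing HCI_DEV_NONE instead of a device index
 * leaves the socket unbound to any controller; per the checks above, the
 * control and monitor channels accept only HCI_DEV_NONE.
 */
#if 0
struct sockaddr_hci a = {
	.hci_family  = AF_BLUETOOTH,
	.hci_dev     = 0,		/* hci0, arbitrary choice */
	.hci_channel = HCI_CHANNEL_RAW,
};
int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

if (bind(dd, (struct sockaddr *) &a, sizeof(a)) < 0)
	perror("bind");
#endif
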
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = 0;

	release_sock(sk);
	return 0;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

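/* Usage sketch (userspace, not part of this file): after enabling
 * HCI_DATA_DIR and HCI_TIME_STAMP with setsockopt(SOL_HCI, ...), the
 * ancillary data emitted above can be read back with recvmsg(). A
 * fragment, assuming dd is a bound raw-channel socket:
 */
#if 0
char cbuf[64];
unsigned char buf[HCI_MAX_FRAME_SIZE];
struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
		      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
struct cmsghdr *cmsg;

recvmsg(dd, &msg, 0);
for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
	if (cmsg->cmsg_level != SOL_HCI)
		continue;
	if (cmsg->cmsg_type == HCI_CMSG_DIR)
		;	/* CMSG_DATA() holds an int: 1 incoming, 0 outgoing */
	else if (cmsg->cmsg_type == HCI_CMSG_TSTAMP)
		;	/* CMSG_DATA() holds a struct timeval */
}
#endif
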
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

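/* Usage sketch (userspace, not part of this file): a command frame laid
 * out as hci_sock_sendmsg() expects it: packet type byte, little-endian
 * opcode, parameter length, then parameters. Read Local Name (OGF 0x03,
 * OCF 0x14) passes hci_sec_filter, so no CAP_NET_RAW is needed:
 */
#if 0
unsigned char cmd[] = {
	HCI_COMMAND_PKT,	/* consumed into bt_cb(skb)->pkt_type */
	0x14, 0x0c,		/* opcode 0x0c14, little-endian */
	0x00,			/* parameter length */
};

write(dd, cmd, sizeof(cmd));
#endif
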
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

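/* Usage sketch (userspace, not part of this file): installing an event
 * filter with the hci_filter helpers from the BlueZ headers. For callers
 * without CAP_NET_RAW, the branch above silently intersects the request
 * with hci_sec_filter.
 */
#if 0
struct hci_filter flt;

hci_filter_clear(&flt);
hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
hci_filter_set_event(EVT_CMD_STATUS, &flt);

if (setsockopt(dd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
	perror("setsockopt");
#endif
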
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	proto_unregister(&hci_sk_proto);
}