2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
/* Number of sockets currently bound to the monitor channel; the monitor
 * copy path (hci_send_to_monitor) is skipped entirely while this is zero. */
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
36 /* ----- HCI socket interface ----- */
/* Per-socket protocol data; hci_pi() reinterprets a struct sock pointer as
 * struct hci_pinfo.
 * NOTE(review): the struct hci_pinfo declaration itself was lost in
 * extraction -- only two of its members are visible below. */
39 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Per-socket packet/event filter, applied on the RAW channel only. */
44 struct hci_filter filter;
/* HCI_CHANNEL_RAW/USER/CONTROL/MONITOR this socket is bound to. */
46 unsigned short channel;
/* Test bit 'nr' in a bitmap stored as an array of __u32 words:
 * word index is nr >> 5, bit within the word is nr & 31.
 * Used for the event masks of hci_filter / hci_sec_filter. */
49 static inline int hci_test_bit(int nr, void *addr)
51 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Security filter applied to sockets without CAP_NET_RAW: per-OGF bitmaps
 * of the command OCFs such sockets may send (checked in hci_sock_sendmsg)
 * plus packet-type/event masks they may install (hci_sock_setsockopt). */
55 #define HCI_SFLT_MAX_OGF 5
57 struct hci_sec_filter {
/* One 4-word OCF bitmap per OGF, for OGFs 0..HCI_SFLT_MAX_OGF. */
60 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
63 static const struct hci_sec_filter hci_sec_filter = {
67 { 0x1000d9fe, 0x0000b00c },
/* NOTE(review): the per-OGF row labels were lost in extraction; only the
 * OGF_STATUS_PARAM label below survives from the original. */
72 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
74 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
76 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
78 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
79 /* OGF_STATUS_PARAM */
80 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all open HCI sockets, protected by its embedded rwlock;
 * every broadcast path below walks it under read_lock. */
84 static struct bt_sock_list hci_sk_list = {
85 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether 'skb' must be withheld from RAW socket 'sk' according to
 * the socket's hci_filter: packet-type mask, event mask and -- for
 * cmd-complete / cmd-status events -- the opcode embedded in the event
 * payload at offset 3 resp. 4.
 * NOTE(review): the return statements were lost in extraction; judging by
 * the caller in hci_send_to_sock, a true result means "drop the frame". */
88 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
90 struct hci_filter *flt;
91 int flt_type, flt_event;
94 flt = &hci_pi(sk)->filter;
/* Vendor packets bypass the normal type/event filtering. */
96 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
99 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
101 if (!test_bit(flt_type, &flt->type_mask))
104 /* Extra filter for event packets only */
105 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
108 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
110 if (!hci_test_bit(flt_event, &flt->event_mask))
113 /* Check filter only when opcode is set */
117 if (flt_event == HCI_EV_CMD_COMPLETE &&
118 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
121 if (flt_event == HCI_EV_CMD_STATUS &&
122 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
/* Broadcast a frame belonging to 'hdev' to every bound HCI socket: RAW
 * sockets receive it subject to their per-socket filter; USER-channel
 * sockets receive only incoming event/ACL/SCO frames.  A single private
 * copy is made lazily with 1 byte of headroom for the packet-type byte,
 * then each recipient gets an skb_clone of that copy. */
128 /* Send frame to RAW socket */
129 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
132 struct sk_buff *skb_copy = NULL;
134 BT_DBG("hdev %p len %d", hdev, skb->len);
136 read_lock(&hci_sk_list.lock);
138 sk_for_each(sk, &hci_sk_list.head) {
139 struct sk_buff *nskb;
141 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
144 /* Don't send frame to the socket it came from */
148 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
149 if (is_filtered_packet(sk, skb))
151 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
152 if (!bt_cb(skb)->incoming)
154 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
155 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
156 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
159 /* Don't send frame to other channel types */
164 /* Create a private copy with headroom */
165 #ifdef CONFIG_TIZEN_WIP
166 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
168 /* __pskb_copy_fclone is defined in the latest kernel.
169 * if the kernel is migrated to the latest, the code below should be enabled
171 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
176 /* Put type byte before the data */
177 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
180 nskb = skb_clone(skb_copy, GFP_ATOMIC);
/* On queueing failure the clone is presumably freed -- the kfree_skb
 * line was lost in extraction; TODO confirm against upstream. */
184 if (sock_queue_rcv_skb(sk, nskb))
188 read_unlock(&hci_sk_list.lock);
/* Deliver a management frame to every bound CONTROL-channel socket,
 * skipping 'skip_sk' (the socket the frame originated from).  Each
 * recipient gets its own skb_clone. */
193 /* Send frame to control socket */
194 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
198 BT_DBG("len %d", skb->len);
200 read_lock(&hci_sk_list.lock);
202 sk_for_each(sk, &hci_sk_list.head) {
203 struct sk_buff *nskb;
205 /* Skip the original socket */
209 if (sk->sk_state != BT_BOUND)
212 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
215 nskb = skb_clone(skb, GFP_ATOMIC);
219 if (sock_queue_rcv_skb(sk, nskb))
223 read_unlock(&hci_sk_list.lock);
/* Clone 'skb' onto the receive queue of every bound MONITOR-channel
 * socket.  Callers (hci_send_to_monitor, hci_sock_dev_event) have already
 * prepended the hci_mon_hdr. */
226 static void queue_monitor_skb(struct sk_buff *skb)
230 BT_DBG("len %d", skb->len);
232 read_lock(&hci_sk_list.lock);
234 sk_for_each(sk, &hci_sk_list.head) {
235 struct sk_buff *nskb;
237 if (sk->sk_state != BT_BOUND)
240 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
243 nskb = skb_clone(skb, GFP_ATOMIC);
247 if (sock_queue_rcv_skb(sk, nskb))
251 read_unlock(&hci_sk_list.lock);
/* Forward a traffic frame to the monitor channel: map the packet type and
 * direction to an HCI_MON_* opcode, make a private copy with headroom for
 * the monitor header (opcode, device index, payload length), and hand it
 * to queue_monitor_skb.  Returns early when no monitor socket is open. */
254 /* Send frame to monitor socket */
255 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
257 struct sk_buff *skb_copy = NULL;
258 struct hci_mon_hdr *hdr;
/* Fast path: nothing to do while no monitor socket exists. */
261 if (!atomic_read(&monitor_promisc))
264 BT_DBG("hdev %p len %d", hdev, skb->len);
266 switch (bt_cb(skb)->pkt_type) {
267 case HCI_COMMAND_PKT:
268 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
271 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
273 case HCI_ACLDATA_PKT:
274 if (bt_cb(skb)->incoming)
275 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
277 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
279 case HCI_SCODATA_PKT:
280 if (bt_cb(skb)->incoming)
281 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
283 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
290 /* Create a private copy with headroom */
291 #ifdef CONFIG_TIZEN_WIP
292 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
294 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
299 /* Put header before the data */
300 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
301 hdr->opcode = opcode;
302 hdr->index = cpu_to_le16(hdev->id);
/* hdr->len is the payload length: skb->len was captured before push. */
303 hdr->len = cpu_to_le16(skb->len);
305 queue_monitor_skb(skb_copy);
/* Build a monitor-channel control record for a device lifecycle event:
 * HCI_DEV_REG yields a NEW_INDEX record carrying type/bus/bdaddr/name,
 * otherwise a zero-payload DEL_INDEX record.  The monitor header is
 * pushed in front and the skb is timestamped.  Returns NULL on
 * allocation failure (per bt_skb_alloc; error paths lost in extraction). */
309 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
311 struct hci_mon_hdr *hdr;
312 struct hci_mon_new_index *ni;
318 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
322 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
323 ni->type = hdev->dev_type;
325 bacpy(&ni->bdaddr, &hdev->bdaddr);
/* Device name is copied as a fixed 8-byte field. */
326 memcpy(ni->name, hdev->name, 8);
328 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
332 skb = bt_skb_alloc(0, GFP_ATOMIC);
336 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
343 __net_timestamp(skb);
345 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
346 hdr->opcode = opcode;
347 hdr->index = cpu_to_le16(hdev->id);
348 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay a NEW_INDEX record for every registered controller to a freshly
 * bound monitor socket, so it sees the current device set.  Walks
 * hci_dev_list under its read lock. */
353 static void send_monitor_replay(struct sock *sk)
355 struct hci_dev *hdev;
357 read_lock(&hci_dev_list_lock);
359 list_for_each_entry(hdev, &hci_dev_list, list) {
362 skb = create_monitor_event(hdev, HCI_DEV_REG);
366 if (sock_queue_rcv_skb(sk, skb))
370 read_unlock(&hci_dev_list_lock);
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying 'dlen' bytes of
 * 'data' of subtype 'type', mark it incoming, timestamp it, and deliver
 * it through the normal hci_send_to_sock path. */
373 /* Generate internal stack event */
374 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
376 struct hci_event_hdr *hdr;
377 struct hci_ev_stack_internal *ev;
380 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
384 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
385 hdr->evt = HCI_EV_STACK_INTERNAL;
386 hdr->plen = sizeof(*ev) + dlen;
388 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
390 memcpy(ev->data, data, dlen);
/* Present the synthetic event as if it arrived from the controller. */
392 bt_cb(skb)->incoming = 1;
393 __net_timestamp(skb);
395 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
396 hci_send_to_sock(hdev, skb);
/* Notify user space about a device lifecycle event: emit a monitor-channel
 * record (if any monitor is open), raise an internal stack event, and on
 * HCI_DEV_UNREG detach every socket still bound to the vanishing device. */
400 void hci_sock_dev_event(struct hci_dev *hdev, int event)
402 struct hci_ev_si_device ev;
404 BT_DBG("hdev %s event %d", hdev->name, event);
406 /* Send event to monitor */
407 if (atomic_read(&monitor_promisc)) {
410 skb = create_monitor_event(hdev, event);
412 queue_monitor_skb(skb);
417 /* Send event to sockets */
419 ev.dev_id = hdev->id;
420 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
422 if (event == HCI_DEV_UNREG) {
425 /* Detach sockets from device */
426 read_lock(&hci_sk_list.lock);
427 sk_for_each(sk, &hci_sk_list.head) {
/* Per-socket lock while flipping state back to BT_OPEN. */
428 bh_lock_sock_nested(sk);
429 if (hci_pi(sk)->hdev == hdev) {
430 hci_pi(sk)->hdev = NULL;
432 sk->sk_state = BT_OPEN;
/* Wake up anyone sleeping on this socket's state. */
433 sk->sk_state_change(sk);
439 read_unlock(&hci_sk_list.lock);
/* Release an HCI socket: drop monitor promiscuity accounting, unlink it
 * from hci_sk_list, and -- when it held a device -- undo the USER-channel
 * takeover (re-announce to mgmt, close the device) or drop the RAW
 * promiscuity reference, then purge the queues. */
443 static int hci_sock_release(struct socket *sock)
445 struct sock *sk = sock->sk;
446 struct hci_dev *hdev;
448 BT_DBG("sock %p sk %p", sock, sk);
453 hdev = hci_pi(sk)->hdev;
455 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
456 atomic_dec(&monitor_promisc);
458 bt_sock_unlink(&hci_sk_list, sk);
461 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
/* Hand the controller back to the management interface. */
462 mgmt_index_added(hdev);
463 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
464 hci_dev_close(hdev->id);
467 atomic_dec(&hdev->promisc);
473 skb_queue_purge(&sk->sk_receive_queue);
474 skb_queue_purge(&sk->sk_write_queue);
/* HCIBLOCKADDR ioctl helper: copy a bdaddr_t from user space and add it
 * to the device blacklist as a BR/EDR address, under the device lock. */
480 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
485 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
490 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
492 hci_dev_unlock(hdev);
/* HCIUNBLOCKADDR ioctl helper: copy a bdaddr_t from user space and remove
 * it from the device blacklist (BR/EDR), under the device lock. */
497 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
502 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
507 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
509 hci_dev_unlock(hdev);
/* Dispatch ioctls that need a socket already bound to a device.  Refuses
 * devices taken over by a user channel, unconfigured devices, and non
 * BR/EDR controllers; privileged commands additionally require
 * CAP_NET_ADMIN.
 * NOTE(review): the case labels and early-return error codes were lost in
 * extraction -- the capability checks pair with the calls that follow. */
514 /* Ioctls that require bound socket */
515 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
518 struct hci_dev *hdev = hci_pi(sk)->hdev;
523 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
526 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
529 if (hdev->dev_type != HCI_BREDR)
534 if (!capable(CAP_NET_ADMIN))
539 return hci_get_conn_info(hdev, (void __user *) arg);
542 return hci_get_auth_info(hdev, (void __user *) arg);
545 if (!capable(CAP_NET_ADMIN))
547 return hci_sock_blacklist_add(hdev, (void __user *) arg);
550 if (!capable(CAP_NET_ADMIN))
552 return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Top-level ioctl handler for HCI sockets.  Only the RAW channel accepts
 * ioctls; device open/close/reset commands require CAP_NET_ADMIN, query
 * commands do not.  Anything unrecognized falls through to
 * hci_sock_bound_ioctl for the bound device.
 * NOTE(review): case labels were lost in extraction; the capability
 * checks pair with the calls that follow them. */
558 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
561 void __user *argp = (void __user *) arg;
562 struct sock *sk = sock->sk;
565 BT_DBG("cmd %x arg %lx", cmd, arg);
569 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
578 return hci_get_dev_list(argp);
581 return hci_get_dev_info(argp);
584 return hci_get_conn_list(argp);
587 if (!capable(CAP_NET_ADMIN))
589 return hci_dev_open(arg);
592 if (!capable(CAP_NET_ADMIN))
594 return hci_dev_close(arg);
597 if (!capable(CAP_NET_ADMIN))
599 return hci_dev_reset(arg);
602 if (!capable(CAP_NET_ADMIN))
604 return hci_dev_reset_stat(arg);
614 if (!capable(CAP_NET_ADMIN))
616 return hci_dev_cmd(cmd, argp);
619 return hci_inquiry(argp);
624 err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (and optionally a device).
 * RAW: optional device, bumps hdev->promisc.
 * USER: exclusive device takeover -- requires CAP_NET_ADMIN, a device
 *       that is fully down/configured, removes it from mgmt and opens it.
 * CONTROL: no device, CAP_NET_ADMIN.
 * MONITOR: no device, CAP_NET_RAW; replays existing device indexes and
 *       bumps monitor_promisc.
 * On success records the channel and moves the socket to BT_BOUND. */
631 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
634 struct sockaddr_hci haddr;
635 struct sock *sk = sock->sk;
636 struct hci_dev *hdev = NULL;
639 BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr) bytes; shorter binds leave the rest zero. */
644 memset(&haddr, 0, sizeof(haddr));
645 len = min_t(unsigned int, sizeof(haddr), addr_len);
646 memcpy(&haddr, addr, len);
648 if (haddr.hci_family != AF_BLUETOOTH)
/* Rebinding an already-bound socket is rejected. */
653 if (sk->sk_state == BT_BOUND) {
658 switch (haddr.hci_channel) {
659 case HCI_CHANNEL_RAW:
660 if (hci_pi(sk)->hdev) {
665 if (haddr.hci_dev != HCI_DEV_NONE) {
666 hdev = hci_dev_get(haddr.hci_dev);
672 atomic_inc(&hdev->promisc);
675 hci_pi(sk)->hdev = hdev;
678 case HCI_CHANNEL_USER:
679 if (hci_pi(sk)->hdev) {
/* User channel must name a concrete device. */
684 if (haddr.hci_dev == HCI_DEV_NONE) {
689 if (!capable(CAP_NET_ADMIN)) {
694 hdev = hci_dev_get(haddr.hci_dev);
/* Refuse takeover while the device is up or mid-setup/config. */
700 if (test_bit(HCI_UP, &hdev->flags) ||
701 test_bit(HCI_INIT, &hdev->flags) ||
702 test_bit(HCI_SETUP, &hdev->dev_flags) ||
703 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
709 if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
715 mgmt_index_removed(hdev);
717 err = hci_dev_open(hdev->id);
/* Roll back the takeover if the device failed to open. */
719 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
720 mgmt_index_added(hdev);
725 atomic_inc(&hdev->promisc);
727 hci_pi(sk)->hdev = hdev;
730 case HCI_CHANNEL_CONTROL:
731 if (haddr.hci_dev != HCI_DEV_NONE) {
736 if (!capable(CAP_NET_ADMIN)) {
743 case HCI_CHANNEL_MONITOR:
744 if (haddr.hci_dev != HCI_DEV_NONE) {
749 if (!capable(CAP_NET_RAW)) {
754 send_monitor_replay(sk);
756 atomic_inc(&monitor_promisc);
765 hci_pi(sk)->channel = haddr.hci_channel;
766 sk->sk_state = BT_BOUND;
/* getname(): report the bound address -- AF_BLUETOOTH family, the bound
 * device id and the channel.  Requires a bound device (the error path for
 * hdev == NULL was lost in extraction). */
773 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
774 int *addr_len, int peer)
776 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
777 struct sock *sk = sock->sk;
778 struct hci_dev *hdev;
781 BT_DBG("sock %p sk %p", sock, sk);
788 hdev = hci_pi(sk)->hdev;
794 *addr_len = sizeof(*haddr);
795 haddr->hci_family = AF_BLUETOOTH;
796 haddr->hci_dev = hdev->id;
797 haddr->hci_channel= hci_pi(sk)->channel;
/* Attach the ancillary data the socket asked for via setsockopt to a
 * received message: packet direction (HCI_CMSG_DIR) and/or the skb
 * timestamp (HCI_CMSG_TSTAMP), with a compat_timeval form for 32-bit
 * compat callers. */
804 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
807 __u32 mask = hci_pi(sk)->cmsg_mask;
809 if (mask & HCI_CMSG_DIR) {
810 int incoming = bt_cb(skb)->incoming;
811 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
815 if (mask & HCI_CMSG_TSTAMP) {
817 struct compat_timeval ctv;
823 skb_get_timestamp(skb, &tv);
/* 32-bit compat receivers get a compat_timeval instead of timeval. */
828 if (!COMPAT_USE_64BIT_TIME &&
829 (msg->msg_flags & MSG_CMSG_COMPAT)) {
830 ctv.tv_sec = tv.tv_sec;
831 ctv.tv_usec = tv.tv_usec;
837 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* recvmsg(): dequeue one datagram, flag MSG_TRUNC when the caller's
 * buffer is shorter than the frame, copy it out, then add per-channel
 * ancillary data (RAW gets hci_sock_cmsg, the other channels a plain
 * receive timestamp).  Returns bytes copied or a negative errno. */
841 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
842 struct msghdr *msg, size_t len, int flags)
844 int noblock = flags & MSG_DONTWAIT;
845 struct sock *sk = sock->sk;
849 BT_DBG("sock %p, sk %p", sock, sk);
/* Out-of-band data is not supported on HCI sockets. */
851 if (flags & (MSG_OOB))
854 if (sk->sk_state == BT_CLOSED)
857 skb = skb_recv_datagram(sk, flags, noblock, &err);
863 msg->msg_flags |= MSG_TRUNC;
867 skb_reset_transport_header(skb);
868 #ifdef CONFIG_TIZEN_WIP
/* Older-kernel copy API kept for the Tizen build. */
869 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
871 err = skb_copy_datagram_msg(skb, 0, msg, copied);
874 switch (hci_pi(sk)->channel) {
875 case HCI_CHANNEL_RAW:
876 hci_sock_cmsg(sk, msg, skb);
878 case HCI_CHANNEL_USER:
879 case HCI_CHANNEL_CONTROL:
880 case HCI_CHANNEL_MONITOR:
881 sock_recv_timestamp(msg, sk, skb);
885 skb_free_datagram(sk, skb);
887 return err ? : copied;
/* sendmsg(): validate flags and length (4..HCI_MAX_FRAME_SIZE), route by
 * channel -- CONTROL goes to mgmt_control, MONITOR is write-only -- then
 * copy the frame, peel off the leading packet-type byte and queue it:
 * USER channel frames go straight to raw_q after a type check; RAW
 * command packets are checked against hci_sec_filter unless the caller
 * has CAP_NET_RAW, marked as single-command requests and queued to
 * cmd_q; other RAW data needs CAP_NET_RAW and goes to raw_q. */
890 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
891 struct msghdr *msg, size_t len)
893 struct sock *sk = sock->sk;
894 struct hci_dev *hdev;
898 BT_DBG("sock %p sk %p", sock, sk);
900 if (msg->msg_flags & MSG_OOB)
903 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
/* Minimum 4 bytes: 1 type byte + smallest HCI header. */
906 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
911 switch (hci_pi(sk)->channel) {
912 case HCI_CHANNEL_RAW:
913 case HCI_CHANNEL_USER:
915 case HCI_CHANNEL_CONTROL:
916 err = mgmt_control(sk, msg, len);
918 case HCI_CHANNEL_MONITOR:
926 hdev = hci_pi(sk)->hdev;
932 if (!test_bit(HCI_UP, &hdev->flags)) {
937 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
940 #ifdef CONFIG_TIZEN_WIP
/* Older-kernel copy API kept for the Tizen build. */
941 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
943 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
/* First byte is the H:4 packet-type indicator. */
949 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
952 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
953 /* No permission check is needed for user channel
954 * since that gets enforced when binding the socket.
956 * However check that the packet type is valid.
958 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
959 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
960 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
965 skb_queue_tail(&hdev->raw_q, skb);
966 queue_work(hdev->workqueue, &hdev->tx_work);
967 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
968 u16 opcode = get_unaligned_le16(skb->data);
969 u16 ogf = hci_opcode_ogf(opcode);
970 u16 ocf = hci_opcode_ocf(opcode);
/* Unprivileged senders may only issue whitelisted OGF/OCF commands. */
972 if (((ogf > HCI_SFLT_MAX_OGF) ||
973 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
974 &hci_sec_filter.ocf_mask[ogf])) &&
975 !capable(CAP_NET_RAW)) {
/* OGF 0x3f (vendor) commands presumably bypass cmd_q via raw_q --
 * the condition line was lost in extraction; TODO confirm. */
981 skb_queue_tail(&hdev->raw_q, skb);
982 queue_work(hdev->workqueue, &hdev->tx_work);
984 /* Stand-alone HCI commands must be flagged as
985 * single-command requests.
987 bt_cb(skb)->req.start = true;
989 skb_queue_tail(&hdev->cmd_q, skb);
990 queue_work(hdev->workqueue, &hdev->cmd_work);
993 if (!capable(CAP_NET_RAW)) {
998 skb_queue_tail(&hdev->raw_q, skb);
999 queue_work(hdev->workqueue, &hdev->tx_work);
/* setsockopt(SOL_HCI): RAW channel only.  HCI_DATA_DIR and
 * HCI_TIME_STAMP toggle bits in cmsg_mask; HCI_FILTER installs a
 * hci_ufilter, restricted by hci_sec_filter for callers without
 * CAP_NET_RAW.
 * NOTE(review): case labels and lock/unlock lines were lost in
 * extraction. */
1013 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1014 char __user *optval, unsigned int len)
1016 struct hci_ufilter uf = { .opcode = 0 };
1017 struct sock *sk = sock->sk;
1018 int err = 0, opt = 0;
1020 BT_DBG("sk %p, opt %d", sk, optname);
1024 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1031 if (get_user(opt, (int __user *)optval)) {
1037 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1039 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1042 case HCI_TIME_STAMP:
1043 if (get_user(opt, (int __user *)optval)) {
1049 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1051 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1056 struct hci_filter *f = &hci_pi(sk)->filter;
/* Seed uf with the current filter so a short copy keeps old values. */
1058 uf.type_mask = f->type_mask;
1059 uf.opcode = f->opcode;
1060 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1061 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1064 len = min_t(unsigned int, len, sizeof(uf));
1065 if (copy_from_user(&uf, optval, len)) {
/* Unprivileged callers cannot widen beyond the security filter. */
1070 if (!capable(CAP_NET_RAW)) {
1071 uf.type_mask &= hci_sec_filter.type_mask;
1072 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1073 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1077 struct hci_filter *f = &hci_pi(sk)->filter;
1079 f->type_mask = uf.type_mask;
1080 f->opcode = uf.opcode;
1081 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1082 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* getsockopt(SOL_HCI): RAW channel only.  Reports the HCI_DATA_DIR /
 * HCI_TIME_STAMP flags as 0/1 integers and the installed filter as a
 * hci_ufilter, truncated to the caller's optlen. */
1096 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1097 char __user *optval, int __user *optlen)
1099 struct hci_ufilter uf;
1100 struct sock *sk = sock->sk;
1101 int len, opt, err = 0;
1103 BT_DBG("sk %p, opt %d", sk, optname);
1105 if (get_user(len, optlen))
1110 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1117 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1122 if (put_user(opt, optval))
1126 case HCI_TIME_STAMP:
1127 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1132 if (put_user(opt, optval))
1138 struct hci_filter *f = &hci_pi(sk)->filter;
1140 memset(&uf, 0, sizeof(uf));
1141 uf.type_mask = f->type_mask;
1142 uf.opcode = f->opcode;
1143 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1144 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1147 len = min_t(unsigned int, len, sizeof(uf));
1148 if (copy_to_user(optval, &uf, len))
/* proto_ops for HCI sockets: datagram semantics, no listen/connect/accept
 * (sock_no_* stubs for everything connection-oriented). */
1162 static const struct proto_ops hci_sock_ops = {
1163 .family = PF_BLUETOOTH,
1164 .owner = THIS_MODULE,
1165 .release = hci_sock_release,
1166 .bind = hci_sock_bind,
1167 .getname = hci_sock_getname,
1168 .sendmsg = hci_sock_sendmsg,
1169 .recvmsg = hci_sock_recvmsg,
1170 .ioctl = hci_sock_ioctl,
1171 .poll = datagram_poll,
1172 .listen = sock_no_listen,
1173 .shutdown = sock_no_shutdown,
1174 .setsockopt = hci_sock_setsockopt,
1175 .getsockopt = hci_sock_getsockopt,
1176 .connect = sock_no_connect,
1177 .socketpair = sock_no_socketpair,
1178 .accept = sock_no_accept,
1179 .mmap = sock_no_mmap
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * hci_pinfo tail accessed through hci_pi(). */
1182 static struct proto hci_sk_proto = {
1184 .owner = THIS_MODULE,
1185 .obj_size = sizeof(struct hci_pinfo)
/* socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI) backend: only SOCK_RAW is
 * supported.  Allocates the sock, initializes it to BT_OPEN and links it
 * into hci_sk_list. */
1188 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1193 BT_DBG("sock %p", sock);
1195 if (sock->type != SOCK_RAW)
1196 return -ESOCKTNOSUPPORT;
1198 sock->ops = &hci_sock_ops;
1200 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1204 sock_init_data(sock, sk);
1206 sock_reset_flag(sk, SOCK_ZAPPED);
1208 sk->sk_protocol = protocol;
1210 sock->state = SS_UNCONNECTED;
1211 sk->sk_state = BT_OPEN;
1213 bt_sock_link(&hci_sk_list, sk);
/* Family registration entry for BTPROTO_HCI (see hci_sock_init). */
1217 static const struct net_proto_family hci_sock_family_ops = {
1218 .family = PF_BLUETOOTH,
1219 .owner = THIS_MODULE,
1220 .create = hci_sock_create,
/* Module init: register the proto, the BTPROTO_HCI socket family and the
 * /proc entry, unwinding in reverse order on failure (the goto labels
 * were lost in extraction -- the trailing proto_unregister is the error
 * path). */
1223 int __init hci_sock_init(void)
/* sockaddr_hci must fit in a generic sockaddr for bind()/getname(). */
1227 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1229 err = proto_register(&hci_sk_proto, 0);
1233 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1235 BT_ERR("HCI socket registration failed");
1239 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1241 BT_ERR("Failed to create HCI proc file");
1242 bt_sock_unregister(BTPROTO_HCI);
1246 BT_INFO("HCI socket layer initialized");
1251 proto_unregister(&hci_sk_proto);
/* Module teardown: remove the proc entry, unregister the socket family
 * and the proto -- exact reverse of hci_sock_init. */
1255 void hci_sock_cleanup(void)
1257 bt_procfs_cleanup(&init_net, "hci");
1258 bt_sock_unregister(BTPROTO_HCI);
1259 proto_unregister(&hci_sk_proto);