2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
26 #include <linux/compat.h>
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
37 #include <net/bluetooth/mgmt_tizen.h>
40 #include "mgmt_util.h"
/* Registry of management channels (e.g. the mgmt control channel),
 * protected by mgmt_chan_list_lock below.
 */
42 static LIST_HEAD(mgmt_chan_list);
43 static DEFINE_MUTEX(mgmt_chan_list_lock);
/* Allocator for unique non-zero per-socket cookies used in monitor
 * CTRL open/close/command messages.
 */
45 static DEFINE_IDA(sock_cookie_ida);
/* Number of bound monitor-channel sockets; a non-zero value enables the
 * hci_send_to_monitor() fast-path check.
 */
47 static atomic_t monitor_promisc = ATOMIC_INIT(0);
49 /* ----- HCI socket interface ----- */
/* Per-socket protocol info lives directly behind struct sock. */
52 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Fields of struct hci_pinfo (the struct head/tail are elided in this
 * excerpt — see the full file for the remaining members such as hdev,
 * flags and cookie, which are referenced throughout below).
 */
57 	struct hci_filter filter;
59 	unsigned short channel;
62 	char comm[TASK_COMM_LEN];
/* Resolve the hci_dev bound to @sk.
 *
 * Returns ERR_PTR(-EBADFD) when no device is attached and
 * ERR_PTR(-EPIPE) when the attached device is being unregistered;
 * otherwise the hdev pointer (success path elided in this excerpt).
 */
65 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
67 	struct hci_dev *hdev = hci_pi(sk)->hdev;
70 		return ERR_PTR(-EBADFD);
71 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
72 		return ERR_PTR(-EPIPE);
/* Set bit @nr in the per-socket flags word (atomic). */
76 void hci_sock_set_flag(struct sock *sk, int nr)
78 	set_bit(nr, &hci_pi(sk)->flags);
/* Clear bit @nr in the per-socket flags word (atomic). */
81 void hci_sock_clear_flag(struct sock *sk, int nr)
83 	clear_bit(nr, &hci_pi(sk)->flags);
/* Test bit @nr in the per-socket flags word; non-zero when set. */
86 int hci_sock_test_flag(struct sock *sk, int nr)
88 	return test_bit(nr, &hci_pi(sk)->flags);
/* Return the HCI channel (RAW/USER/MONITOR/...) this socket is bound to. */
91 unsigned short hci_sock_get_channel(struct sock *sk)
93 	return hci_pi(sk)->channel;
/* Return the socket's monitor cookie (0 when none has been generated). */
96 u32 hci_sock_get_cookie(struct sock *sk)
98 	return hci_pi(sk)->cookie;
/* Lazily allocate a unique cookie for @sk and capture the current task
 * name for monitor CTRL_OPEN reporting.
 *
 * Returns true only when a new cookie was generated here; the
 * already-has-cookie early path is elided in this excerpt.
 */
101 static bool hci_sock_gen_cookie(struct sock *sk)
103 	int id = hci_pi(sk)->cookie;
/* IDs start at 1 so that 0 can mean "no cookie assigned". */
106 		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
110 		hci_pi(sk)->cookie = id;
111 		get_task_comm(hci_pi(sk)->comm, current);
/* Release the socket's cookie back to the IDA.  The cookie field is
 * poisoned with 0xffffffff (not 0) before removal; the guard that
 * skips sockets without a cookie is elided in this excerpt.
 */
118 static void hci_sock_free_cookie(struct sock *sk)
120 	int id = hci_pi(sk)->cookie;
123 		hci_pi(sk)->cookie = 0xffffffff;
124 		ida_simple_remove(&sock_cookie_ida, id);
/* Non-atomic bit test over a __u32 array (used for event/type masks in
 * struct hci_filter); bit @nr is word nr>>5, bit nr&31 within the word.
 */
128 static inline int hci_test_bit(int nr, const void *addr)
130 	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
133 /* Security filter */
134 #define HCI_SFLT_MAX_OGF	5
/* Per-OGF bitmap of OCFs that unprivileged raw sockets may send
 * (type/event mask members are elided in this excerpt).
 */
136 struct hci_sec_filter {
139 	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
/* Whitelist of packet types, events and commands permitted on an
 * unprivileged RAW socket.  Each OCF row is a 128-bit mask; one bit per
 * opcode-command-field value within the OGF named in the comment.
 */
142 static const struct hci_sec_filter hci_sec_filter = {
146 	{ 0x1000d9fe, 0x0000b00c },
151 	{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
152 	/* OGF_LINK_POLICY */
153 	{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
155 	{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
157 	{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
158 	/* OGF_STATUS_PARAM */
159 	{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all HCI sockets, guarded by its own rwlock; readers
 * (broadcast paths) take it read-side, bind/unlink take it write-side.
 */
163 static struct bt_sock_list hci_sk_list = {
164 	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Apply the socket's HCI filter to @skb.
 *
 * Checks, in order: packet-type mask, then (for event packets only) the
 * event mask, then — for cmd-complete/cmd-status events — the opcode
 * filter.  The return statements for the filtered/pass cases are elided
 * in this excerpt.
 */
167 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
169 	struct hci_filter *flt;
170 	int flt_type, flt_event;
173 	flt = &hci_pi(sk)->filter;
175 	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
177 	if (!test_bit(flt_type, &flt->type_mask))
180 	/* Extra filter for event packets only */
181 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
/* First payload byte of an event packet is the event code. */
184 	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
186 	if (!hci_test_bit(flt_event, &flt->event_mask))
189 	/* Check filter only when opcode is set */
/* Opcode sits at offset 3 in cmd-complete and offset 4 in cmd-status. */
193 	if (flt_event == HCI_EV_CMD_COMPLETE &&
194 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
197 	if (flt_event == HCI_EV_CMD_STATUS &&
198 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
204 /* Send frame to RAW socket */
/* Broadcast @skb to every bound socket attached to @hdev.
 *
 * RAW-channel sockets only receive the core packet types and are
 * subject to is_filtered_packet(); USER-channel sockets only receive
 * incoming traffic.  A single private copy (with the type byte pushed
 * in front) is made lazily and then cloned per recipient.  Several
 * `continue`/cleanup lines are elided in this excerpt.
 */
205 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
208 	struct sk_buff *skb_copy = NULL;
210 	BT_DBG("hdev %p len %d", hdev, skb->len);
212 	read_lock(&hci_sk_list.lock);
214 	sk_for_each(sk, &hci_sk_list.head) {
215 		struct sk_buff *nskb;
217 		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
220 		/* Don't send frame to the socket it came from */
224 		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
225 			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
226 			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
227 			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
228 			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
229 			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
231 			if (is_filtered_packet(sk, skb))
233 		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
234 			if (!bt_cb(skb)->incoming)
236 			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
237 			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
238 			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
239 			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
242 			/* Don't send frame to other channel types */
247 			/* Create a private copy with headroom */
248 			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
252 			/* Put type byte before the data */
253 			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
256 		nskb = skb_clone(skb_copy, GFP_ATOMIC);
/* sock_queue_rcv_skb() failure path (kfree_skb) elided here. */
260 		if (sock_queue_rcv_skb(sk, nskb))
264 	read_unlock(&hci_sk_list.lock);
269 /* Send frame to sockets with specific channel */
/* Lockless-caller variant: hci_sk_list.lock must already be held by the
 * caller (see hci_send_to_channel() wrapper).  Clones @skb for every
 * bound socket on @channel that has @flag set, skipping @skip_sk.
 */
270 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
271 				  int flag, struct sock *skip_sk)
275 	BT_DBG("channel %u len %d", channel, skb->len);
277 	sk_for_each(sk, &hci_sk_list.head) {
278 		struct sk_buff *nskb;
280 		/* Ignore socket without the flag set */
281 		if (!hci_sock_test_flag(sk, flag))
284 		/* Skip the original socket */
288 		if (sk->sk_state != BT_BOUND)
291 		if (hci_pi(sk)->channel != channel)
294 		nskb = skb_clone(skb, GFP_ATOMIC);
/* Queue failure frees the clone (elided in this excerpt). */
298 		if (sock_queue_rcv_skb(sk, nskb))
/* Locked wrapper around __hci_send_to_channel(): takes the socket-list
 * read lock for the duration of the broadcast.
 */
304 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
305 			 int flag, struct sock *skip_sk)
307 	read_lock(&hci_sk_list.lock);
308 	__hci_send_to_channel(channel, skb, flag, skip_sk);
309 	read_unlock(&hci_sk_list.lock);
312 /* Send frame to monitor socket */
/* Mirror @skb to all monitor-channel sockets, wrapped in a
 * struct hci_mon_hdr whose opcode encodes packet type and direction.
 * Cheap early exit when no monitor socket is open.  The `break`s of the
 * switch and the copy-failure check are elided in this excerpt.
 */
313 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
315 	struct sk_buff *skb_copy = NULL;
316 	struct hci_mon_hdr *hdr;
319 	if (!atomic_read(&monitor_promisc))
322 	BT_DBG("hdev %p len %d", hdev, skb->len);
324 	switch (hci_skb_pkt_type(skb)) {
325 	case HCI_COMMAND_PKT:
326 		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
329 		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
331 	case HCI_ACLDATA_PKT:
332 		if (bt_cb(skb)->incoming)
333 			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
335 			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
337 	case HCI_SCODATA_PKT:
338 		if (bt_cb(skb)->incoming)
339 			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
341 			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
343 	case HCI_ISODATA_PKT:
344 		if (bt_cb(skb)->incoming)
345 			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
347 			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
/* Presumably the HCI_DIAG_PKT case — TODO confirm against full file. */
350 		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
356 	/* Create a private copy with headroom */
357 	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
361 	/* Put header before the data */
362 	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
363 	hdr->opcode = opcode;
364 	hdr->index = cpu_to_le16(hdev->id);
/* Payload length excludes the monitor header just pushed. */
365 	hdr->len = cpu_to_le16(skb->len);
367 	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
368 			    HCI_SOCK_TRUSTED, NULL);
/* Emit a per-control-socket CTRL_EVENT to the monitor channel.
 *
 * For every CONTROL-channel socket with @flag set (except @skip_sk), a
 * fresh skb is built carrying that socket's cookie, the @event code and
 * @data, timestamped with @tstamp, then broadcast to monitor sockets.
 * The index field assignment and several alloc-failure checks are
 * elided in this excerpt.
 */
372 void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
373 				 void *data, u16 data_len, ktime_t tstamp,
374 				 int flag, struct sock *skip_sk)
/* Index is the hdev id, or MGMT_INDEX_NONE when no device is involved. */
380 		index = cpu_to_le16(hdev->id);
382 		index = cpu_to_le16(MGMT_INDEX_NONE);
384 	read_lock(&hci_sk_list.lock);
386 	sk_for_each(sk, &hci_sk_list.head) {
387 		struct hci_mon_hdr *hdr;
390 		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
393 		/* Ignore socket without the flag set */
394 		if (!hci_sock_test_flag(sk, flag))
397 		/* Skip the original socket */
/* 4-byte cookie + 2-byte event code precede the payload. */
401 		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
405 		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
406 		put_unaligned_le16(event, skb_put(skb, 2));
409 			skb_put_data(skb, data, data_len);
411 		skb->tstamp = tstamp;
413 		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
414 		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
416 		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Already holding hci_sk_list.lock, so use the unlocked variant. */
418 		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
419 				      HCI_SOCK_TRUSTED, NULL);
423 	read_unlock(&hci_sk_list.lock);
/* Build the monitor skb describing a device lifecycle @event
 * (register/unregister, open/close, index info).  Returns NULL on
 * allocation failure or unknown event; case labels, breaks and NULL
 * checks are elided in this excerpt.
 */
426 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
428 	struct hci_mon_hdr *hdr;
429 	struct hci_mon_new_index *ni;
430 	struct hci_mon_index_info *ii;
/* HCI_DEV_REG -> NEW_INDEX with type/bus/bdaddr/name payload. */
436 		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
440 		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
441 		ni->type = hdev->dev_type;
443 		bacpy(&ni->bdaddr, &hdev->bdaddr);
444 		memcpy(ni->name, hdev->name, 8);
446 		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
/* HCI_DEV_UNREG -> empty DEL_INDEX record. */
450 		skb = bt_skb_alloc(0, GFP_ATOMIC);
454 		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
/* Skip INDEX_INFO while the manufacturer is still unknown (0xffff). */
458 		if (hdev->manufacturer == 0xffff)
463 		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
467 		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
468 		bacpy(&ii->bdaddr, &hdev->bdaddr);
469 		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
471 		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
/* OPEN_INDEX / CLOSE_INDEX are empty records as well. */
475 		skb = bt_skb_alloc(0, GFP_ATOMIC);
479 		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
483 		skb = bt_skb_alloc(0, GFP_ATOMIC);
487 		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
494 	__net_timestamp(skb);
496 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
497 	hdr->opcode = opcode;
498 	hdr->index = cpu_to_le16(hdev->id);
499 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Build the CTRL_OPEN monitor record for @sk: cookie, channel format,
 * version, trust flag and the owning task's comm name.  Returns NULL
 * for sockets without a cookie or on unsupported channels; the format
 * assignments and breaks per case are elided in this excerpt.
 */
504 static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
506 	struct hci_mon_hdr *hdr;
512 	/* No message needed when cookie is not present */
513 	if (!hci_pi(sk)->cookie)
516 	switch (hci_pi(sk)->channel) {
517 	case HCI_CHANNEL_RAW:
519 		ver[0] = BT_SUBSYS_VERSION;
520 		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
522 	case HCI_CHANNEL_USER:
524 		ver[0] = BT_SUBSYS_VERSION;
525 		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
527 	case HCI_CHANNEL_CONTROL:
529 		mgmt_fill_version_info(ver);
532 		/* No message for unsupported format */
/* 4 cookie + 2 format + 3 ver + 4 flags + 1 len byte = 14 fixed bytes. */
536 	skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
540 	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
542 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
543 	put_unaligned_le16(format, skb_put(skb, 2));
544 	skb_put_data(skb, ver, sizeof(ver));
545 	put_unaligned_le32(flags, skb_put(skb, 4));
546 	skb_put_u8(skb, TASK_COMM_LEN);
547 	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
549 	__net_timestamp(skb);
551 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
552 	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
553 	if (hci_pi(sk)->hdev)
554 		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
556 		hdr->index = cpu_to_le16(HCI_DEV_NONE);
557 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Build the CTRL_CLOSE monitor record for @sk — just the 4-byte cookie
 * plus the monitor header.  Returns NULL when the socket has no cookie
 * or sits on an unsupported channel (default case elided here).
 */
562 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
564 	struct hci_mon_hdr *hdr;
567 	/* No message needed when cookie is not present */
568 	if (!hci_pi(sk)->cookie)
571 	switch (hci_pi(sk)->channel) {
572 	case HCI_CHANNEL_RAW:
573 	case HCI_CHANNEL_USER:
574 	case HCI_CHANNEL_CONTROL:
577 		/* No message for unsupported format */
581 	skb = bt_skb_alloc(4, GFP_ATOMIC);
585 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
587 	__net_timestamp(skb);
589 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
590 	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
591 	if (hci_pi(sk)->hdev)
592 		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
594 		hdr->index = cpu_to_le16(HCI_DEV_NONE);
595 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Build the CTRL_COMMAND monitor record: socket cookie, mgmt @opcode
 * and the raw command payload @buf/@len destined for @index.
 */
600 static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
604 	struct hci_mon_hdr *hdr;
/* 4-byte cookie + 2-byte opcode precede the payload. */
607 	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
611 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
612 	put_unaligned_le16(opcode, skb_put(skb, 2));
615 		skb_put_data(skb, buf, len);
617 	__net_timestamp(skb);
619 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
620 	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
621 	hdr->index = cpu_to_le16(index);
622 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Queue a printf-formatted SYSTEM_NOTE on monitor socket @sk.  The
 * string is sized with a first vsnprintf pass, then written with
 * vsprintf and explicitly NUL-terminated.  va_start/va_end lines are
 * elided in this excerpt.
 */
627 static void __printf(2, 3)
628 send_monitor_note(struct sock *sk, const char *fmt, ...)
631 	struct hci_mon_hdr *hdr;
636 	len = vsnprintf(NULL, 0, fmt, args);
639 	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
644 	vsprintf(skb_put(skb, len), fmt, args);
645 	*(u8 *)skb_put(skb, 1) = 0;
648 	__net_timestamp(skb);
650 	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
651 	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
652 	hdr->index = cpu_to_le16(HCI_DEV_NONE);
653 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Queue failure frees the skb (cleanup line elided here). */
655 	if (sock_queue_rcv_skb(sk, skb))
/* Replay the current device state to a freshly bound monitor socket:
 * for each registered hdev, emit DEV_REG, then DEV_OPEN if running,
 * then DEV_UP or DEV_SETUP depending on flags.  kfree_skb-on-failure
 * lines are elided in this excerpt.
 */
659 static void send_monitor_replay(struct sock *sk)
661 	struct hci_dev *hdev;
663 	read_lock(&hci_dev_list_lock);
665 	list_for_each_entry(hdev, &hci_dev_list, list) {
668 		skb = create_monitor_event(hdev, HCI_DEV_REG);
672 		if (sock_queue_rcv_skb(sk, skb))
675 		if (!test_bit(HCI_RUNNING, &hdev->flags))
678 		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
682 		if (sock_queue_rcv_skb(sk, skb))
685 		if (test_bit(HCI_UP, &hdev->flags))
686 			skb = create_monitor_event(hdev, HCI_DEV_UP);
687 		else if (hci_dev_test_flag(hdev, HCI_SETUP))
688 			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
693 			if (sock_queue_rcv_skb(sk, skb))
698 	read_unlock(&hci_dev_list_lock);
/* Replay a CTRL_OPEN record for every existing socket to a freshly
 * bound monitor socket @mon_sk (sockets without cookies yield NULL and
 * are skipped — NULL check elided in this excerpt).
 */
701 static void send_monitor_control_replay(struct sock *mon_sk)
705 	read_lock(&hci_sk_list.lock);
707 	sk_for_each(sk, &hci_sk_list.head) {
710 		skb = create_monitor_ctrl_open(sk);
714 		if (sock_queue_rcv_skb(mon_sk, skb))
718 	read_unlock(&hci_sk_list.lock);
721 /* Generate internal stack event */
/* Fabricate an HCI_EV_STACK_INTERNAL event carrying @type/@data and
 * deliver it to raw sockets via hci_send_to_sock().  Marked incoming so
 * USER-channel filtering treats it as received traffic.
 */
722 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
724 	struct hci_event_hdr *hdr;
725 	struct hci_ev_stack_internal *ev;
728 	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
732 	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
733 	hdr->evt = HCI_EV_STACK_INTERNAL;
734 	hdr->plen = sizeof(*ev) + dlen;
736 	ev = skb_put(skb, sizeof(*ev) + dlen);
/* ev->type assignment elided in this excerpt. */
738 	memcpy(ev->data, data, dlen);
740 	bt_cb(skb)->incoming = 1;
741 	__net_timestamp(skb);
743 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
744 	hci_send_to_sock(hdev, skb);
/* Device lifecycle notification hook.
 *
 * Forwards @event to monitor sockets, raises an internal stack event
 * for register/unregister/up/down, and on HCI_DEV_UNREG wakes every
 * socket still bound to the dead @hdev (the sk_err assignment before
 * sk_state_change is elided in this excerpt).
 */
748 void hci_sock_dev_event(struct hci_dev *hdev, int event)
750 	BT_DBG("hdev %s event %d", hdev->name, event);
752 	if (atomic_read(&monitor_promisc)) {
755 		/* Send event to monitor */
756 		skb = create_monitor_event(hdev, event);
758 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
759 					    HCI_SOCK_TRUSTED, NULL);
/* Only REG/UNREG/UP/DOWN are surfaced as stack-internal events. */
764 	if (event <= HCI_DEV_DOWN) {
765 		struct hci_ev_si_device ev;
767 		/* Send event to sockets */
769 		ev.dev_id = hdev->id;
770 		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
773 	if (event == HCI_DEV_UNREG) {
776 		/* Wake up sockets using this dead device */
777 		read_lock(&hci_sk_list.lock);
778 		sk_for_each(sk, &hci_sk_list.head) {
779 			if (hci_pi(sk)->hdev == hdev) {
781 				sk->sk_state_change(sk);
784 		read_unlock(&hci_sk_list.lock);
/* Linear lookup of a registered mgmt channel by number.  Caller must
 * hold mgmt_chan_list_lock; returns NULL when not found (return lines
 * elided in this excerpt).
 */
788 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
790 	struct hci_mgmt_chan *c;
792 	list_for_each_entry(c, &mgmt_chan_list, list) {
793 		if (c->channel == channel)
/* Locked wrapper around __hci_mgmt_chan_find(). */
800 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
802 	struct hci_mgmt_chan *c;
804 	mutex_lock(&mgmt_chan_list_lock);
805 	c = __hci_mgmt_chan_find(channel);
806 	mutex_unlock(&mgmt_chan_list_lock);
/* Register a management channel.  Channel numbers below
 * HCI_CHANNEL_CONTROL are reserved and rejected; duplicates fail under
 * the list lock (error return values elided in this excerpt).
 */
811 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
813 	if (c->channel < HCI_CHANNEL_CONTROL)
816 	mutex_lock(&mgmt_chan_list_lock);
817 	if (__hci_mgmt_chan_find(c->channel)) {
818 		mutex_unlock(&mgmt_chan_list_lock);
822 	list_add_tail(&c->list, &mgmt_chan_list);
824 	mutex_unlock(&mgmt_chan_list_lock);
828 EXPORT_SYMBOL(hci_mgmt_chan_register);
/* Unregister a management channel under the list lock (the list_del
 * line is elided in this excerpt).
 */
830 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
832 	mutex_lock(&mgmt_chan_list_lock);
834 	mutex_unlock(&mgmt_chan_list_lock);
836 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Socket release (close) handler.
 *
 * Per-channel teardown: monitor sockets drop monitor_promisc; raw/user/
 * control sockets emit a CTRL_CLOSE monitor record and free their
 * cookie.  USER-channel sockets additionally force the controller down
 * and hand the index back to mgmt.  Locking lines (lock_sock etc.) and
 * the final sock_put/return are elided in this excerpt.
 */
838 static int hci_sock_release(struct socket *sock)
840 	struct sock *sk = sock->sk;
841 	struct hci_dev *hdev;
844 	BT_DBG("sock %p sk %p", sock, sk);
851 	switch (hci_pi(sk)->channel) {
852 	case HCI_CHANNEL_MONITOR:
853 		atomic_dec(&monitor_promisc);
855 	case HCI_CHANNEL_RAW:
856 	case HCI_CHANNEL_USER:
857 	case HCI_CHANNEL_CONTROL:
858 		/* Send event to monitor */
859 		skb = create_monitor_ctrl_close(sk);
861 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
862 					    HCI_SOCK_TRUSTED, NULL);
866 		hci_sock_free_cookie(sk);
870 	bt_sock_unlink(&hci_sk_list, sk);
872 	hdev = hci_pi(sk)->hdev;
874 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
875 			/* When releasing a user channel exclusive access,
876 			 * call hci_dev_do_close directly instead of calling
877 			 * hci_dev_close to ensure the exclusive access will
878 			 * be released and the controller brought back down.
880 			 * The checking of HCI_AUTO_OFF is not needed in this
881 			 * case since it will have been cleared already when
882 			 * opening the user channel.
884 			hci_dev_do_close(hdev);
885 			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
886 			mgmt_index_added(hdev);
/* Non-user channels only drop their promiscuous reference. */
889 		atomic_dec(&hdev->promisc);
895 	skb_queue_purge(&sk->sk_receive_queue);
896 	skb_queue_purge(&sk->sk_write_queue);
/* HCIBLOCKADDR ioctl backend: copy a bdaddr from userspace and add it
 * to the device's BR/EDR reject list under hdev lock.
 */
903 static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
908 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
913 	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
915 	hci_dev_unlock(hdev);
/* HCIUNBLOCKADDR ioctl backend: remove a user-supplied bdaddr from the
 * device's BR/EDR reject list under hdev lock.
 */
920 static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
925 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
930 	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
932 	hci_dev_unlock(hdev);
937 /* Ioctls that require bound socket */
/* Dispatch ioctls that need an attached hdev.  Rejects user-channel,
 * unconfigured and non-primary devices up front; privileged commands
 * additionally require CAP_NET_ADMIN.  The case labels and error
 * returns between checks are elided in this excerpt.
 */
938 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
941 	struct hci_dev *hdev = hci_hdev_from_sock(sk);
944 		return PTR_ERR(hdev);
946 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
949 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
952 	if (hdev->dev_type != HCI_PRIMARY)
957 		if (!capable(CAP_NET_ADMIN))
962 		return hci_get_conn_info(hdev, (void __user *)arg);
965 		return hci_get_auth_info(hdev, (void __user *)arg);
968 		if (!capable(CAP_NET_ADMIN))
970 		return hci_sock_reject_list_add(hdev, (void __user *)arg);
973 		if (!capable(CAP_NET_ADMIN))
975 		return hci_sock_reject_list_del(hdev, (void __user *)arg);
/* Top-level ioctl handler for HCI sockets (RAW channel only).
 *
 * The first ioctl on an unbound raw socket generates the socket cookie
 * and a one-time monitor CTRL_OPEN notification.  Global commands are
 * handled inline; device-bound ones fall through to
 * hci_sock_bound_ioctl() under lock_sock.  Case labels, capability
 * error returns and the lock/unlock lines are elided in this excerpt.
 */
981 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
984 	void __user *argp = (void __user *)arg;
985 	struct sock *sk = sock->sk;
988 	BT_DBG("cmd %x arg %lx", cmd, arg);
992 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
997 	/* When calling an ioctl on an unbound raw socket, then ensure
998 	 * that the monitor gets informed. Ensure that the resulting event
999 	 * is only send once by checking if the cookie exists or not. The
1000 	 * socket cookie will be only ever generated once for the lifetime
1001 	 * of a given socket.
1003 	if (hci_sock_gen_cookie(sk)) {
1004 		struct sk_buff *skb;
1006 		if (capable(CAP_NET_ADMIN))
1007 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1009 		/* Send event to monitor */
1010 		skb = create_monitor_ctrl_open(sk);
1012 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1013 					    HCI_SOCK_TRUSTED, NULL);
1022 		return hci_get_dev_list(argp);
1025 		return hci_get_dev_info(argp);
1027 	case HCIGETCONNLIST:
1028 		return hci_get_conn_list(argp);
1031 		if (!capable(CAP_NET_ADMIN))
1033 		return hci_dev_open(arg);
1036 		if (!capable(CAP_NET_ADMIN))
1038 		return hci_dev_close(arg);
1041 		if (!capable(CAP_NET_ADMIN))
1043 		return hci_dev_reset(arg);
1046 		if (!capable(CAP_NET_ADMIN))
1048 		return hci_dev_reset_stat(arg);
1055 	case HCISETLINKMODE:
1058 		if (!capable(CAP_NET_ADMIN))
1060 		return hci_dev_cmd(cmd, argp);
1063 		return hci_inquiry(argp);
/* Everything else needs the bound-device path, under the socket lock. */
1068 	err = hci_sock_bound_ioctl(sk, cmd, arg);
1075 #ifdef CONFIG_COMPAT
/* 32-bit compat ioctl: pointer arguments go through compat_ptr(); the
 * case labels for value-argument commands are elided in this excerpt.
 */
1076 static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1084 		return hci_sock_ioctl(sock, cmd, arg);
1087 	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
/* bind() handler: attach the socket to a channel (and optionally a
 * device) named in sockaddr_hci.
 *
 * Channel-specific rules:
 *  - RAW: optional device, cookie + monitor open/close notifications,
 *    trusted iff CAP_NET_ADMIN.
 *  - USER: exclusive device access; requires CAP_NET_ADMIN, a device
 *    that is not in setup/config and not already up (unless AUTO_OFF),
 *    flips HCI_USER_CHANNEL and brings the device up.
 *  - MONITOR: CAP_NET_RAW; replays notes, device state and control
 *    sockets, then bumps monitor_promisc.
 *  - LOGGING: CAP_NET_ADMIN, no device.
 *  - default: a registered mgmt channel; sets the initial event flags.
 * Ends by marking the socket BT_BOUND.  lock_sock/release_sock, many
 * error assignments, `goto done` and `break` lines are elided in this
 * excerpt.
 */
1091 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1094 	struct sockaddr_hci haddr;
1095 	struct sock *sk = sock->sk;
1096 	struct hci_dev *hdev = NULL;
1097 	struct sk_buff *skb;
1100 	BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr); shorter addresses are zero-padded. */
1105 	memset(&haddr, 0, sizeof(haddr));
1106 	len = min_t(unsigned int, sizeof(haddr), addr_len);
1107 	memcpy(&haddr, addr, len);
1109 	if (haddr.hci_family != AF_BLUETOOTH)
1114 	/* Allow detaching from dead device and attaching to alive device, if
1115 	 * the caller wants to re-bind (instead of close) this socket in
1116 	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
1118 	hdev = hci_pi(sk)->hdev;
1119 	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1120 		hci_pi(sk)->hdev = NULL;
1121 		sk->sk_state = BT_OPEN;
1126 	if (sk->sk_state == BT_BOUND) {
1131 	switch (haddr.hci_channel) {
1132 	case HCI_CHANNEL_RAW:
1133 		if (hci_pi(sk)->hdev) {
1138 		if (haddr.hci_dev != HCI_DEV_NONE) {
1139 			hdev = hci_dev_get(haddr.hci_dev);
1145 			atomic_inc(&hdev->promisc);
1148 		hci_pi(sk)->channel = haddr.hci_channel;
1150 		if (!hci_sock_gen_cookie(sk)) {
1151 			/* In the case when a cookie has already been assigned,
1152 			 * then there has been already an ioctl issued against
1153 			 * an unbound socket and with that triggered an open
1154 			 * notification. Send a close notification first to
1155 			 * allow the state transition to bounded.
1157 			skb = create_monitor_ctrl_close(sk);
1159 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1160 						    HCI_SOCK_TRUSTED, NULL);
1165 		if (capable(CAP_NET_ADMIN))
1166 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1168 		hci_pi(sk)->hdev = hdev;
1170 		/* Send event to monitor */
1171 		skb = create_monitor_ctrl_open(sk);
1173 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1174 					    HCI_SOCK_TRUSTED, NULL);
1179 	case HCI_CHANNEL_USER:
1180 		if (hci_pi(sk)->hdev) {
1185 		if (haddr.hci_dev == HCI_DEV_NONE) {
1190 		if (!capable(CAP_NET_ADMIN)) {
1195 		hdev = hci_dev_get(haddr.hci_dev);
/* Refuse exclusive access while the device is initialising or already
 * up (an AUTO_OFF device still counts as down for this purpose).
 */
1201 		if (test_bit(HCI_INIT, &hdev->flags) ||
1202 		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1203 		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1204 		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1205 		     test_bit(HCI_UP, &hdev->flags))) {
1211 		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1217 		mgmt_index_removed(hdev);
1219 		err = hci_dev_open(hdev->id);
1221 			if (err == -EALREADY) {
1222 				/* In case the transport is already up and
1223 				 * running, clear the error here.
1225 				 * This can happen when opening a user
1226 				 * channel and HCI_AUTO_OFF grace period
1231 				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1232 				mgmt_index_added(hdev);
1238 		hci_pi(sk)->channel = haddr.hci_channel;
1240 		if (!hci_sock_gen_cookie(sk)) {
1241 			/* In the case when a cookie has already been assigned,
1242 			 * this socket will transition from a raw socket into
1243 			 * a user channel socket. For a clean transition, send
1244 			 * the close notification first.
1246 			skb = create_monitor_ctrl_close(sk);
1248 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1249 						    HCI_SOCK_TRUSTED, NULL);
1254 		/* The user channel is restricted to CAP_NET_ADMIN
1255 		 * capabilities and with that implicitly trusted.
1257 		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1259 		hci_pi(sk)->hdev = hdev;
1261 		/* Send event to monitor */
1262 		skb = create_monitor_ctrl_open(sk);
1264 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1265 					    HCI_SOCK_TRUSTED, NULL);
1269 		atomic_inc(&hdev->promisc);
1272 	case HCI_CHANNEL_MONITOR:
1273 		if (haddr.hci_dev != HCI_DEV_NONE) {
1278 		if (!capable(CAP_NET_RAW)) {
1283 		hci_pi(sk)->channel = haddr.hci_channel;
1285 		/* The monitor interface is restricted to CAP_NET_RAW
1286 		 * capabilities and with that implicitly trusted.
1288 		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1290 		send_monitor_note(sk, "Linux version %s (%s)",
1291 				  init_utsname()->release,
1292 				  init_utsname()->machine);
1293 		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1294 				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1295 		send_monitor_replay(sk);
1296 		send_monitor_control_replay(sk);
1298 		atomic_inc(&monitor_promisc);
1301 	case HCI_CHANNEL_LOGGING:
1302 		if (haddr.hci_dev != HCI_DEV_NONE) {
1307 		if (!capable(CAP_NET_ADMIN)) {
1312 		hci_pi(sk)->channel = haddr.hci_channel;
/* default: any other channel must be a registered mgmt channel. */
1316 		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1321 		if (haddr.hci_dev != HCI_DEV_NONE) {
1326 		/* Users with CAP_NET_ADMIN capabilities are allowed
1327 		 * access to all management commands and events. For
1328 		 * untrusted users the interface is restricted and
1329 		 * also only untrusted events are sent.
1331 		if (capable(CAP_NET_ADMIN))
1332 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1334 		hci_pi(sk)->channel = haddr.hci_channel;
1336 		/* At the moment the index and unconfigured index events
1337 		 * are enabled unconditionally. Setting them on each
1338 		 * socket when binding keeps this functionality. They
1339 		 * however might be cleared later and then sending of these
1340 		 * events will be disabled, but that is then intentional.
1342 		 * This also enables generic events that are safe to be
1343 		 * received by untrusted users. Example for such events
1344 		 * are changes to settings, class of device, name etc.
1346 		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1347 			if (!hci_sock_gen_cookie(sk)) {
1348 				/* In the case when a cookie has already been
1349 				 * assigned, this socket will transition from
1350 				 * a raw socket into a control socket. To
1351 				 * allow for a clean transition, send the
1352 				 * close notification first.
1354 				skb = create_monitor_ctrl_close(sk);
1356 					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1357 							    HCI_SOCK_TRUSTED, NULL);
1362 			/* Send event to monitor */
1363 			skb = create_monitor_ctrl_open(sk);
1365 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1366 						    HCI_SOCK_TRUSTED, NULL);
1370 			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1371 			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1372 			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1373 			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1374 			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1375 			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1380 	sk->sk_state = BT_BOUND;
/* getname() handler: report the bound device id and channel.  Fails
 * with the hci_hdev_from_sock() error when no live device is attached.
 * NOTE(review): `haddr->hci_channel=` is missing a space before '=' —
 * cosmetic only, left untouched here.
 */
1387 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1390 	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1391 	struct sock *sk = sock->sk;
1392 	struct hci_dev *hdev;
1395 	BT_DBG("sock %p sk %p", sock, sk);
1402 	hdev = hci_hdev_from_sock(sk);
1404 		err = PTR_ERR(hdev);
1408 	haddr->hci_family = AF_BLUETOOTH;
1409 	haddr->hci_dev    = hdev->id;
1410 	haddr->hci_channel= hci_pi(sk)->channel;
/* getname returns the address length on success. */
1411 	err = sizeof(*haddr);
/* Attach ancillary data (direction and/or timestamp) to a received
 * message according to the socket's cmsg_mask.  The compat branch emits
 * an old_timeval32 instead of __kernel_old_timeval; the data/len
 * assignments between the branches are elided in this excerpt.
 */
1418 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1419 			  struct sk_buff *skb)
1421 	__u8 mask = hci_pi(sk)->cmsg_mask;
1423 	if (mask & HCI_CMSG_DIR) {
1424 		int incoming = bt_cb(skb)->incoming;
1425 		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1429 	if (mask & HCI_CMSG_TSTAMP) {
1430 #ifdef CONFIG_COMPAT
1431 		struct old_timeval32 ctv;
1433 		struct __kernel_old_timeval tv;
1437 		skb_get_timestamp(skb, &tv);
1441 #ifdef CONFIG_COMPAT
1442 		if (!COMPAT_USE_64BIT_TIME &&
1443 		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1444 			ctv.tv_sec = tv.tv_sec;
1445 			ctv.tv_usec = tv.tv_usec;
1451 		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* recvmsg() handler.
 *
 * Rejects MSG_OOB and the write-only LOGGING channel, dequeues one
 * datagram, truncates to @len (setting MSG_TRUNC), then adds channel-
 * appropriate ancillary data/timestamps.  With MSG_TRUNC the full
 * packet length (skblen) is returned instead of the copied length.
 * The copied/len comparison and `break`s are elided in this excerpt.
 */
1455 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1456 			    size_t len, int flags)
1458 	int noblock = flags & MSG_DONTWAIT;
1459 	struct sock *sk = sock->sk;
1460 	struct sk_buff *skb;
1462 	unsigned int skblen;
1464 	BT_DBG("sock %p, sk %p", sock, sk);
1466 	if (flags & MSG_OOB)
1469 	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1472 	if (sk->sk_state == BT_CLOSED)
1475 	skb = skb_recv_datagram(sk, flags, noblock, &err);
1482 		msg->msg_flags |= MSG_TRUNC;
1486 	skb_reset_transport_header(skb);
1487 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1489 	switch (hci_pi(sk)->channel) {
1490 	case HCI_CHANNEL_RAW:
1491 		hci_sock_cmsg(sk, msg, skb);
1493 	case HCI_CHANNEL_USER:
1494 	case HCI_CHANNEL_MONITOR:
1495 		sock_recv_timestamp(msg, sk, skb);
1498 		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1499 			sock_recv_timestamp(msg, sk, skb);
1503 	skb_free_datagram(sk, skb);
1505 	if (flags & MSG_TRUNC)
1508 	return err ? : copied;
/* Parse and dispatch one management command from @msg on @chan.
 *
 * Validates the mgmt_hdr length, resolves the handler (with a Tizen
 * vendor-opcode table above TIZEN_OP_CODE_BASE), mirrors control
 * commands to the monitor, enforces trust/index/flag/length rules and
 * finally invokes handler->func.  Error status replies use
 * mgmt_cmd_status().  Several `goto done`s, the hdr assignment from
 * buf, and the hci_dev_put/kfree cleanup tail are elided in this
 * excerpt.
 */
1511 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1512 			struct msghdr *msg, size_t msglen)
1516 	struct mgmt_hdr *hdr;
1517 	u16 opcode, index, len;
1518 	struct hci_dev *hdev = NULL;
1519 	const struct hci_mgmt_handler *handler;
1520 	bool var_len, no_hdev;
1523 	BT_DBG("got %zu bytes", msglen);
1525 	if (msglen < sizeof(*hdr))
1528 	buf = kmalloc(msglen, GFP_KERNEL);
1532 	if (memcpy_from_msg(buf, msg, msglen)) {
1538 	opcode = __le16_to_cpu(hdr->opcode);
1539 	index = __le16_to_cpu(hdr->index);
1540 	len = __le16_to_cpu(hdr->len);
1542 	if (len != msglen - sizeof(*hdr)) {
/* Tizen vendor extension: opcodes >= TIZEN_OP_CODE_BASE are looked up
 * in a separate handler table (downstream patch; not upstream mgmt).
 */
1548 	if (opcode >= TIZEN_OP_CODE_BASE) {
1549 		u16 tizen_opcode_index = opcode - TIZEN_OP_CODE_BASE;
1550 		if (tizen_opcode_index >= chan->tizen_handler_count ||
1551 		    chan->tizen_handlers[tizen_opcode_index].func == NULL) {
1552 			BT_DBG("Unknown op %u", opcode);
1553 			err = mgmt_cmd_status(sk, index, opcode,
1554 					      MGMT_STATUS_UNKNOWN_COMMAND);
1558 		handler = &chan->tizen_handlers[tizen_opcode_index];
1562 	if (chan->channel == HCI_CHANNEL_CONTROL) {
1563 		struct sk_buff *skb;
1565 		/* Send event to monitor */
1566 		skb = create_monitor_ctrl_command(sk, index, opcode, len,
1567 						  buf + sizeof(*hdr));
1569 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1570 					    HCI_SOCK_TRUSTED, NULL);
1575 	if (opcode >= chan->handler_count ||
1576 	    chan->handlers[opcode].func == NULL) {
1577 		BT_DBG("Unknown op %u", opcode);
1578 		err = mgmt_cmd_status(sk, index, opcode,
1579 				      MGMT_STATUS_UNKNOWN_COMMAND);
1583 	handler = &chan->handlers[opcode];
/* Untrusted sockets may only run handlers marked HCI_MGMT_UNTRUSTED. */
1588 	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1589 	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1590 		err = mgmt_cmd_status(sk, index, opcode,
1591 				      MGMT_STATUS_PERMISSION_DENIED);
1595 	if (index != MGMT_INDEX_NONE) {
1596 		hdev = hci_dev_get(index);
1598 			err = mgmt_cmd_status(sk, index, opcode,
1599 					      MGMT_STATUS_INVALID_INDEX);
1603 		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1604 		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1605 		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1606 			err = mgmt_cmd_status(sk, index, opcode,
1607 					      MGMT_STATUS_INVALID_INDEX);
1611 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1612 		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1613 			err = mgmt_cmd_status(sk, index, opcode,
1614 					      MGMT_STATUS_INVALID_INDEX);
/* Unless optional, the handler's with/without-hdev expectation must
 * match whether an index was supplied.
 */
1619 	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
1620 		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1621 		if (no_hdev != !hdev) {
1622 			err = mgmt_cmd_status(sk, index, opcode,
1623 					      MGMT_STATUS_INVALID_INDEX);
1628 	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1629 	if ((var_len && len < handler->data_len) ||
1630 	    (!var_len && len != handler->data_len)) {
1631 		err = mgmt_cmd_status(sk, index, opcode,
1632 				      MGMT_STATUS_INVALID_PARAMS);
1636 	if (hdev && chan->hdev_init)
1637 		chan->hdev_init(sk, hdev);
1639 	cp = buf + sizeof(*hdr);
1641 	err = handler->func(sk, hdev, cp, len);
/* sendmsg() backend for the LOGGING channel: validate a user-supplied
 * logging frame and forward it to monitor sockets as USER_LOGGING.
 *
 * Frame layout: hci_mon_hdr, priority byte, ident-length byte, NUL-
 * terminated ident, NUL-terminated message.  Only opcode 0x0000 gets
 * the priority/ident validation here; the hdev put and drop/free
 * cleanup paths are elided in this excerpt.
 */
1655 static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1657 	struct hci_mon_hdr *hdr;
1658 	struct sk_buff *skb;
1659 	struct hci_dev *hdev;
1663 	/* The logging frame consists at minimum of the standard header,
1664 	 * the priority byte, the ident length byte and at least one string
1665 	 * terminator NUL byte. Anything shorter are invalid packets.
1667 	if (len < sizeof(*hdr) + 3)
1670 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1674 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1679 	hdr = (void *)skb->data;
1681 	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1686 	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1687 		__u8 priority = skb->data[sizeof(*hdr)];
1688 		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1690 		/* Only the priorities 0-7 are valid and with that any other
1691 		 * value results in an invalid packet.
1693 		 * The priority byte is followed by an ident length byte and
1694 		 * the NUL terminated ident string. Check that the ident
1695 		 * length is not overflowing the packet and also that the
1696 		 * ident string itself is NUL terminated. In case the ident
1697 		 * length is zero, the length value actually doubles as NUL
1698 		 * terminator identifier.
1700 		 * The message follows the ident string (if present) and
1701 		 * must be NUL terminated. Otherwise it is not a valid packet.
1703 		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1704 		    ident_len > len - sizeof(*hdr) - 3 ||
1705 		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1714 	index = __le16_to_cpu(hdr->index);
/* A concrete index must name an existing device. */
1716 	if (index != MGMT_INDEX_NONE) {
1717 		hdev = hci_dev_get(index);
1726 	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1728 	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1739 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1742 struct sock *sk = sock->sk;
1743 struct hci_mgmt_chan *chan;
1744 struct hci_dev *hdev;
1745 struct sk_buff *skb;
1748 BT_DBG("sock %p sk %p", sock, sk);
1750 if (msg->msg_flags & MSG_OOB)
1753 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
1757 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1762 switch (hci_pi(sk)->channel) {
1763 case HCI_CHANNEL_RAW:
1764 case HCI_CHANNEL_USER:
1766 case HCI_CHANNEL_MONITOR:
1769 case HCI_CHANNEL_LOGGING:
1770 err = hci_logging_frame(sk, msg, len);
1773 mutex_lock(&mgmt_chan_list_lock);
1774 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1776 err = hci_mgmt_cmd(chan, sk, msg, len);
1780 mutex_unlock(&mgmt_chan_list_lock);
1784 hdev = hci_hdev_from_sock(sk);
1786 err = PTR_ERR(hdev);
1790 if (!test_bit(HCI_UP, &hdev->flags)) {
1795 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1799 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1804 hci_skb_pkt_type(skb) = skb->data[0];
1807 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1808 /* No permission check is needed for user channel
1809 * since that gets enforced when binding the socket.
1811 * However check that the packet type is valid.
1813 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1814 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1815 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1816 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1821 skb_queue_tail(&hdev->raw_q, skb);
1822 queue_work(hdev->workqueue, &hdev->tx_work);
1823 } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1824 u16 opcode = get_unaligned_le16(skb->data);
1825 u16 ogf = hci_opcode_ogf(opcode);
1826 u16 ocf = hci_opcode_ocf(opcode);
1828 if (((ogf > HCI_SFLT_MAX_OGF) ||
1829 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1830 &hci_sec_filter.ocf_mask[ogf])) &&
1831 !capable(CAP_NET_RAW)) {
1836 /* Since the opcode has already been extracted here, store
1837 * a copy of the value for later use by the drivers.
1839 hci_skb_opcode(skb) = opcode;
1842 skb_queue_tail(&hdev->raw_q, skb);
1843 queue_work(hdev->workqueue, &hdev->tx_work);
1845 /* Stand-alone HCI commands must be flagged as
1846 * single-command requests.
1848 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1850 skb_queue_tail(&hdev->cmd_q, skb);
1851 queue_work(hdev->workqueue, &hdev->cmd_work);
1854 if (!capable(CAP_NET_RAW)) {
1859 if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1860 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1861 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1866 skb_queue_tail(&hdev->raw_q, skb);
1867 queue_work(hdev->workqueue, &hdev->tx_work);
1881 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1882 sockptr_t optval, unsigned int len)
1884 struct hci_ufilter uf = { .opcode = 0 };
1885 struct sock *sk = sock->sk;
1886 int err = 0, opt = 0;
1888 BT_DBG("sk %p, opt %d", sk, optname);
1890 if (level != SOL_HCI)
1891 return -ENOPROTOOPT;
1895 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1902 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1908 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1910 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1913 case HCI_TIME_STAMP:
1914 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1920 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1922 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1927 struct hci_filter *f = &hci_pi(sk)->filter;
1929 uf.type_mask = f->type_mask;
1930 uf.opcode = f->opcode;
1931 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1932 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1935 len = min_t(unsigned int, len, sizeof(uf));
1936 if (copy_from_sockptr(&uf, optval, len)) {
1941 if (!capable(CAP_NET_RAW)) {
1942 uf.type_mask &= hci_sec_filter.type_mask;
1943 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1944 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1948 struct hci_filter *f = &hci_pi(sk)->filter;
1950 f->type_mask = uf.type_mask;
1951 f->opcode = uf.opcode;
1952 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1953 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1967 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1968 char __user *optval, int __user *optlen)
1970 struct hci_ufilter uf;
1971 struct sock *sk = sock->sk;
1972 int len, opt, err = 0;
1974 BT_DBG("sk %p, opt %d", sk, optname);
1976 if (level != SOL_HCI)
1977 return -ENOPROTOOPT;
1979 if (get_user(len, optlen))
1984 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1991 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1996 if (put_user(opt, optval))
2000 case HCI_TIME_STAMP:
2001 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2006 if (put_user(opt, optval))
2012 struct hci_filter *f = &hci_pi(sk)->filter;
2014 memset(&uf, 0, sizeof(uf));
2015 uf.type_mask = f->type_mask;
2016 uf.opcode = f->opcode;
2017 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2018 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2021 len = min_t(unsigned int, len, sizeof(uf));
2022 if (copy_to_user(optval, &uf, len))
2036 static const struct proto_ops hci_sock_ops = {
2037 .family = PF_BLUETOOTH,
2038 .owner = THIS_MODULE,
2039 .release = hci_sock_release,
2040 .bind = hci_sock_bind,
2041 .getname = hci_sock_getname,
2042 .sendmsg = hci_sock_sendmsg,
2043 .recvmsg = hci_sock_recvmsg,
2044 .ioctl = hci_sock_ioctl,
2045 #ifdef CONFIG_COMPAT
2046 .compat_ioctl = hci_sock_compat_ioctl,
2048 .poll = datagram_poll,
2049 .listen = sock_no_listen,
2050 .shutdown = sock_no_shutdown,
2051 .setsockopt = hci_sock_setsockopt,
2052 .getsockopt = hci_sock_getsockopt,
2053 .connect = sock_no_connect,
2054 .socketpair = sock_no_socketpair,
2055 .accept = sock_no_accept,
2056 .mmap = sock_no_mmap
2059 static struct proto hci_sk_proto = {
2061 .owner = THIS_MODULE,
2062 .obj_size = sizeof(struct hci_pinfo)
2065 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2070 BT_DBG("sock %p", sock);
2072 if (sock->type != SOCK_RAW)
2073 return -ESOCKTNOSUPPORT;
2075 sock->ops = &hci_sock_ops;
2077 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2081 sock_init_data(sock, sk);
2083 sock_reset_flag(sk, SOCK_ZAPPED);
2085 sk->sk_protocol = protocol;
2087 sock->state = SS_UNCONNECTED;
2088 sk->sk_state = BT_OPEN;
2090 bt_sock_link(&hci_sk_list, sk);
2094 static const struct net_proto_family hci_sock_family_ops = {
2095 .family = PF_BLUETOOTH,
2096 .owner = THIS_MODULE,
2097 .create = hci_sock_create,
2100 int __init hci_sock_init(void)
2104 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2106 err = proto_register(&hci_sk_proto, 0);
2110 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2112 BT_ERR("HCI socket registration failed");
2116 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2118 BT_ERR("Failed to create HCI proc file");
2119 bt_sock_unregister(BTPROTO_HCI);
2123 BT_INFO("HCI socket layer initialized");
2128 proto_unregister(&hci_sk_proto);
2132 void hci_sock_cleanup(void)
2134 bt_procfs_cleanup(&init_net, "hci");
2135 bt_sock_unregister(BTPROTO_HCI);
2136 proto_unregister(&hci_sk_proto);