/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include <net/bluetooth/mgmt_tizen.h>

#include "mgmt_util.h"
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
	__u16             mtu;
};
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
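/* Every frame queued on HCI_CHANNEL_MONITOR is prefixed with struct
 * hci_mon_hdr (HCI_MON_HDR_SIZE == 6): opcode, controller index and
 * payload length, each a little-endian 16-bit value. This is the
 * stream format that userspace monitors such as btmon decode.
 */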
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		fallthrough;
	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
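/* The CTRL_OPEN payload built above is cookie (4) + format (2) +
 * version (3) + flags (4) + comm length (1) + comm (TASK_COMM_LEN)
 * bytes, which is where the 14 + TASK_COMM_LEN passed to
 * bt_skb_alloc() comes from.
 */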
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been complete and hdev will get released when we put
			 * below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCISETRAW:
	case HCIINQUIRY:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
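/* A minimal userspace sketch of the unbound ioctl path above; the
 * BlueZ <bluetooth/bluetooth.h> and <bluetooth/hci.h> headers and the
 * 16-entry buffer are assumptions of this example:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct {
 *		struct hci_dev_list_req dl;
 *		struct hci_dev_req dr[16];
 *	} buf = { .dl.dev_num = 16 };
 *
 *	if (ioctl(fd, HCIGETDEVLIST, &buf) == 0)
 *		for (int i = 0; i < buf.dl.dev_num; i++)
 *			printf("hci%u\n", buf.dr[i].dev_id);
 *
 * The socket does not need to be bound first; the first ioctl is what
 * generates the cookie and the monitor open notification.
 */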
#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);
	/* Allow detaching from a dead device and attaching to an alive
	 * device, if the caller wants to re-bind (instead of close) this
	 * socket in response to hci_sock_dev_event(HCI_DEV_UNREG)
	 * notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}
	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;
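	/* Binding the RAW channel from userspace is a plain bind() with
	 * struct sockaddr_hci, given an AF_BLUETOOTH/BTPROTO_HCI socket
	 * fd (a sketch; error handling omitted):
	 *
	 *	struct sockaddr_hci a = {
	 *		.hci_family  = AF_BLUETOOTH,
	 *		.hci_dev     = 0,
	 *		.hci_channel = HCI_CHANNEL_RAW,
	 *	};
	 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
	 *
	 * where hci_dev 0 selects hci0. With hci_dev = HCI_DEV_NONE the
	 * socket is attached to no controller and only ioctls are useful.
	 */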
	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EOPNOTSUPP;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;
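	/* A userspace sketch of taking exclusive access (CAP_NET_ADMIN is
	 * required and, per the checks above, the controller must not be
	 * up outside the HCI_AUTO_OFF grace period):
	 *
	 *	struct sockaddr_hci a = {
	 *		.hci_family  = AF_BLUETOOTH,
	 *		.hci_dev     = 0,
	 *		.hci_channel = HCI_CHANNEL_USER,
	 *	};
	 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
	 *
	 * On success the kernel stack is detached from hci0 and the
	 * process exchanges raw HCI packets with it via read()/write().
	 */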
	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;
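	/* This is the channel btmon uses. A minimal reader sketch, given
	 * an AF_BLUETOOTH/BTPROTO_HCI socket fd (CAP_NET_RAW required):
	 *
	 *	struct sockaddr_hci a = {
	 *		.hci_family  = AF_BLUETOOTH,
	 *		.hci_dev     = HCI_DEV_NONE,
	 *		.hci_channel = HCI_CHANNEL_MONITOR,
	 *	};
	 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
	 *
	 * The first frames received are the notes and replay events
	 * generated right above.
	 */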
	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;
	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR,
							    skb,
							    HCI_SOCK_TRUSTED,
							    NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}
	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
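/* Userspace consumes these ancillary messages with the usual CMSG
 * macros after enabling HCI_DATA_DIR (a sketch; msg and dir are local
 * variables of the caller):
 *
 *	struct cmsghdr *cmsg;
 *	int dir;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_HCI &&
 *		    cmsg->cmsg_type == HCI_CMSG_DIR)
 *			memcpy(&dir, CMSG_DATA(cmsg), sizeof(dir));
 *	}
 */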
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= TIZEN_OP_CODE_BASE) {
		u16 tizen_opcode_index = opcode - TIZEN_OP_CODE_BASE;

		if (tizen_opcode_index >= chan->tizen_handler_count ||
		    chan->tizen_handlers[tizen_opcode_index].func == NULL) {
			BT_DBG("Unknown op %u", opcode);
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_UNKNOWN_COMMAND);
			goto done;
		}

		handler = &chan->tizen_handlers[tizen_opcode_index];

		goto tizen_done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

tizen_done:
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = 0;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree_skb(skb);
	return err;
}
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
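/* A well-formed logging frame for the checks above, as userspace would
 * write() it on HCI_CHANNEL_LOGGING ("mydaemon" and the message are
 * arbitrary examples):
 *
 *	opcode = 0x0000, index = MGMT_INDEX_NONE   (little endian)
 *	len    = 2 + 9 + 6 = 17                    payload length
 *	0x06                                       priority (info)
 *	0x09                                       ident_len
 *	"mydaemon\0"                               ident, NUL terminated
 *	"hello\0"                                  message, NUL terminated
 */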
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
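/* On a RAW channel socket bound to a controller, a complete command
 * write is the packet type byte followed by the command packet, e.g.
 * HCI_Reset (OGF 0x03, OCF 0x0003, no parameters):
 *
 *	uint8_t cmd[] = { 0x01, 0x03, 0x0c, 0x00 };
 *	write(fd, cmd, sizeof(cmd));
 *
 * which is also the minimum length (4) enforced above. HCI_Reset falls
 * in OGF_HOST_CTL but its bit is not set in hci_sec_filter, so this
 * particular write additionally needs CAP_NET_RAW.
 */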
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
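/* Userspace typically narrows the event stream before issuing commands,
 * e.g. to receive only Command Complete/Status (a sketch using the
 * BlueZ <bluetooth/hci_lib.h> filter helpers; the userspace
 * struct hci_filter has the same layout as struct hci_ufilter here):
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	hci_filter_set_event(EVT_CMD_STATUS, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */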
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static void hci_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}