/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
	__u16             mtu;
};
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
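/* Worked example (editorial note, not taken from the original sources):
 * for nr == HCI_EV_CMD_COMPLETE (0x0e), hci_test_bit() reads 32-bit word
 * 0x0e >> 5 == 0 of the bitmap and tests bit 0x0e & 31 == 14 in it.
 */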
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
static const struct hci_sec_filter hci_sec_filter = {
	.type_mask	= 0x10,				/* Packet types */
	.event_mask	= { 0x1000d9fe, 0x0000b00c },	/* Events */
	.ocf_mask	= {				/* Commands */
		{ 0x0 },
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },	/* OGF_LINK_CTL */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },	/* OGF_LINK_POLICY */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },	/* OGF_HOST_CTL */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },	/* OGF_INFO_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }	/* OGF_STATUS_PARAM */
	}
};
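/* Worked example (editorial sketch, not from the original sources): an
 * unprivileged HCI_CHANNEL_RAW socket sending HCI_Inquiry (OGF 0x01,
 * OCF 0x0001) passes the check in hci_sock_sendmsg() because bit 1 of
 * hci_sec_filter.ocf_mask[0x01][0] == 0xbe000006 is set, so CAP_NET_RAW
 * is not required for that command.
 */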
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
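/* A minimal user-space sketch of installing such a filter (editorial
 * example, not part of this file; assumes the BlueZ user-space
 * <bluetooth/hci.h> helpers):
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */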
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)

		/* Don't send frame to the socket it came from */

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
			if (is_filtered_packet(sk, skb))
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
			/* Don't send frame to other channel types */

		/* Create a private copy with headroom */
		skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);

		/* Put type byte before the data */
		memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);

		nskb = skb_clone(skb_copy, GFP_ATOMIC);

		if (sock_queue_rcv_skb(sk, nskb))

	read_unlock(&hci_sk_list.lock);

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))

		/* Skip the original socket */

		if (sk->sk_state != BT_BOUND)

		if (hci_pi(sk)->channel != channel)

		nskb = skb_clone(skb, GFP_ATOMIC);

		if (sock_queue_rcv_skb(sk, nskb))

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;

	if (!atomic_read(&monitor_promisc))

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
		index = cpu_to_le16(hdev->id);
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))

		/* Skip the original socket */

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);

	read_unlock(&hci_sk_list.lock);
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;

		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);

		skb = bt_skb_alloc(0, GFP_ATOMIC);

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);

		if (hdev->manufacturer == 0xffff)

		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);

		skb = bt_skb_alloc(0, GFP_ATOMIC);

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);

		skb = bt_skb_alloc(0, GFP_ATOMIC);

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
	struct hci_mon_hdr *hdr;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
	case HCI_CHANNEL_USER:
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
	case HCI_CHANNEL_CONTROL:
		mgmt_fill_version_info(ver);
		/* No message for unsupported format */

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
	struct hci_mon_hdr *hdr;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* No message for unsupported format */

	skb = bt_skb_alloc(4, GFP_ATOMIC);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
	struct hci_mon_hdr *hdr;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
	struct hci_mon_hdr *hdr;

	len = vsnprintf(NULL, 0, fmt, args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);

	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))

static void send_monitor_replay(struct sock *sk)
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {

		skb = create_monitor_event(hdev, HCI_DEV_REG);

		if (sock_queue_rcv_skb(sk, skb))

		if (!test_bit(HCI_RUNNING, &hdev->flags))

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);

		if (sock_queue_rcv_skb(sk, skb))

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);

		if (sock_queue_rcv_skb(sk, skb))

	read_unlock(&hci_dev_list_lock);

static void send_monitor_control_replay(struct sock *mon_sk)

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {

		skb = create_monitor_ctrl_open(sk);

		if (sock_queue_rcv_skb(mon_sk, skb))

	read_unlock(&hci_sk_list.lock);

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
void hci_sock_dev_event(struct hci_dev *hdev, int event)
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_state_change(sk);
		read_unlock(&hci_sk_list.lock);

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
	if (c->channel < HCI_CHANNEL_CONTROL)

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
	mutex_lock(&mgmt_chan_list_lock);
	mutex_unlock(&mgmt_chan_list_lock);
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);

		hci_sock_free_cookie(sk);

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
	    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		/* When releasing a user channel exclusive access,
		 * call hci_dev_do_close directly instead of calling
		 * hci_dev_close to ensure the exclusive access will
		 * be released and the controller brought back down.
		 *
		 * The checking of HCI_AUTO_OFF is not needed in this
		 * case since it will have been cleared already when
		 * opening the user channel.
		 *
		 * Make sure to also check that we haven't already
		 * unregistered since all the cleanup will have already
		 * been complete and hdev will get released when we put
		 * below.
		 */
		hci_dev_do_close(hdev);
		hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
		mgmt_index_added(hdev);
	}

	atomic_dec(&hdev->promisc);
static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))

	if (hdev->dev_type != HCI_PRIMARY)

		if (!capable(CAP_NET_ADMIN))

		return hci_get_conn_info(hdev, (void __user *)arg);

		return hci_get_auth_info(hdev, (void __user *)arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	case HCISETLINKMODE:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIUNBLOCKADDR:
		return -ENOIOCTLCMD;
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will only ever be generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
		return hci_get_dev_list(argp);

		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

		if (!capable(CAP_NET_ADMIN))
		return hci_dev_open(arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_dev_close(arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_dev_reset(arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_dev_reset_stat(arg);

	case HCISETLINKMODE:
		if (!capable(CAP_NET_ADMIN))
		return hci_dev_cmd(cmd, argp);

		return hci_inquiry(argp);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
		return hci_sock_ioctl(sock, cmd, arg);

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;

	if (sk->sk_state == BT_BOUND) {

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);

			atomic_inc(&hdev->promisc);

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {

		if (haddr.hci_dev == HCI_DEV_NONE) {

		if (!capable(CAP_NET_ADMIN)) {

		hdev = hci_dev_get(haddr.hci_dev);

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);

		atomic_inc(&hdev->promisc);
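		/* User-space sketch (editorial example, not part of this
		 * file): taking exclusive user channel access to hci0.
		 * Assumes the BlueZ user-space <bluetooth/hci.h>
		 * definitions.
		 *
		 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC,
		 *			BTPROTO_HCI);
		 *	struct sockaddr_hci a = {
		 *		.hci_family  = AF_BLUETOOTH,
		 *		.hci_dev     = 0,
		 *		.hci_channel = HCI_CHANNEL_USER,
		 *	};
		 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
		 */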
	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {

		if (!capable(CAP_NET_RAW)) {

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
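		/* User-space sketch (editorial example, not part of this
		 * file): binding a monitor socket, as btmon does. Assumes
		 * the BlueZ user-space <bluetooth/hci.h> definitions.
		 *
		 *	struct sockaddr_hci a = {
		 *		.hci_family  = AF_BLUETOOTH,
		 *		.hci_dev     = HCI_DEV_NONE,
		 *		.hci_channel = HCI_CHANNEL_MONITOR,
		 *	};
		 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
		 */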
	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {

		if (!capable(CAP_NET_ADMIN)) {

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_mgmt_chan_find(haddr.hci_channel)) {

		if (haddr.hci_dev != HCI_DEV_NONE) {

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	hdev = hci_hdev_from_sock(sk);
		err = PTR_ERR(hdev);

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
		struct __kernel_old_timeval tv;

		skb_get_timestamp(skb, &tv);

#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)

	if (sk->sk_state == BT_CLOSED)

	skb = skb_recv_datagram(sk, flags, &err);

		msg->msg_flags |= MSG_TRUNC;

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)

	return err ? : copied;
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (skb->len < sizeof(*hdr) + 3)

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
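		/* Illustrative layout of a minimal logging frame as described
		 * above (editorial example, values made up; per the checks
		 * below, ident_len counts the ident string including its NUL
		 * terminator):
		 *
		 *	struct hci_mon_hdr hdr;   opcode 0x0000, index, len
		 *	__u8 priority;            e.g. 6
		 *	__u8 ident_len;           e.g. 5 for "test" plus NUL
		 *	char ident[5];            "test" followed by NUL
		 *	char message[];           NUL terminated message text
		 */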
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))

	if (len < 4 || len > hci_pi(sk)->mtu)

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
		return PTR_ERR(skb);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
			err = hci_mgmt_cmd(chan, sk, skb);
		mutex_unlock(&mgmt_chan_list_lock);

	hdev = hci_hdev_from_sock(sk);
		err = PTR_ERR(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {

	hci_skb_pkt_type(skb) = skb->data[0];

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		if (!capable(CAP_NET_RAW)) {

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {

			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {

			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;

			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);

			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
	struct sock *sk = sock->sk;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;
	switch (hci_pi(sk)->channel) {
	/* Don't allow changing MTU for channels that are meant for HCI
	 * traffic.
	 */
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	if (copy_from_sockptr(&opt, optval, sizeof(opt))) {

	hci_pi(sk)->mtu = opt;

static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {

		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)

		if (put_user(opt, optval))

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)

		if (put_user(opt, optval))

			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))

static void hci_sock_destruct(struct sock *sk)
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap

static struct proto hci_sk_proto = {
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,

int __init hci_sock_init(void)

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
		BT_ERR("HCI socket registration failed");

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);

	BT_INFO("HCI socket layer initialized");

	proto_unregister(&hci_sk_proto);

void hci_sock_cleanup(void)
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);