/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/mgmt_tizen.h>

#include "mgmt_util.h"
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
	__u16             mtu;
};
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
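/* Illustrative worked example: for the Command Complete event,
 * nr = HCI_EV_CMD_COMPLETE = 0x0e, so nr >> 5 selects __u32 word 0 of the
 * bitmap and the test mask is 1 << 14 = 0x4000. The default security
 * event mask below (first word 0x1000d9fe) has that bit set, so the
 * event passes the check.
 */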
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
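/* Illustrative reading of the table above: a command opcode splits into
 * OGF (upper 6 bits) and OCF (lower 10 bits), and hci_sock_sendmsg()
 * tests bit (ocf & HCI_FLT_OCF_BITS) of ocf_mask[ogf]. For example,
 * HCI Reset is opcode 0x0c03 (OGF 0x03, OCF 0x0003); bit 3 of the first
 * OGF_HOST_CTL word (0xaab00200) is clear, so an unprivileged raw socket
 * cannot send Reset without CAP_NET_RAW.
 */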
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
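/* Layout note for the offsets used above: skb->data points at the event
 * header here, so a Command Complete event carries its opcode at
 * data + 3 (after event code, parameter length and
 * Num_HCI_Command_Packets), while Command Status carries an additional
 * status byte and has the opcode at data + 4.
 */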
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
{
	struct scm_creds *creds;

	if (!sk || WARN_ON(!skb))
		return;

	creds = &bt_cb(skb)->creds;

	/* Check if peer credentials is set */
	if (!sk->sk_peer_pid) {
		/* Check if parent peer credentials is set */
		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
			sk = bt_sk(sk)->parent;
		else
			return;
	}

	/* Check if scm_creds already set */
	if (creds->pid == pid_vnr(sk->sk_peer_pid))
		return;

	memset(creds, 0, sizeof(*creds));

	creds->pid = pid_vnr(sk->sk_peer_pid);
	if (sk->sk_peer_cred) {
		creds->uid = sk->sk_peer_cred->uid;
		creds->gid = sk->sk_peer_cred->gid;
	}
}
static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb)
		return NULL;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_sock_copy_creds(skb->sk, nskb);

	return nskb;
}
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = hci_skb_clone(skb);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	hci_sock_copy_creds(skb->sk, skb_copy);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
			       strnlen(hdev->name, sizeof(ni->name)), '\0');

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	hci_sock_copy_creds(sk, skb);

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
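/* Usage sketch (hypothetical caller; the field values mirror what the
 * management core in mgmt.c is expected to pass and are illustrative):
 *
 *	static struct hci_mgmt_chan chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(mgmt_handlers),
 *		.handlers	= mgmt_handlers,
 *		.hdev_init	= mgmt_init_hdev,
 *	};
 *
 *	err = hci_mgmt_chan_register(&chan);
 *
 * Channels below HCI_CHANNEL_CONTROL are rejected with -EINVAL and
 * duplicate channel numbers with -EALREADY.
 */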
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been complete and hdev will get released when we put
			 * below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCISETRAW:
	case HCIINQUIRY:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
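/* Userspace sketch (illustrative only, assuming the usual AF_BLUETOOTH
 * definitions from the uapi headers): binding a raw channel socket to
 * device index 0 (hci0), with the errors produced by the checks above:
 *
 *	struct sockaddr_hci addr = { 0 };
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	addr.hci_family  = AF_BLUETOOTH;
 *	addr.hci_dev     = 0;
 *	addr.hci_channel = HCI_CHANNEL_RAW;
 *	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 */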
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	memset(&scm, 0, sizeof(scm));
	scm.creds = bt_cb(skb)->creds;

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	scm_recv(sock, msg, &scm, flags);

	return err ? : copied;
}
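/* Illustrative read path (userspace; assumes HCI_DATA_DIR was enabled
 * with setsockopt): each datagram on a raw channel starts with the
 * packet type byte, and the direction flag arrives as a
 * SOL_HCI/HCI_CMSG_DIR control message:
 *
 *	unsigned char buf[HCI_MAX_FRAME_SIZE], cbuf[64];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	ssize_t n = recvmsg(fd, &msg, 0);
 */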
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= TIZEN_OP_CODE_BASE) {
		u16 tizen_opcode_index = opcode - TIZEN_OP_CODE_BASE;

		if (tizen_opcode_index >= chan->tizen_handler_count ||
		    chan->tizen_handlers[tizen_opcode_index].func == NULL) {
			BT_DBG("Unknown op %u", opcode);
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_UNKNOWN_COMMAND);
			goto done;
		}

		handler = &chan->tizen_handlers[tizen_opcode_index];
	} else {
		if (chan->channel == HCI_CHANNEL_CONTROL) {
			struct sk_buff *cmd;

			/* Send event to monitor */
			cmd = create_monitor_ctrl_command(sk, index, opcode, len,
							  skb->data + sizeof(*hdr));
			if (cmd) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(cmd);
			}
		}

		if (opcode >= chan->handler_count ||
		    chan->handlers[opcode].func == NULL) {
			BT_DBG("Unknown op %u", opcode);
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_UNKNOWN_COMMAND);
			goto done;
		}

		handler = &chan->handlers[opcode];
	}

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree_skb(skb);
	return err;
}
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
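/* Illustrative frame that passes the checks above (opcode 0x0000,
 * priority 6, ident "test"; ident_len is 5 because it counts the NUL):
 *
 *	hci_mon_hdr | 0x06 | 0x05 | 't' 'e' 's' 't' '\0' | "message" '\0'
 *
 * hdr->len must match the payload length exactly and the last byte of
 * the packet must be the message's NUL terminator, otherwise the frame
 * is rejected with -EINVAL.
 */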
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
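/* Userspace sketch (illustrative, using the BlueZ hci_lib.h filter
 * macros): restricting a raw socket to Command Complete and Command
 * Status events:
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	hci_filter_set_event(EVT_CMD_STATUS, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * Without CAP_NET_RAW the requested masks are silently intersected with
 * hci_sec_filter above, so privileged-only events stay hidden.
 */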
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
			   kern);
	if (!sk)
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}