struct hci_rp_write_link_policy *rp = data;
struct hci_conn *conn;
void *sent;
+#ifdef TIZEN_BT
+ struct hci_cp_write_link_policy cp;
+ struct hci_conn *sco_conn;
+#endif
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (conn)
conn->link_policy = get_unaligned_le16(sent + 2);
+#ifdef TIZEN_BT
+ sco_conn = hci_conn_hash_lookup_sco(hdev);
+ if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
+ conn->link_policy & HCI_LP_SNIFF) {
+ BT_ERR("SNIFF is not allowed during sco connection");
+ cp.handle = __cpu_to_le16(conn->handle);
+ cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
+ hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
+ }
+#endif
+
hci_dev_unlock(hdev);
return rp->status;
{
struct hci_rp_read_class_of_dev *rp = data;
+ if (WARN_ON(!hdev))
+ return HCI_ERROR_UNSPECIFIED;
+
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
} else {
conn->enc_key_size = rp->key_size;
status = 0;
+
+ if (conn->enc_key_size < hdev->min_enc_key_size) {
+ /* As slave role, the conn->state has been set to
+ * BT_CONNECTED and l2cap conn req might not be received
+ * yet, at this moment the l2cap layer almost does
+ * nothing with the non-zero status.
+ * So we also clear encrypt related bits, and then the
+ * handler of l2cap conn req will get the right secure
+ * state at a later time.
+ */
+ status = HCI_ERROR_AUTH_FAILURE;
+ clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
+ clear_bit(HCI_CONN_AES_CCM, &conn->flags);
+ }
}
- hci_encrypt_cfm(conn, 0);
+ hci_encrypt_cfm(conn, status);
done:
hci_dev_unlock(hdev);
if (!rp->status)
conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
- hci_encrypt_cfm(conn, 0);
-
unlock:
hci_dev_unlock(hdev);
d->last_adv_data_len = 0;
}
+#ifndef TIZEN_BT
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 bdaddr_type, s8 rssi, u32 flags,
u8 *data, u8 len)
memcpy(d->last_adv_data, data, len);
d->last_adv_data_len = len;
}
+#endif
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
* therefore discovery as stopped.
*/
if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
+#ifndef TIZEN_BT /* The below line is kernel bug. */
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+#else
+ hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
+#endif
else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
hdev->discovery.state == DISCOVERY_FINDING)
queue_work(hdev->workqueue, &hdev->reenable_adv_work);
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+#ifdef TIZEN_BT
+ hci_dev_lock(hdev);
+#else
if (rp->status)
return rp->status;
+#endif
hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
+#ifdef TIZEN_BT
+ mgmt_le_read_host_suggested_data_length_complete(hdev, rp->status);
+
+ hci_dev_unlock(hdev);
+#endif
+
return rp->status;
}
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
+#ifndef TIZEN_BT
return rp->status;
+#else
+ goto unblock;
+#endif
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
if (!sent)
+#ifndef TIZEN_BT
return rp->status;
+#else
+ goto unblock;
+#endif
hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
return rp->status;
+#ifdef TIZEN_BT
+unblock:
+ mgmt_le_write_host_suggested_data_length_complete(hdev, rp->status);
+ return rp->status;
+#endif
}
static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+#ifndef TIZEN_BT
if (rp->status)
return rp->status;
+#else
+ hci_dev_lock(hdev);
+#endif
hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
+#ifdef TIZEN_BT
+ mgmt_le_read_maximum_data_length_complete(hdev, rp->status);
+ hci_dev_unlock(hdev);
+#endif
+
return rp->status;
}
return rp->status;
}
+#ifdef TIZEN_BT
+/* TIZEN-specific Command Complete handler for the vendor
+ * HCI_OP_ENABLE_RSSI command. Relays the controller's response to
+ * the mgmt layer and returns the HCI status byte from the response.
+ */
+static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+{
+ struct hci_cc_rsp_enable_rssi *rp = data;
+
+ BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
+ hdev->name, rp->status, rp->le_ext_opcode);
+
+ /* Forward the full response (success or failure) to userspace
+ * via the mgmt interface.
+ */
+ mgmt_enable_rssi_cc(hdev, rp, rp->status);
+
+ return rp->status;
+}
+
+/* TIZEN-specific Command Complete handler for the vendor
+ * HCI_OP_GET_RAW_RSSI command. Passes the raw RSSI reading
+ * (connection handle + dBm value) to the mgmt layer and returns
+ * the HCI status byte.
+ */
+static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+{
+ struct hci_cc_rp_get_raw_rssi *rp = data;
+
+ BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
+ hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
+
+ mgmt_raw_rssi_response(hdev, rp, rp->status);
+
+ return rp->status;
+}
+
+/* Handle the vendor LE_RSSI_LINK_ALERT sub-event: decode the alert
+ * payload and notify the mgmt layer.
+ *
+ * NOTE(review): caller (hci_vendor_specific_group_ext_evt) has already
+ * pulled the outer vendor header, so skb->data points at the alert
+ * payload here.
+ */
+static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
+
+ BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);
+
+ mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
+ ev->rssi_dbm);
+}
+
+/* Demultiplex the vendor LE "group extension" event: strip the group
+ * header and dispatch on the extended sub-code. Unknown sub-codes are
+ * silently ignored.
+ */
+static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
+ __u8 event_le_ext_sub_code;
+
+ BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
+ LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
+
+ /* Advance past the group header; ev still points at the (unmoved)
+ * header data, so reading the sub-code below remains valid.
+ */
+ skb_pull(skb, sizeof(*ev));
+ event_le_ext_sub_code = ev->event_le_ext_sub_code;
+
+ switch (event_le_ext_sub_code) {
+ case LE_RSSI_LINK_ALERT:
+ hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Handle the vendor multi-advertising state change sub-event and
+ * forward the advertising instance, reason, and connection handle to
+ * the mgmt layer.
+ *
+ * NOTE(review): caller has already pulled the outer vendor header, so
+ * skb->data points at this sub-event's payload.
+ */
+static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;
+
+ BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");
+
+ mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
+ ev->state_change_reason,
+ ev->connection_handle);
+}
+
+/* Top-level dispatcher for vendor-specific HCI events (event code
+ * 0xFF): strip the vendor header and route by sub-code to the group
+ * extension or multi-adv state change handlers. Unknown sub-codes are
+ * silently ignored.
+ */
+static void hci_vendor_specific_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+{
+ struct hci_ev_vendor_specific *ev = (void *)skb->data;
+ __u8 event_sub_code;
+
+ BT_DBG("hci_vendor_specific_evt");
+
+ /* Advance past the vendor header; ev still points at the (unmoved)
+ * header data, so reading the sub-code below remains valid.
+ */
+ skb_pull(skb, sizeof(*ev));
+ event_sub_code = ev->event_sub_code;
+
+ switch (event_sub_code) {
+ case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
+ hci_vendor_specific_group_ext_evt(hdev, skb);
+ break;
+
+ case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
+ hci_vendor_multi_adv_state_change_evt(hdev, skb);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* TIZEN handler for HCI_EV_LE_DATA_LEN_CHANGE: cache the new LE data
+ * length parameters (tx/rx length and time) on the matching connection
+ * and notify the mgmt layer. If no connection matches the handle, the
+ * event is silently dropped.
+ */
+static void hci_le_data_length_changed_complete_evt(struct hci_dev *hdev,
+ void *data,
+ struct sk_buff *skb)
+{
+ struct hci_ev_le_data_len_change *ev = (void *)skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status", hdev->name);
+
+ /* hdev->lock protects the connection hash lookup and the updates
+ * to the conn fields below.
+ */
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+ if (conn) {
+ conn->tx_len = le16_to_cpu(ev->tx_len);
+ conn->tx_time = le16_to_cpu(ev->tx_time);
+ conn->rx_len = le16_to_cpu(ev->rx_len);
+ conn->rx_time = le16_to_cpu(ev->rx_time);
+
+ mgmt_le_data_length_change_complete(hdev, &conn->dst,
+ conn->tx_len, conn->tx_time,
+ conn->rx_len, conn->rx_time);
+ }
+
+ hci_dev_unlock(hdev);
+}
+#endif
+
static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
return;
}
- set_bit(HCI_INQUIRY, &hdev->flags);
+ if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
+ set_bit(HCI_INQUIRY, &hdev->flags);
}
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
}
} else {
if (!conn) {
- conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
- HCI_ROLE_MASTER);
+ conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
+ HCI_ROLE_MASTER);
if (!conn)
bt_dev_err(hdev, "no memory for new connection");
}
struct discovery_state *discov = &hdev->discovery;
struct inquiry_entry *e;
+#ifdef TIZEN_BT
/* Update the mgmt connected state if necessary. Be careful with
* conn objects that exist but are not (yet) connected however.
* Only those in BT_CONFIG or BT_CONNECTED states can be
* considered connected.
*/
if (conn &&
+ (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
+ if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, conn, name, name_len);
+ else
+ mgmt_device_name_update(hdev, bdaddr, name, name_len);
+ }
+#else
+ if (conn &&
(conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
mgmt_device_connected(hdev, conn, name, name_len);
+#endif
if (discov->state == DISCOVERY_STOPPED)
return;
goto unlock;
if (status) {
+ u8 type = conn->type;
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status);
- if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
- hdev->cur_adv_instance = conn->adv_instance;
+ if (type == LE_LINK)
hci_enable_advertising(hdev);
- }
/* Inform sockets conn is gone before we delete it */
hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
&ev->bdaddr,
BDADDR_BREDR)) {
- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
- HCI_ROLE_SLAVE);
+ conn = hci_conn_add_unset(hdev, ev->link_type,
+ &ev->bdaddr, HCI_ROLE_SLAVE);
if (!conn) {
bt_dev_err(hdev, "no memory for new conn");
goto unlock;
hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
&cp);
}
+
+#ifdef TIZEN_BT
+ if (get_link_mode(conn) & HCI_LM_MASTER)
+ hci_conn_change_supervision_timeout(conn,
+ LINK_SUPERVISION_TIMEOUT);
+#endif
}
if (conn->type == ACL_LINK)
if (ie)
memcpy(ie->data.dev_class, ev->dev_class, 3);
+#ifdef TIZEN_BT
+ if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
+ hci_conn_hash_lookup_sco(hdev)) {
+ struct hci_cp_reject_conn_req cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
+ hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
+ sizeof(cp), &cp);
+ hci_dev_unlock(hdev);
+ return;
+ }
+#endif
+
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
&ev->bdaddr);
if (!conn) {
- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
- HCI_ROLE_SLAVE);
+ conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
+ HCI_ROLE_SLAVE);
if (!conn) {
bt_dev_err(hdev, "no memory for new connection");
goto unlock;
struct hci_conn_params *params;
struct hci_conn *conn;
bool mgmt_connected;
+ u8 type;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
break;
}
}
+ type = conn->type;
hci_disconn_cfm(conn, ev->reason);
+ hci_conn_del(conn);
/* Re-enable advertising if necessary, since it might
* have been disabled by the connection. From the
* or until a connection is created or until the Advertising
* is timed out due to Directed Advertising."
*/
- if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
- hdev->cur_adv_instance = conn->adv_instance;
+
+ if (type == LE_LINK)
hci_enable_advertising(hdev);
- }
- hci_conn_del(conn);
+
+#ifdef TIZEN_BT
+ if (conn->type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
+ int iscan;
+ int pscan;
+
+ iscan = test_bit(HCI_ISCAN, &hdev->flags);
+ pscan = test_bit(HCI_PSCAN, &hdev->flags);
+ if (!iscan && !pscan) {
+ u8 scan_enable = SCAN_PAGE;
+
+ hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
+ sizeof(scan_enable), &scan_enable);
+ }
+ }
+#endif
unlock:
hci_dev_unlock(hdev);
if (!conn)
goto unlock;
+#ifdef TIZEN_BT
+ /* PIN or Key Missing patch */
+ BT_DBG("remote_auth %x, remote_cap %x, auth_type %x, io_capability %x",
+ conn->remote_auth, conn->remote_cap,
+ conn->auth_type, conn->io_capability);
+
+ if (ev->status == 0x06 && hci_conn_ssp_enabled(conn)) {
+ struct hci_cp_auth_requested cp;
+
+ BT_DBG("Pin or key missing");
+ hci_remove_link_key(hdev, &conn->dst);
+ cp.handle = cpu_to_le16(conn->handle);
+ hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
+ goto unlock;
+ }
+#endif
+
if (!ev->status) {
clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
-
- if (!hci_conn_ssp_enabled(conn) &&
- test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
- bt_dev_info(hdev, "re-auth of legacy device is not possible.");
- } else {
- set_bit(HCI_CONN_AUTH, &conn->flags);
- conn->sec_level = conn->pending_sec_level;
- }
+ set_bit(HCI_CONN_AUTH, &conn->flags);
+ conn->sec_level = conn->pending_sec_level;
} else {
if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
}
clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
- clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
if (conn->state == BT_CONFIG) {
if (!ev->status && hci_conn_ssp_enabled(conn)) {
cp.handle = cpu_to_le16(conn->handle);
cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
- sizeof(cp), &cp)) {
+ sizeof(cp), &cp))
bt_dev_err(hdev, "write auth payload timeout failed");
- goto notify;
- }
-
- goto unlock;
}
notify:
hci_cc_le_set_per_adv_enable),
HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
sizeof(struct hci_rp_le_read_transmit_power)),
+#ifdef TIZEN_BT
+ HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
+ sizeof(struct hci_cc_rsp_enable_rssi)),
+ HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
+ sizeof(struct hci_cc_rp_get_raw_rssi)),
+#endif
HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
sizeof(struct hci_rp_le_read_buffer_size_v2)),
bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
+#ifdef TIZEN_BT
+ hci_dev_lock(hdev);
+ mgmt_hardware_error(hdev, ev->code);
+ hci_dev_unlock(hdev);
+#endif
hdev->hw_error_code = ev->code;
queue_work(hdev->req_workqueue, &hdev->error_reset);
clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
hci_role_switch_cfm(conn, ev->status, ev->role);
+#ifdef TIZEN_BT
+ if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
+ hci_conn_change_supervision_timeout(conn,
+ LINK_SUPERVISION_TIMEOUT);
+#endif
}
hci_dev_unlock(hdev);
static u8 hci_get_auth_req(struct hci_conn *conn)
{
+#ifdef TIZEN_BT
+ if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
+ if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
+ conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
+ return HCI_AT_GENERAL_BONDING_MITM;
+ }
+#endif
+
/* If remote requests no-bonding follow that lead */
if (conn->remote_auth == HCI_AT_NO_BONDING ||
conn->remote_auth == HCI_AT_NO_BONDING_MITM)
if (status)
goto unlock;
- conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
+ conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
if (!conn) {
bt_dev_err(hdev, "no memory for new connection");
goto unlock;
}
}
} else {
+#ifdef TIZEN_BT
+ /* LE auto connect */
+ bacpy(&conn->dst, bdaddr);
+#endif
cancel_delayed_work(&conn->le_conn_timeout);
}
conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
- if (handle > HCI_CONN_HANDLE_MAX) {
- bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
- HCI_CONN_HANDLE_MAX);
- status = HCI_ERROR_INVALID_PARAMETERS;
- }
-
/* All connection failure handling is taken care of by the
* hci_conn_failed function which is triggered by the HCI
* request completion callbacks used for connecting.
*/
- if (status)
+ if (status || hci_conn_set_handle(conn, handle))
goto unlock;
/* Drop the connection if it has been aborted */
mgmt_device_connected(hdev, conn, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
- conn->handle = handle;
conn->state = BT_CONFIG;
- /* Store current advertising instance as connection advertising instance
- * when sotfware rotation is in use so it can be re-enabled when
- * disconnected.
- */
- if (!ext_adv_capable(hdev))
- conn->adv_instance = hdev->cur_adv_instance;
-
conn->le_conn_interval = interval;
conn->le_conn_latency = latency;
conn->le_supv_timeout = supervision_timeout;
{
struct hci_evt_le_ext_adv_set_term *ev = data;
struct hci_conn *conn;
- struct adv_info *adv, *n;
+ struct adv_info *n;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
- adv = hci_find_adv_instance(hdev, ev->handle);
if (ev->status) {
+ struct adv_info *adv;
+ adv = hci_find_adv_instance(hdev, ev->handle);
if (!adv)
goto unlock;
goto unlock;
}
- if (adv)
- adv->enabled = false;
-
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
if (conn) {
- /* Store handle in the connection so the correct advertising
- * instance can be re-enabled when disconnected.
- */
- conn->adv_instance = ev->handle;
+ struct adv_info *adv_instance;
if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
bacmp(&conn->resp_addr, BDADDR_ANY))
goto unlock;
}
- if (adv)
- bacpy(&conn->resp_addr, &adv->random_addr);
+ adv_instance = hci_find_adv_instance(hdev, ev->handle);
+ if (adv_instance)
+ bacpy(&conn->resp_addr, &adv_instance->random_addr);
}
unlock:
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn) {
+#ifdef TIZEN_BT
+ if (ev->status) {
+ hci_dev_unlock(hdev);
+ mgmt_le_conn_update_failed(hdev, &conn->dst,
+ conn->type, conn->dst_type, ev->status);
+ return;
+ }
+#endif
conn->le_conn_interval = le16_to_cpu(ev->interval);
conn->le_conn_latency = le16_to_cpu(ev->latency);
conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
}
hci_dev_unlock(hdev);
+
+#ifdef TIZEN_BT
+ mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
+ conn->dst_type, conn->le_conn_interval,
+ conn->le_conn_latency, conn->le_supv_timeout);
+#endif
}
/* This function requires the caller holds hdev->lock */
u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
bool ext_adv, bool ctl_time, u64 instant)
{
+#ifndef TIZEN_BT
struct discovery_state *d = &hdev->discovery;
+ bool match;
+#endif
struct smp_irk *irk;
struct hci_conn *conn;
- bool match, bdaddr_resolved;
+ bool bdaddr_resolved;
u32 flags;
u8 *ptr;
if (type == LE_ADV_DIRECT_IND)
return;
+#ifndef TIZEN_BT
+ /* Handle all adv packet in platform */
if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
bdaddr, bdaddr_type) &&
idr_is_empty(&hdev->adv_monitors_idr))
return;
+#endif
+#ifdef TIZEN_BT
+ mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+ rssi, flags, data, len, NULL, 0, type);
+#else
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
rssi, flags, data, len, NULL, 0, 0);
+#endif
return;
}
if (type == LE_ADV_SCAN_RSP)
flags = MGMT_DEV_FOUND_SCAN_RSP;
+#ifdef TIZEN_BT
+ /* Disable adv ind and scan rsp merging */
+ mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+ rssi, flags, data, len, NULL, 0, type);
+#else
/* If there's nothing pending either store the data from this
* event or send an immediate device found event if the data
* should not be stored for later.
d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
d->last_adv_data, d->last_adv_data_len, data, len, 0);
clear_pending_adv_report(hdev);
+#endif
}
static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
struct hci_ev_le_pa_sync_established *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
- struct hci_conn *bis;
+ struct hci_conn *pa_sync;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
if (!(flags & HCI_PROTO_DEFER))
goto unlock;
- /* Add connection to indicate the PA sync event */
- bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
- HCI_ROLE_SLAVE);
+ if (ev->status) {
+ /* Add connection to indicate the failed PA sync event */
+ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
+ HCI_ROLE_SLAVE);
- if (!bis)
- goto unlock;
+ if (!pa_sync)
+ goto unlock;
- if (ev->status)
- set_bit(HCI_CONN_PA_SYNC_FAILED, &bis->flags);
- else
- set_bit(HCI_CONN_PA_SYNC, &bis->flags);
+ set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
- /* Notify connection to iso layer */
- hci_connect_cfm(bis, ev->status);
+ /* Notify iso layer */
+ hci_connect_cfm(pa_sync, ev->status);
+ }
unlock:
hci_dev_unlock(hdev);
cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
if (!cis) {
- cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
+ cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
+ cis_handle);
if (!cis) {
hci_le_reject_cis(hdev, ev->cis_handle);
goto unlock;
}
- cis->handle = cis_handle;
}
cis->iso_qos.ucast.cig = ev->cig_id;
hci_dev_lock(hdev);
if (!ev->status) {
- pa_sync = hci_conn_hash_lookup_pa_sync(hdev, ev->handle);
+ pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
if (pa_sync)
/* Also mark the BIG sync established event on the
* associated PA sync hcon
bis = hci_conn_hash_lookup_handle(hdev, handle);
if (!bis) {
bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
- HCI_ROLE_SLAVE);
+ HCI_ROLE_SLAVE, handle);
if (!bis)
continue;
- bis->handle = handle;
}
if (ev->status != 0x42)
struct hci_evt_le_big_info_adv_report *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
+ struct hci_conn *pa_sync;
bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
hci_dev_lock(hdev);
mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
- if (!(mask & HCI_LM_ACCEPT))
+ if (!(mask & HCI_LM_ACCEPT)) {
hci_le_pa_term_sync(hdev, ev->sync_handle);
+ goto unlock;
+ }
+
+ if (!(flags & HCI_PROTO_DEFER))
+ goto unlock;
+ pa_sync = hci_conn_hash_lookup_pa_sync_handle
+ (hdev,
+ le16_to_cpu(ev->sync_handle));
+
+ if (pa_sync)
+ goto unlock;
+
+ /* Add connection to indicate the PA sync event */
+ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
+ HCI_ROLE_SLAVE);
+
+ if (!pa_sync)
+ goto unlock;
+
+ pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
+ set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
+
+ /* Notify iso layer */
+ hci_connect_cfm(pa_sync, 0x00);
+
+unlock:
hci_dev_unlock(hdev);
}
HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
hci_le_remote_conn_param_req_evt,
sizeof(struct hci_ev_le_remote_conn_param_req)),
+#ifdef TIZEN_BT
+ /* [0x07 = HCI_EV_LE_DATA_LEN_CHANGE] */
+ HCI_LE_EV(HCI_EV_LE_DATA_LEN_CHANGE,
+ hci_le_data_length_changed_complete_evt,
+ sizeof(struct hci_ev_le_data_len_change)),
+#endif
/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
hci_le_enh_conn_complete_evt,
/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
sizeof(struct hci_ev_num_comp_blocks)),
+#ifdef TIZEN_BT
+ /* [0xFF = HCI_EV_VENDOR_SPECIFIC] */
+ HCI_EV(HCI_EV_VENDOR_SPECIFIC, hci_vendor_specific_evt,
+ sizeof(struct hci_ev_vendor_specific)),
+#else
/* [0xff = HCI_EV_VENDOR] */
HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
+#endif
};
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,