1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * BlueZ - Bluetooth protocol stack for Linux
6 * Copyright (C) 2010 GSyC/LibreSoft, Universidad Rey Juan Carlos.
7 * Copyright (C) 2010 Signove
8 * Copyright (C) 2014 Intel Corporation. All rights reserved.
16 #include <netinet/in.h>
23 #include "lib/bluetooth.h"
24 #include "bluetooth/l2cap.h"
25 #include "btio/btio.h"
27 #include "src/shared/timeout.h"
31 #define MCAP_BTCLOCK_HALF (MCAP_BTCLOCK_FIELD / 2)
32 #define CLK CLOCK_MONOTONIC
34 #define MCAP_CSP_ERROR g_quark_from_static_string("mcap-csp-error-quark")
35 #define MAX_RETRIES 10
36 #define SAMPLE_COUNT 20
38 #define RESPONSE_TIMER 6 /* seconds */
39 #define MAX_CACHED 10 /* 10 devices */
41 #define MCAP_ERROR g_quark_from_static_string("mcap-error-quark")
/*
 * Cancel the MCL's pending-response timer (mcl->tid, armed when a request
 * is sent). NOTE(review): interior lines of this macro are elided in this
 * view; the do/while wrapper continuation is not fully visible.
 */
43 #define RELEASE_TIMER(__mcl) do { \
45 timeout_remove(__mcl->tid); \
/*
 * Data structures (fragmentary view — struct headers/closers are elided).
 * The first group of fields belongs to the CSP (Clock Synchronization
 * Protocol) per-MCL state; "Cent." = Central role, "Perip" = Peripheral.
 */
51 uint64_t base_tmstamp; /* CSP base timestamp */
52 struct timespec base_time; /* CSP base time when timestamp set */
53 guint local_caps; /* CSP-Cent.: have got remote caps */
54 guint remote_caps; /* CSP-Perip: remote central got caps */
55 guint rem_req_acc; /* CSP-Perip: accuracy req by central */
56 guint ind_expected; /* CSP-Cent.: indication expected */
57 uint8_t csp_req; /* CSP-Cent.: Request control flag */
58 guint ind_timer; /* CSP-Perip: indication timer */
59 guint set_timer; /* CSP-Perip: delayed set timer */
60 void *set_data; /* CSP-Perip: delayed set data */
61 void *csp_priv_data; /* CSP-Cent.: In-flight request data */
/* Callback bundles for async sync-capability / sync-set requests. */
64 struct mcap_sync_cap_cbdata {
69 struct mcap_sync_set_cbdata {
/* Local clock capabilities advertised/used by CSP. */
75 int ts_acc; /* timestamp accuracy */
76 int ts_res; /* timestamp resolution */
77 int latency; /* Read BT clock latency */
78 int preempt_thresh; /* Preemption threshold for latency */
79 int syncleadtime_ms; /* SyncLeadTime in ms */
/* Deferred sync-set parameters (applied when sched_btclock is reached). */
82 struct sync_set_data {
84 uint32_t sched_btclock;
/* Pending MCL connect operation context. */
91 struct mcap_mcl *mcl; /* MCL for this operation */
92 mcap_mcl_connect_cb connect_cb; /* Connect callback */
93 GDestroyNotify destroy; /* Destroy callback */
94 gpointer user_data; /* Callback user data */
/* Union-style callback slots used by struct mcap_mdl_op_cb below. */
98 mcap_mdl_operation_cb op;
99 mcap_mdl_operation_conf_cb op_conf;
100 mcap_mdl_notify_cb notify;
/* Context for an in-flight MDL operation (create/reconnect/abort/delete). */
103 struct mcap_mdl_op_cb {
104 struct mcap_mdl *mdl; /* MDL for this operation */
105 mcap_cb_type cb; /* Operation callback */
106 GDestroyNotify destroy; /* Destroy callback */
107 gpointer user_data; /* Callback user data */
110 /* MCAP finite state machine functions */
111 static void proc_req_connected(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t l);
112 static void proc_req_pending(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t l);
113 static void proc_req_active(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t l);
/* Dispatch table indexed by MCL state (see proc_cmd). */
115 static void (*proc_req[])(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len) = {
/* Lazily-initialized local CSP capabilities (shared by all instances). */
121 static gboolean csp_caps_initialized = FALSE;
122 struct csp_caps _caps;
124 static void mcap_cache_mcl(struct mcap_mcl *mcl);
/*
 * Default MDL event callbacks: installed by set_default_cb() so every
 * event slot is always callable. They only log; remote connection and
 * reconnection requests are rejected until the upper layer overrides
 * them via mcap_mcl_set_cb().
 */
126 static void default_mdl_connected_cb(struct mcap_mdl *mdl, gpointer data)
128 DBG("MCAP Unmanaged mdl connection");
131 static void default_mdl_closed_cb(struct mcap_mdl *mdl, gpointer data)
133 DBG("MCAP Unmanaged mdl closed");
136 static void default_mdl_deleted_cb(struct mcap_mdl *mdl, gpointer data)
138 DBG("MCAP Unmanaged mdl deleted");
141 static void default_mdl_aborted_cb(struct mcap_mdl *mdl, gpointer data)
143 DBG("MCAP Unmanaged mdl aborted");
146 static uint8_t default_mdl_conn_req_cb(struct mcap_mcl *mcl,
147 uint8_t mdepid, uint16_t mdlid,
148 uint8_t *conf, gpointer data)
150 DBG("MCAP mdl remote connection aborted");
151 /* This callback is not managed, so the request is not supported */
152 return MCAP_REQUEST_NOT_SUPPORTED;
155 static uint8_t default_mdl_reconn_req_cb(struct mcap_mdl *mdl,
158 DBG("MCAP mdl remote reconnection aborted");
159 /* This callback is not managed, so the request is not supported */
160 return MCAP_REQUEST_NOT_SUPPORTED;
/* Allocate the MCL's callback table and fill it with the defaults above. */
163 static void set_default_cb(struct mcap_mcl *mcl)
166 mcl->cb = g_new0(struct mcap_mdl_cb, 1);
168 mcl->cb->mdl_connected = default_mdl_connected_cb;
169 mcl->cb->mdl_closed = default_mdl_closed_cb;
170 mcl->cb->mdl_deleted = default_mdl_deleted_cb;
171 mcl->cb->mdl_aborted = default_mdl_aborted_cb;
172 mcl->cb->mdl_conn_req = default_mdl_conn_req_cb;
173 mcl->cb->mdl_reconn_req = default_mdl_reconn_req_cb;
/*
 * Map an MCAP response code to a human-readable string (used for GError
 * messages). NOTE(review): switch header and some case lines are elided
 * in this view.
 */
176 static char *error2str(uint8_t rc)
181 case MCAP_INVALID_OP_CODE:
182 return "Invalid Op Code";
183 case MCAP_INVALID_PARAM_VALUE:
184 return "Invalid Parameter Value";
185 case MCAP_INVALID_MDEP:
186 return "Invalid MDEP";
189 case MCAP_INVALID_MDL:
190 return "Invalid MDL";
193 case MCAP_INVALID_OPERATION:
194 return "Invalid Operation";
195 case MCAP_RESOURCE_UNAVAILABLE:
196 return "Resource Unavailable";
197 case MCAP_UNSPECIFIED_ERROR:
198 return "Unspecified Error";
199 case MCAP_REQUEST_NOT_SUPPORTED:
200 return "Request Not Supported";
201 case MCAP_CONFIGURATION_REJECTED:
202 return "Configuration Rejected";
204 return "Unknown Response Code";
/*
 * Send a standard-opcode MCAP command on the control channel after
 * validating preconditions: MCL connected, no request already in flight,
 * remote supports standard opcodes, and MCL not in PENDING state.
 * On success marks the MCL as waiting for a response (MCL_WAITING_RSP).
 * Returns FALSE and sets *err on any failure. The elided lines include
 * the early returns after each g_set_error().
 */
208 static gboolean mcap_send_std_opcode(struct mcap_mcl *mcl, void *cmd,
209 uint32_t size, GError **err)
211 if (mcl->state == MCL_IDLE) {
212 g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
213 "MCL is not connected");
217 if (mcl->req != MCL_AVAILABLE) {
218 g_set_error(err, MCAP_ERROR, MCAP_ERROR_RESOURCE_UNAVAILABLE,
223 if (!(mcl->ctrl & MCAP_CTRL_STD_OP)) {
224 g_set_error(err, MCAP_ERROR, MCAP_ERROR_REQUEST_NOT_SUPPORTED,
225 "Remote does not support standard opcodes");
229 if (mcl->state == MCL_PENDING) {
230 g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_OPERATION,
231 "Not Std Op. Codes can be sent in PENDING State");
235 if (mcap_send_data(g_io_channel_unix_get_fd(mcl->cc), cmd, size) < 0) {
236 g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
237 "Command can't be sent, write error");
242 mcl->req = MCL_WAITING_RSP;
/*
 * Recompute the MCL state from its MDL list: ACTIVE if any MDL is
 * connected, otherwise CONNECTED. A PENDING MCL is left untouched.
 */
247 static void update_mcl_state(struct mcap_mcl *mcl)
250 struct mcap_mdl *mdl;
252 if (mcl->state == MCL_PENDING)
255 for (l = mcl->mdls; l; l = l->next) {
258 if (mdl->state == MDL_CONNECTED) {
259 mcl->state = MCL_ACTIVE;
264 mcl->state = MCL_CONNECTED;
/*
 * Close an MDL's data channel: drop its watch, shut down and unref the
 * GIOChannel, and mark it MDL_CLOSED.
 */
267 static void shutdown_mdl(struct mcap_mdl *mdl)
269 mdl->state = MDL_CLOSED;
272 g_source_remove(mdl->wid);
277 g_io_channel_shutdown(mdl->dc, TRUE, NULL);
278 g_io_channel_unref(mdl->dc);
/* Release an MDL's reference on its owning MCL (elided: g_free of mdl). */
283 static void free_mdl(struct mcap_mdl *mdl)
288 mcap_mcl_unref(mdl->mcl);
/*
 * GCompareFunc for g_slist_find_custom: match an MDL by state.
 * NOTE(review): the return statements are elided in this view.
 */
292 static int cmp_mdl_state(gconstpointer a, gconstpointer b)
294 const struct mcap_mdl *mdl = a;
295 const MDLState *st = b;
297 if (mdl->state == *st)
299 else if (mdl->state < *st)
/* Run the operation's destroy notify, drop its MDL ref, and free it. */
305 static void free_mcap_mdl_op(struct mcap_mdl_op_cb *op)
308 op->destroy(op->user_data);
311 mcap_mdl_unref(op->mdl);
/* Free the MCL's in-flight operation context and clear the pointer. */
316 static void free_mcl_priv_data(struct mcap_mcl *mcl)
318 free_mcap_mdl_op(mcl->priv_data);
319 mcl->priv_data = NULL;
/*
 * Deliver an error to the callback of the in-flight request (identified
 * by the first byte of the last sent command, mcl->lcmd[0]), roll back
 * any MDL state changed optimistically when the request was sent, and
 * release the private operation data. No-op if nothing is in flight.
 */
322 static void mcap_notify_error(struct mcap_mcl *mcl, GError *err)
324 struct mcap_mdl_op_cb *con = mcl->priv_data;
325 struct mcap_mdl *mdl;
329 if (!con || !mcl->lcmd)
332 switch (mcl->lcmd[0]) {
333 case MCAP_MD_CREATE_MDL_REQ:
/* Drop the MDL that was speculatively added for the create request. */
335 l = g_slist_find_custom(mcl->mdls, &st, cmp_mdl_state);
337 mcl->mdls = g_slist_remove(mcl->mdls, mdl);
339 update_mcl_state(mcl);
340 con->cb.op_conf(NULL, 0, err, con->user_data);
342 case MCAP_MD_ABORT_MDL_REQ:
344 l = g_slist_find_custom(mcl->mdls, &st, cmp_mdl_state);
345 shutdown_mdl(l->data);
346 update_mcl_state(mcl);
347 con->cb.notify(err, con->user_data);
349 case MCAP_MD_DELETE_MDL_REQ:
/* Restore MDLs marked MDL_DELETING to their previous state. */
350 for (l = mcl->mdls; l; l = l->next) {
352 if (mdl->state == MDL_DELETING)
353 mdl->state = (mdl->dc) ? MDL_CONNECTED :
356 update_mcl_state(mcl);
357 con->cb.notify(err, con->user_data);
359 case MCAP_MD_RECONNECT_MDL_REQ:
361 l = g_slist_find_custom(mcl->mdls, &st, cmp_mdl_state);
362 shutdown_mdl(l->data);
363 update_mcl_state(mcl);
364 con->cb.op(NULL, err, con->user_data);
368 free_mcl_priv_data(mcl);
/*
 * Write `size` bytes to `sock`, retrying partial writes until everything
 * is sent (error-handling lines elided in this view).
 */
373 int mcap_send_data(int sock, const void *buf, uint32_t size)
375 const uint8_t *buf_b = buf;
378 while (sent < size) {
379 int n = write(sock, buf_b + sent, size - sent);
/*
 * Build and send an MCAP response PDU on the control channel:
 * op code `oc`, response code `rc`, MDL id `mdl` (host order, converted
 * with htons), plus `len` optional payload bytes.
 */
388 static int mcap_send_cmd(struct mcap_mcl *mcl, uint8_t oc, uint8_t rc,
389 uint16_t mdl, uint8_t *data, size_t len)
397 sock = g_io_channel_unix_get_fd(mcl->cc);
399 cmd = g_malloc(sizeof(mcap_rsp) + len);
402 cmd->mdl = htons(mdl);
405 memcpy(cmd->data, data, len);
407 sent = mcap_send_data(sock, cmd, sizeof(mcap_rsp) + len);
/* Find an MDL on this MCL by id; returns NULL when not present (elided). */
413 static struct mcap_mdl *get_mdl(struct mcap_mcl *mcl, uint16_t mdlid)
416 struct mcap_mdl *mdl;
418 for (l = mcl->mdls; l; l = l->next) {
420 if (mdlid == mdl->mdlid)
/*
 * Allocate the next free MDL id, scanning circularly from mcl->next_mdl
 * and wrapping at MCAP_MDLID_FINAL. Returns MCAP_MDLID_RESERVED (elided)
 * when the whole id space is in use.
 */
427 static uint16_t generate_mdlid(struct mcap_mcl *mcl)
429 uint16_t mdlid = mcl->next_mdl;
430 struct mcap_mdl *mdl;
433 mdl = get_mdl(mcl, mdlid);
435 mcl->next_mdl = (mdlid % MCAP_MDLID_FINAL) + 1;
438 mdlid = (mdlid % MCAP_MDLID_FINAL) + 1;
439 } while (mdlid != mcl->next_mdl);
441 /* No more mdlids available */
/* Build a generic MD_*_REQ PDU for opcode `op` targeting `mdl_id`. */
445 static mcap_md_req *create_req(uint8_t op, uint16_t mdl_id)
447 mcap_md_req *req_cmd;
449 req_cmd = g_new0(mcap_md_req, 1);
452 req_cmd->mdl = htons(mdl_id);
/* Build an MD_CREATE_MDL_REQ PDU with MDEP id and configuration byte. */
457 static mcap_md_create_mdl_req *create_mdl_req(uint16_t mdl_id, uint8_t mdep,
460 mcap_md_create_mdl_req *req_mdl;
462 req_mdl = g_new0(mcap_md_create_mdl_req, 1);
464 req_mdl->op = MCAP_MD_CREATE_MDL_REQ;
465 req_mdl->mdl = htons(mdl_id);
466 req_mdl->mdep = mdep;
467 req_mdl->conf = conf;
/*
 * GCompareFunc keeping mcl->mdls sorted by mdlid (returns elided in this
 * view).
 */
472 static int compare_mdl(gconstpointer a, gconstpointer b)
474 const struct mcap_mdl *mdla = a;
475 const struct mcap_mdl *mdlb = b;
477 if (mdla->mdlid == mdlb->mdlid)
479 else if (mdla->mdlid < mdlb->mdlid)
/*
 * RESPONSE_TIMER expiry handler: the remote failed to answer the last
 * request in time, so report a timeout error to the pending operation
 * and notify the upper layer that the MCL is disconnected.
 */
485 static bool wait_response_timer(gpointer data)
487 struct mcap_mcl *mcl = data;
493 g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_FAILED,
494 "Timeout waiting response");
496 mcap_notify_error(mcl, gerr);
499 mcl->mi->mcl_disconnected_cb(mcl, mcl->mi->user_data);
/*
 * Public API: request creation of a new MDL on this MCL.
 * Allocates a free mdlid, sends MD_CREATE_MDL_REQ, registers the new MDL
 * (state MDL_WAITING) in the sorted list, stores the operation context in
 * mcl->priv_data and arms the response timer. connect_cb is invoked when
 * the response (or an error) arrives. Returns FALSE with *err set on
 * failure (cleanup lines elided in this view).
 */
505 gboolean mcap_create_mdl(struct mcap_mcl *mcl,
508 mcap_mdl_operation_conf_cb connect_cb,
510 GDestroyNotify destroy,
513 struct mcap_mdl *mdl;
514 struct mcap_mdl_op_cb *con;
515 mcap_md_create_mdl_req *cmd;
518 id = generate_mdlid(mcl);
520 g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
521 "Not more mdlids available");
525 mdl = g_new0(struct mcap_mdl, 1);
526 mdl->mcl = mcap_mcl_ref(mcl);
528 mdl->mdep_id = mdepid;
529 mdl->state = MDL_WAITING;
531 con = g_new0(struct mcap_mdl_op_cb, 1);
532 con->mdl = mcap_mdl_ref(mdl);
533 con->cb.op_conf = connect_cb;
534 con->destroy = destroy;
535 con->user_data = user_data;
537 cmd = create_mdl_req(id, mdepid, conf);
538 if (!mcap_send_std_opcode(mcl, cmd, sizeof(mcap_md_create_mdl_req),
540 mcap_mdl_unref(con->mdl);
546 mcl->state = MCL_ACTIVE;
547 mcl->priv_data = con;
549 mcl->mdls = g_slist_insert_sorted(mcl->mdls, mcap_mdl_ref(mdl),
551 mcl->tid = timeout_add_seconds(RESPONSE_TIMER, wait_response_timer,
/*
 * Public API: request reconnection of a previously closed MDL.
 * Only valid when the MDL is MDL_CLOSED; sends MD_RECONNECT_MDL_REQ,
 * moves the MDL to MDL_WAITING, stores the operation context and arms
 * the response timer.
 */
556 gboolean mcap_reconnect_mdl(struct mcap_mdl *mdl,
557 mcap_mdl_operation_cb reconnect_cb,
559 GDestroyNotify destroy,
562 struct mcap_mdl_op_cb *con;
563 struct mcap_mcl *mcl = mdl->mcl;
566 if (mdl->state != MDL_CLOSED) {
567 g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
568 "MDL is not closed");
572 cmd = create_req(MCAP_MD_RECONNECT_MDL_REQ, mdl->mdlid);
573 if (!mcap_send_std_opcode(mcl, cmd, sizeof(mcap_md_req), err)) {
578 mdl->state = MDL_WAITING;
580 con = g_new0(struct mcap_mdl_op_cb, 1);
581 con->mdl = mcap_mdl_ref(mdl);
582 con->cb.op = reconnect_cb;
583 con->destroy = destroy;
584 con->user_data = user_data;
586 mcl->state = MCL_ACTIVE;
587 mcl->priv_data = con;
589 mcl->tid = timeout_add_seconds(RESPONSE_TIMER, wait_response_timer,
/*
 * Send MD_DELETE_MDL_REQ for `mdlid` (or MCAP_ALL_MDLIDS), storing the
 * operation context and arming the response timer on success.
 */
594 static gboolean send_delete_req(struct mcap_mcl *mcl,
595 struct mcap_mdl_op_cb *con,
601 cmd = create_req(MCAP_MD_DELETE_MDL_REQ, mdlid);
602 if (!mcap_send_std_opcode(mcl, cmd, sizeof(mcap_md_req), err)) {
607 mcl->priv_data = con;
609 mcl->tid = timeout_add_seconds(RESPONSE_TIMER, wait_response_timer,
/*
 * Public API: delete every MDL on this MCL. Marks each established MDL
 * MDL_DELETING and issues a single MCAP_ALL_MDLIDS delete request.
 */
614 gboolean mcap_delete_all_mdls(struct mcap_mcl *mcl,
615 mcap_mdl_notify_cb delete_cb,
617 GDestroyNotify destroy,
621 struct mcap_mdl *mdl;
622 struct mcap_mdl_op_cb *con;
624 DBG("MCL in state: %d", mcl->state);
626 g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
627 "There are not MDLs created");
631 for (l = mcl->mdls; l; l = l->next) {
633 if (mdl->state != MDL_WAITING)
634 mdl->state = MDL_DELETING;
637 con = g_new0(struct mcap_mdl_op_cb, 1);
639 con->cb.notify = delete_cb;
640 con->destroy = destroy;
641 con->user_data = user_data;
644 if (!send_delete_req(mcl, con, MCAP_ALL_MDLIDS, err)) {
/*
 * Public API: delete a single MDL. The MDL must belong to this MCL and
 * must not still be in MDL_WAITING (i.e. not yet created remotely).
 */
652 gboolean mcap_delete_mdl(struct mcap_mdl *mdl, mcap_mdl_notify_cb delete_cb,
654 GDestroyNotify destroy,
657 struct mcap_mcl *mcl= mdl->mcl;
658 struct mcap_mdl_op_cb *con;
661 l = g_slist_find(mcl->mdls, mdl);
664 g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_MDL,
/*
 * NOTE(review): the message below uses MCAP_INVALID_MDEP while the error
 * code is INVALID_MDL — likely meant error2str(MCAP_INVALID_MDL); verify
 * against upstream before changing, since the string is user-visible.
 */
665 "%s" , error2str(MCAP_INVALID_MDEP));
669 if (mdl->state == MDL_WAITING) {
670 g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
671 "Mdl is not created");
675 mdl->state = MDL_DELETING;
677 con = g_new0(struct mcap_mdl_op_cb, 1);
678 con->mdl = mcap_mdl_ref(mdl);
679 con->cb.notify = delete_cb;
680 con->destroy = destroy;
681 con->user_data = user_data;
683 if (!send_delete_req(mcl, con, mdl->mdlid, err)) {
684 mcap_mdl_unref(con->mdl);
/*
 * Public API: abort an MDL creation/reconnection still in MDL_WAITING.
 * Sends MD_ABORT_MDL_REQ, stores the operation context and arms the
 * response timer.
 */
692 gboolean mcap_mdl_abort(struct mcap_mdl *mdl, mcap_mdl_notify_cb abort_cb,
694 GDestroyNotify destroy,
697 struct mcap_mdl_op_cb *con;
698 struct mcap_mcl *mcl = mdl->mcl;
701 if (mdl->state != MDL_WAITING) {
702 g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
703 "Mdl in invalid state");
707 cmd = create_req(MCAP_MD_ABORT_MDL_REQ, mdl->mdlid);
708 if (!mcap_send_std_opcode(mcl, cmd, sizeof(mcap_md_req), err)) {
713 con = g_new0(struct mcap_mdl_op_cb, 1);
714 con->mdl = mcap_mdl_ref(mdl);
715 con->cb.notify = abort_cb;
716 con->destroy = destroy;
717 con->user_data = user_data;
719 mcl->priv_data = con;
720 mcl->tid = timeout_add_seconds(RESPONSE_TIMER, wait_response_timer,
/* Linear search for an MCL by remote bdaddr (returns elided in view). */
725 static struct mcap_mcl *find_mcl(GSList *list, const bdaddr_t *addr)
727 struct mcap_mcl *mcl;
729 for (; list; list = list->next) {
732 if (!bacmp(&mcl->addr, addr))
/* Public API: data-channel fd of a connected MDL, or error if not connected. */
739 int mcap_mdl_get_fd(struct mcap_mdl *mdl)
741 if (!mdl || mdl->state != MDL_CONNECTED)
744 return g_io_channel_unix_get_fd(mdl->dc);
/* Public API: MDL id accessor; MCAP_MDLID_RESERVED for a NULL MDL. */
747 uint16_t mcap_mdl_get_mdlid(struct mcap_mdl *mdl)
750 return MCAP_MDLID_RESERVED;
/* g_slist_foreach adaptors (bodies partially elided in this view). */
755 static void shutdown_mdl_cb(void *data, void *user_data)
760 static void mdl_unref_cb(void *data, void *user_data)
762 mcap_mdl_unref(data);
/*
 * Tear down an MCL: close the control channel, remove watches, free the
 * in-flight operation, shut every MDL down and move the MCL to MCL_IDLE.
 * When `save` is false (MCAP_CTRL_FREE set or caching not requested) the
 * MDL list is also unreffed and freed.
 */
765 static void close_mcl(struct mcap_mcl *mcl, gboolean cache_requested)
767 gboolean save = ((!(mcl->ctrl & MCAP_CTRL_FREE)) && cache_requested);
772 g_io_channel_shutdown(mcl->cc, TRUE, NULL);
773 g_io_channel_unref(mcl->cc);
778 g_source_remove(mcl->wid);
788 free_mcl_priv_data(mcl);
790 g_slist_foreach(mcl->mdls, shutdown_mdl_cb, NULL);
794 mcl->state = MCL_IDLE;
799 g_slist_foreach(mcl->mdls, mdl_unref_cb, NULL);
800 g_slist_free(mcl->mdls);
/* Shut down but keep MDL bookkeeping (candidate for the cache). */
804 static void mcap_mcl_shutdown(struct mcap_mcl *mcl)
806 close_mcl(mcl, TRUE);
/* Fully release the MCL, discarding MDL bookkeeping. */
809 static void mcap_mcl_release(struct mcap_mcl *mcl)
811 close_mcl(mcl, FALSE);
/*
 * Move a disconnected MCL into the instance cache (bounded at MAX_CACHED
 * entries) so a later reconnection from the same peer can reuse it.
 * MCLs flagged MCAP_CTRL_NOCACHE are released instead.
 */
814 static void mcap_cache_mcl(struct mcap_mcl *mcl)
817 struct mcap_mcl *last;
820 if (mcl->ctrl & MCAP_CTRL_CACHED)
823 mcl->mi->mcls = g_slist_remove(mcl->mi->mcls, mcl);
825 if (mcl->ctrl & MCAP_CTRL_NOCACHE) {
826 mcl->mi->cached = g_slist_remove(mcl->mi->cached, mcl);
827 mcap_mcl_release(mcl);
834 len = g_slist_length(mcl->mi->cached);
835 if (len == MAX_CACHED) {
836 /* Cache full: evict the oldest cached MCL (list is prepended) */
837 l = g_slist_last(mcl->mi->cached);
839 mcl->mi->cached = g_slist_remove(mcl->mi->cached, last);
840 last->ctrl &= ~MCAP_CTRL_CACHED;
841 if (last->ctrl & MCAP_CTRL_CONN) {
843 * We have to release this MCL if connection is not
846 last->ctrl |= MCAP_CTRL_FREE;
848 mcap_mcl_release(last);
849 last->mi->mcl_uncached_cb(last, last->mi->user_data);
851 mcap_mcl_unref(last);
854 mcl->mi->cached = g_slist_prepend(mcl->mi->cached, mcl);
855 mcl->ctrl |= MCAP_CTRL_CACHED;
856 mcap_mcl_shutdown(mcl);
/* Promote a cached MCL back to the active list on reconnection. */
859 static void mcap_uncache_mcl(struct mcap_mcl *mcl)
861 if (!(mcl->ctrl & MCAP_CTRL_CACHED))
864 DBG("Got MCL from cache");
866 mcl->mi->cached = g_slist_remove(mcl->mi->cached, mcl);
867 mcl->mi->mcls = g_slist_prepend(mcl->mi->mcls, mcl);
868 mcl->ctrl &= ~MCAP_CTRL_CACHED;
869 mcl->ctrl &= ~MCAP_CTRL_FREE;
/*
 * Public API: close an MCL. When `cache` is FALSE the MCL is flagged
 * MCAP_CTRL_NOCACHE so it will be released rather than kept for reuse;
 * an already-cached MCL with that flag is evicted and released here.
 */
872 void mcap_close_mcl(struct mcap_mcl *mcl, gboolean cache)
877 if (mcl->ctrl & MCAP_CTRL_FREE) {
878 mcap_mcl_release(mcl);
883 mcl->ctrl |= MCAP_CTRL_NOCACHE;
886 g_io_channel_shutdown(mcl->cc, TRUE, NULL);
887 g_io_channel_unref(mcl->cc);
889 mcl->state = MCL_IDLE;
890 } else if ((mcl->ctrl & MCAP_CTRL_CACHED) &&
891 (mcl->ctrl & MCAP_CTRL_NOCACHE)) {
892 mcl->ctrl &= ~MCAP_CTRL_CACHED;
893 mcl->mi->cached = g_slist_remove(mcl->mi->cached, mcl);
894 mcap_mcl_release(mcl);
/* Reference counting for MCLs (increment/decrement lines elided). */
899 struct mcap_mcl *mcap_mcl_ref(struct mcap_mcl *mcl)
903 DBG("mcap_mcl_ref(%p): ref=%d", mcl, mcl->ref);
908 void mcap_mcl_unref(struct mcap_mcl *mcl)
912 DBG("mcap_mcl_unref(%p): ref=%d", mcl, mcl->ref);
/* Last reference dropped: release the MCL and its instance reference. */
917 mcap_mcl_release(mcl);
918 mcap_instance_unref(mcl->mi);
/*
 * Parse the (option, callback) varargs passed to mcap_mcl_set_cb into a
 * scratch callback table, validating option ids, then copy only the
 * provided callbacks over the MCL's current table. This two-phase
 * approach leaves mdl_cb untouched if any option is invalid.
 */
923 static gboolean parse_set_opts(struct mcap_mdl_cb *mdl_cb, GError **err,
924 McapMclCb cb1, va_list args)
927 struct mcap_mdl_cb *c;
929 c = g_new0(struct mcap_mdl_cb, 1);
931 while (cb != MCAP_MDL_CB_INVALID) {
933 case MCAP_MDL_CB_CONNECTED:
934 c->mdl_connected = va_arg(args, mcap_mdl_event_cb);
936 case MCAP_MDL_CB_CLOSED:
937 c->mdl_closed = va_arg(args, mcap_mdl_event_cb);
939 case MCAP_MDL_CB_DELETED:
940 c->mdl_deleted = va_arg(args, mcap_mdl_event_cb);
942 case MCAP_MDL_CB_ABORTED:
943 c->mdl_aborted = va_arg(args, mcap_mdl_event_cb);
945 case MCAP_MDL_CB_REMOTE_CONN_REQ:
946 c->mdl_conn_req = va_arg(args,
947 mcap_remote_mdl_conn_req_cb);
949 case MCAP_MDL_CB_REMOTE_RECONN_REQ:
950 c->mdl_reconn_req = va_arg(args,
951 mcap_remote_mdl_reconn_req_cb);
953 case MCAP_MDL_CB_INVALID:
955 g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
956 "Unknown option %d", cb);
960 cb = va_arg(args, int);
963 /* Set new callbacks */
964 if (c->mdl_connected)
965 mdl_cb->mdl_connected = c->mdl_connected;
967 mdl_cb->mdl_closed = c->mdl_closed;
969 mdl_cb->mdl_deleted = c->mdl_deleted;
971 mdl_cb->mdl_aborted = c->mdl_aborted;
973 mdl_cb->mdl_conn_req = c->mdl_conn_req;
974 if (c->mdl_reconn_req)
975 mdl_cb->mdl_reconn_req = c->mdl_reconn_req;
/* Public API: install upper-layer MDL callbacks on this MCL (varargs). */
982 gboolean mcap_mcl_set_cb(struct mcap_mcl *mcl, gpointer user_data,
983 GError **gerr, McapMclCb cb1, ...)
989 ret = parse_set_opts(mcl->cb, gerr, cb1, args);
995 mcl->cb->user_data = user_data;
/* Public API: copy the MCL's remote bdaddr into *addr. */
999 void mcap_mcl_get_addr(struct mcap_mcl *mcl, bdaddr_t *addr)
1001 bacpy(addr, &mcl->addr);
/*
 * g_slist_foreach adaptor: shut down and unref an MDL, optionally
 * notifying the upper layer via the mdl_deleted callback (user_data is
 * a gboolean* "notify" flag).
 */
1004 static void mcap_del_mdl(gpointer elem, gpointer user_data)
1006 struct mcap_mdl *mdl = elem;
1007 gboolean notify = *(gboolean *) user_data;
1010 mdl->mcl->cb->mdl_deleted(mdl, mdl->mcl->cb->user_data);
1013 mcap_mdl_unref(mdl);
/*
 * Validate a received request's length; on mismatch reply with
 * MCAP_INVALID_PARAM_VALUE (using the request's mdlid when recoverable)
 * and report failure to the caller.
 */
1016 static gboolean check_cmd_req_length(struct mcap_mcl *mcl, void *cmd,
1017 uint32_t rlen, uint32_t explen, uint8_t rspcod)
1022 if (rlen != explen) {
1023 if (rlen >= sizeof(mcap_md_req)) {
1025 mdl_id = ntohs(req->mdl);
1027 /* We can't get mdlid */
1028 mdl_id = MCAP_MDLID_RESERVED;
1030 mcap_send_cmd(mcl, rspcod, MCAP_INVALID_PARAM_VALUE, mdl_id,
/*
 * Handle an incoming MD_CREATE_MDL_REQ: validate length, mdlid and mdep
 * ranges, reject requests for MDLs already mid-operation, consult the
 * upper layer (mdl_conn_req callback, which may adjust the config byte),
 * then create/reuse the MDL in MDL_WAITING state, move the MCL to
 * MCL_PENDING and send MCAP_SUCCESS.
 */
1037 static void process_md_create_mdl_req(struct mcap_mcl *mcl, void *cmd,
1040 mcap_md_create_mdl_req *req;
1041 struct mcap_mdl *mdl;
1047 if (!check_cmd_req_length(mcl, cmd, len, sizeof(mcap_md_create_mdl_req),
1048 MCAP_MD_CREATE_MDL_RSP))
1052 mdl_id = ntohs(req->mdl);
1053 if (mdl_id < MCAP_MDLID_INITIAL || mdl_id > MCAP_MDLID_FINAL) {
1054 mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, MCAP_INVALID_MDL,
1059 mdep_id = req->mdep;
1060 if (mdep_id > MCAP_MDEPID_FINAL) {
1061 mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, MCAP_INVALID_MDEP,
1066 mdl = get_mdl(mcl, mdl_id);
1067 if (mdl && (mdl->state == MDL_WAITING || mdl->state == MDL_DELETING )) {
1069 * Creation request arrives for a MDL that is being managed
1072 mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, MCAP_MDL_BUSY,
1077 cfga = conf = req->conf;
1078 /* Callback to upper layer */
1079 rsp = mcl->cb->mdl_conn_req(mcl, mdep_id, mdl_id, &conf,
1080 mcl->cb->user_data);
1081 if (mcl->state == MCL_IDLE) {
1082 /* MCL has been closed in the callback */
1086 if (cfga != 0 && cfga != conf) {
1088 * Remote device set default configuration but upper profile
1089 * has changed it. Protocol Error: force closing the MCL by
1090 * remote device using UNSPECIFIED_ERROR response
1092 mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP,
1093 MCAP_UNSPECIFIED_ERROR, mdl_id, NULL, 0);
1096 if (rsp != MCAP_SUCCESS) {
1097 mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, rsp, mdl_id,
1103 mdl = g_new0(struct mcap_mdl, 1);
1104 mdl->mcl = mcap_mcl_ref(mcl);
1105 mdl->mdlid = mdl_id;
1106 mcl->mdls = g_slist_insert_sorted(mcl->mdls, mcap_mdl_ref(mdl),
1108 } else if (mdl->state == MDL_CONNECTED) {
1110 * MCAP specification says that we should close the MCL if
1111 * it is open when we receive a MD_CREATE_MDL_REQ
1116 mdl->mdep_id = mdep_id;
1117 mdl->state = MDL_WAITING;
1119 mcl->state = MCL_PENDING;
1120 mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, MCAP_SUCCESS, mdl_id,
/*
 * Handle an incoming MD_RECONNECT_MDL_REQ: the MDL must exist and not be
 * mid-operation; the upper layer (mdl_reconn_req callback) decides the
 * response code. On success the MDL returns to MDL_WAITING and the MCL
 * to MCL_PENDING.
 */
1124 static void process_md_reconnect_mdl_req(struct mcap_mcl *mcl, void *cmd,
1128 struct mcap_mdl *mdl;
1132 if (!check_cmd_req_length(mcl, cmd, len, sizeof(mcap_md_req),
1133 MCAP_MD_RECONNECT_MDL_RSP))
1137 mdl_id = ntohs(req->mdl);
1139 mdl = get_mdl(mcl, mdl_id);
1141 mcap_send_cmd(mcl, MCAP_MD_RECONNECT_MDL_RSP, MCAP_INVALID_MDL,
1144 } else if (mdl->state == MDL_WAITING || mdl->state == MDL_DELETING ) {
1146 * Reconnection request arrives for an MDL that is being managed
1149 mcap_send_cmd(mcl, MCAP_MD_RECONNECT_MDL_RSP, MCAP_MDL_BUSY,
1154 /* Callback to upper layer */
1155 rsp = mcl->cb->mdl_reconn_req(mdl, mcl->cb->user_data);
1156 if (mcl->state == MCL_IDLE)
1159 if (rsp != MCAP_SUCCESS) {
1160 mcap_send_cmd(mcl, MCAP_MD_RECONNECT_MDL_RSP, rsp, mdl_id,
1165 if (mdl->state == MDL_CONNECTED)
1168 mdl->state = MDL_WAITING;
1169 mcl->state = MCL_PENDING;
1170 mcap_send_cmd(mcl, MCAP_MD_RECONNECT_MDL_RSP, MCAP_SUCCESS, mdl_id,
/*
 * Handle an incoming MD_ABORT_MDL_REQ: locate the matching MDL in
 * MDL_WAITING, recompute the MCL state from the remaining MDLs, notify
 * the upper layer and mark the aborted MDL closed. Replies INVALID_MDL
 * if no waiting MDL matches.
 */
1174 static void process_md_abort_mdl_req(struct mcap_mcl *mcl, void *cmd,
1179 struct mcap_mdl *mdl, *abrt;
1182 if (!check_cmd_req_length(mcl, cmd, len, sizeof(mcap_md_req),
1183 MCAP_MD_ABORT_MDL_RSP))
1187 mdl_id = ntohs(req->mdl);
1188 mcl->state = MCL_CONNECTED;
1190 for (l = mcl->mdls; l; l = l->next) {
1192 if (mdl_id == mdl->mdlid && mdl->state == MDL_WAITING) {
1194 if (mcl->state != MCL_CONNECTED)
1198 if (mdl->state == MDL_CONNECTED && mcl->state != MCL_ACTIVE)
1199 mcl->state = MCL_ACTIVE;
1201 if (abrt && mcl->state == MCL_ACTIVE)
1206 mcap_send_cmd(mcl, MCAP_MD_ABORT_MDL_RSP, MCAP_INVALID_MDL,
1211 mcl->cb->mdl_aborted(abrt, mcl->cb->user_data);
1212 abrt->state = MDL_CLOSED;
1213 mcap_send_cmd(mcl, MCAP_MD_ABORT_MDL_RSP, MCAP_SUCCESS, mdl_id,
/*
 * Handle an incoming MD_DELETE_MDL_REQ: either delete all MDLs
 * (MCAP_ALL_MDLIDS) or a single one identified by mdlid, validating the
 * id range and rejecting MDLs not yet created (MDL_WAITING).
 */
1217 static void process_md_delete_mdl_req(struct mcap_mcl *mcl, void *cmd,
1221 struct mcap_mdl *mdl, *aux;
1226 if (!check_cmd_req_length(mcl, cmd, len, sizeof(mcap_md_req),
1227 MCAP_MD_DELETE_MDL_RSP))
1231 mdlid = ntohs(req->mdl);
1232 if (mdlid == MCAP_ALL_MDLIDS) {
1234 g_slist_foreach(mcl->mdls, mcap_del_mdl, &notify);
1235 g_slist_free(mcl->mdls);
1237 mcl->state = MCL_CONNECTED;
1241 if (mdlid < MCAP_MDLID_INITIAL || mdlid > MCAP_MDLID_FINAL) {
1242 mcap_send_cmd(mcl, MCAP_MD_DELETE_MDL_RSP, MCAP_INVALID_MDL,
1247 for (l = mcl->mdls, mdl = NULL; l; l = l->next) {
1249 if (aux->mdlid == mdlid) {
1255 if (!mdl || mdl->state == MDL_WAITING) {
1256 mcap_send_cmd(mcl, MCAP_MD_DELETE_MDL_RSP, MCAP_INVALID_MDL,
1261 mcl->mdls = g_slist_remove(mcl->mdls, mdl);
1262 update_mcl_state(mcl);
1264 mcap_del_mdl(mdl, &notify);
1267 mcap_send_cmd(mcl, MCAP_MD_DELETE_MDL_RSP, MCAP_SUCCESS, mdlid,
/*
 * Reply MCAP_INVALID_OPERATION to a request that is not legal in the
 * current MCL state. The response opcode is the request opcode + 1
 * (MCAP pairs REQ/RSP opcodes that way).
 */
1271 static void invalid_req_state(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
1275 error("Invalid cmd received (op code = %d) in state %d", cmd[0],
1278 * Get the mdlid previously sent, to generate an appropriate
1279 * response if it is possible
1281 mdlr = len < sizeof(mcap_md_req) ? MCAP_MDLID_RESERVED :
1282 ntohs(((mcap_md_req *) cmd)->mdl);
1283 mcap_send_cmd(mcl, cmd[0]+1, MCAP_INVALID_OPERATION, mdlr, NULL, 0);
1286 /* Function used to process commands depending of MCL state */
/* CONNECTED state: create/reconnect/delete allowed; anything else rejected. */
1287 static void proc_req_connected(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
1290 case MCAP_MD_CREATE_MDL_REQ:
1291 process_md_create_mdl_req(mcl, cmd, len);
1293 case MCAP_MD_RECONNECT_MDL_REQ:
1294 process_md_reconnect_mdl_req(mcl, cmd, len);
1296 case MCAP_MD_DELETE_MDL_REQ:
1297 process_md_delete_mdl_req(mcl, cmd, len);
1300 invalid_req_state(mcl, cmd, len);
/* PENDING state: only ABORT is legal while an MDL connection is awaited. */
1304 static void proc_req_pending(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
1306 if (cmd[0] == MCAP_MD_ABORT_MDL_REQ)
1307 process_md_abort_mdl_req(mcl, cmd, len);
1309 invalid_req_state(mcl, cmd, len);
/* ACTIVE state: same request set as CONNECTED. */
1312 static void proc_req_active(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
1315 case MCAP_MD_CREATE_MDL_REQ:
1316 process_md_create_mdl_req(mcl, cmd, len);
1318 case MCAP_MD_RECONNECT_MDL_REQ:
1319 process_md_reconnect_mdl_req(mcl, cmd, len);
1321 case MCAP_MD_DELETE_MDL_REQ:
1322 process_md_delete_mdl_req(mcl, cmd, len);
1325 invalid_req_state(mcl, cmd, len);
1329 /* Function used to process replies */
/*
 * Validate a response PDU against the last sent request (opcode pairing,
 * length, matching mdlid) and translate its response code into a GError.
 * Returns whether the MCL should be closed due to a protocol error
 * (the `return close` path is elided in this view).
 */
1330 static gboolean check_err_rsp(struct mcap_mcl *mcl, mcap_rsp *rsp,
1331 uint32_t rlen, uint32_t len, GError **gerr)
1333 mcap_md_req *cmdlast = (mcap_md_req *) mcl->lcmd;
1334 int err = MCAP_ERROR_FAILED;
1335 gboolean close = FALSE;
1338 if (rsp->op == MCAP_ERROR_RSP) {
1339 msg = "MCAP_ERROR_RSP received";
1344 /* Check if the response matches with the last request */
1345 if (rlen < sizeof(mcap_rsp) || (mcl->lcmd[0] + 1) != rsp->op) {
1346 msg = "Protocol error";
1352 msg = "Protocol error";
1357 if (rsp->mdl != cmdlast->mdl) {
1358 msg = "MDLID received doesn't match with MDLID sent";
1363 if (rsp->rc == MCAP_REQUEST_NOT_SUPPORTED) {
1364 msg = "Remote does not support opcodes";
/* Remember peer's lack of standard-opcode support for future sends. */
1365 mcl->ctrl &= ~MCAP_CTRL_STD_OP;
1369 if (rsp->rc == MCAP_UNSPECIFIED_ERROR) {
1370 msg = "Unspecified error";
1375 if (rsp->rc != MCAP_SUCCESS) {
1376 msg = error2str(rsp->rc);
1384 g_set_error(gerr, MCAP_ERROR, err, "%s", msg);
/*
 * Handle MD_CREATE_MDL_RSP: on success verify the negotiated config byte
 * (rsp->data[0]) against what we requested, then invoke the pending
 * connect_cb. On error the speculative MDL is removed and the callback
 * gets NULL. Returns whether the MCL must be closed.
 */
1388 static gboolean process_md_create_mdl_rsp(struct mcap_mcl *mcl,
1389 mcap_rsp *rsp, uint32_t len)
1391 mcap_md_create_mdl_req *cmdlast = (mcap_md_create_mdl_req *) mcl->lcmd;
1392 struct mcap_mdl_op_cb *conn = mcl->priv_data;
1393 mcap_mdl_operation_conf_cb connect_cb = conn->cb.op_conf;
1394 gpointer user_data = conn->user_data;
1395 struct mcap_mdl *mdl = conn->mdl;
1396 uint8_t conf = cmdlast->conf;
1398 GError *gerr = NULL;
1400 close = check_err_rsp(mcl, rsp, len, sizeof(mcap_rsp) + 1, &gerr);
1403 mcl->req = MCL_AVAILABLE;
1408 /* Check if preferences changed */
1409 if (conf != 0x00 && rsp->data[0] != conf) {
1410 g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_FAILED,
1411 "Configuration changed");
1416 connect_cb(mdl, rsp->data[0], gerr, user_data);
1420 connect_cb(NULL, 0, gerr, user_data);
1421 mcl->mdls = g_slist_remove(mcl->mdls, mdl);
1422 mcap_mdl_unref(mdl);
1424 update_mcl_state(mcl);
/*
 * Handle MD_RECONNECT_MDL_RSP: invoke the pending reconnect callback; if
 * the remote answered INVALID_MDL the cached MDL no longer exists there,
 * so drop it locally and notify via mdl_deleted. Returns whether the MCL
 * must be closed.
 */
1428 static gboolean process_md_reconnect_mdl_rsp(struct mcap_mcl *mcl,
1429 mcap_rsp *rsp, uint32_t len)
1431 struct mcap_mdl_op_cb *reconn = mcl->priv_data;
1432 mcap_mdl_operation_cb reconn_cb = reconn->cb.op;
1433 gpointer user_data = reconn->user_data;
1434 struct mcap_mdl *mdl = reconn->mdl;
1435 GError *gerr = NULL;
1438 close = check_err_rsp(mcl, rsp, len, sizeof(mcap_rsp), &gerr);
1442 mcl->req = MCL_AVAILABLE;
1444 reconn_cb(mdl, gerr, user_data);
1450 update_mcl_state(mcl);
1452 if (rsp->rc != MCAP_INVALID_MDL)
1455 /* Remove cached mdlid */
1456 mcl->mdls = g_slist_remove(mcl->mdls, mdl);
1457 mcl->cb->mdl_deleted(mdl, mcl->cb->user_data);
1458 mcap_mdl_unref(mdl);
/*
 * Handle MD_ABORT_MDL_RSP: invoke the pending abort callback; an
 * INVALID_MDL answer means the MDL is gone remotely, so remove it
 * locally and notify via mdl_deleted. Returns whether the MCL must be
 * closed.
 */
1463 static gboolean process_md_abort_mdl_rsp(struct mcap_mcl *mcl,
1464 mcap_rsp *rsp, uint32_t len)
1466 struct mcap_mdl_op_cb *abrt = mcl->priv_data;
1467 mcap_mdl_notify_cb abrt_cb = abrt->cb.notify;
1468 gpointer user_data = abrt->user_data;
1469 struct mcap_mdl *mdl = abrt->mdl;
1470 GError *gerr = NULL;
1473 close = check_err_rsp(mcl, rsp, len, sizeof(mcap_rsp), &gerr);
1477 mcl->req = MCL_AVAILABLE;
1479 abrt_cb(gerr, user_data);
1482 if (len >= sizeof(mcap_rsp) && rsp->rc == MCAP_INVALID_MDL) {
1483 mcl->mdls = g_slist_remove(mcl->mdls, mdl);
1484 mcl->cb->mdl_deleted(mdl, mcl->cb->user_data);
1485 mcap_mdl_unref(mdl);
1491 update_mcl_state(mcl);
/*
 * Roll an MDL marked MDL_DELETING back to its real state (CONNECTED if
 * it still has a data channel, otherwise CLOSED); notify closure for
 * MDLs already in MDL_CLOSED. Used as a g_slist_foreach callback.
 */
1496 static void restore_mdl(gpointer elem, gpointer data)
1498 struct mcap_mdl *mdl = elem;
1500 if (mdl->state == MDL_DELETING) {
1502 mdl->state = MDL_CONNECTED;
1504 mdl->state = MDL_CLOSED;
1505 } else if (mdl->state == MDL_CLOSED)
1506 mdl->mcl->cb->mdl_closed(mdl, mdl->mcl->cb->user_data);
/*
 * After a failed single-MDL delete: if the remote says the MDL does not
 * exist, drop it locally; otherwise restore its previous state.
 * NOTE(review): this compares rsp->rc against MCAP_ERROR_INVALID_MDL
 * while other handlers compare against MCAP_INVALID_MDL — verify the two
 * constants agree before relying on this path.
 */
1509 static void check_mdl_del_err(struct mcap_mdl *mdl, mcap_rsp *rsp)
1511 if (rsp->rc != MCAP_ERROR_INVALID_MDL) {
1512 restore_mdl(mdl, NULL);
1516 /* MDL does not exist in remote side, we can delete it */
1517 mdl->mcl->mdls = g_slist_remove(mdl->mcl->mdls, mdl);
1518 mcap_mdl_unref(mdl);
/*
 * Handle MD_DELETE_MDL_RSP: on error, undo the MDL_DELETING marks and
 * report to the pending delete callback; on success remove either all
 * MDLs (MCAP_ALL_MDLIDS) or the single targeted MDL, then notify.
 * Returns whether the MCL must be closed.
 */
1521 static gboolean process_md_delete_mdl_rsp(struct mcap_mcl *mcl, mcap_rsp *rsp,
1524 struct mcap_mdl_op_cb *del = mcl->priv_data;
1525 struct mcap_mdl *mdl = del->mdl;
1526 mcap_mdl_notify_cb deleted_cb = del->cb.notify;
1527 gpointer user_data = del->user_data;
1528 mcap_md_req *cmdlast = (mcap_md_req *) mcl->lcmd;
1529 uint16_t mdlid = ntohs(cmdlast->mdl);
1530 GError *gerr = NULL;
1532 gboolean notify = FALSE;
1534 close = check_err_rsp(mcl, rsp, len, sizeof(mcap_rsp), &gerr);
1538 mcl->req = MCL_AVAILABLE;
1542 check_mdl_del_err(mdl, rsp);
1544 g_slist_foreach(mcl->mdls, restore_mdl, NULL);
1545 deleted_cb(gerr, user_data);
1550 if (mdlid == MCAP_ALL_MDLIDS) {
1551 g_slist_foreach(mcl->mdls, mcap_del_mdl, &notify);
1552 g_slist_free(mcl->mdls);
1554 mcl->state = MCL_CONNECTED;
1556 mcl->mdls = g_slist_remove(mcl->mdls, mdl);
1557 update_mcl_state(mcl);
1558 mcap_del_mdl(mdl, &notify);
1561 deleted_cb(gerr, user_data);
/*
 * Free the completed operation context. If a callback queued a new
 * request, mcl->priv_data now points at the new operation, so only the
 * finished `op` is freed; otherwise clear mcl->priv_data as well.
 */
1566 static void post_process_rsp(struct mcap_mcl *mcl, struct mcap_mdl_op_cb *op)
1568 if (mcl->priv_data != op) {
1570 * Queued MCAP request in some callback.
1571 * We should not delete the mcl private data
1573 free_mcap_mdl_op(op);
1576 * This is not a queued request. It's safe
1577 * delete the mcl private data here.
1579 free_mcl_priv_data(mcl);
/*
 * Dispatch a received response PDU to the handler matching the last sent
 * request (lcmd[0] + 1 is the expected response opcode). If the handler
 * reports a fatal protocol error, the upper layer is told the MCL is
 * disconnected and the MCL is cached.
 */
1583 static void proc_response(struct mcap_mcl *mcl, void *buf, uint32_t len)
1585 struct mcap_mdl_op_cb *op = mcl->priv_data;
1586 mcap_rsp *rsp = buf;
1591 switch (mcl->lcmd[0] + 1) {
1592 case MCAP_MD_CREATE_MDL_RSP:
1593 close = process_md_create_mdl_rsp(mcl, rsp, len);
1594 post_process_rsp(mcl, op);
1596 case MCAP_MD_RECONNECT_MDL_RSP:
1597 close = process_md_reconnect_mdl_rsp(mcl, rsp, len);
1598 post_process_rsp(mcl, op);
1600 case MCAP_MD_ABORT_MDL_RSP:
1601 close = process_md_abort_mdl_rsp(mcl, rsp, len);
1602 post_process_rsp(mcl, op);
1604 case MCAP_MD_DELETE_MDL_RSP:
1605 close = process_md_delete_mdl_rsp(mcl, rsp, len);
1606 post_process_rsp(mcl, op);
1609 DBG("Unknown cmd response received (op code = %d)", rsp->op);
1615 mcl->mi->mcl_disconnected_cb(mcl, mcl->mi->user_data);
1616 mcap_cache_mcl(mcl);
/*
 * Entry point for every PDU received on the control channel.
 * Rejects unknown opcodes, routes CSP (clock sync) opcodes to
 * proc_sync_cmd, and otherwise distinguishes requests (odd opcode,
 * cmd[0] & 0x01) from responses. When a request collides with our own
 * outstanding request, MCAP's initiator-priority rule applies: the
 * acceptor abandons its request (notifying MCAP_ERROR_REQ_IGNORED) and
 * serves the initiator's request instead.
 */
1620 static void proc_cmd(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
1622 GError *gerr = NULL;
1624 if (cmd[0] > MCAP_MD_SYNC_INFO_IND ||
1625 (cmd[0] > MCAP_MD_DELETE_MDL_RSP &&
1626 cmd[0] < MCAP_MD_SYNC_CAP_REQ)) {
1627 error("Unknown cmd received (op code = %d)", cmd[0]);
1628 mcap_send_cmd(mcl, MCAP_ERROR_RSP, MCAP_INVALID_OP_CODE,
1629 MCAP_MDLID_RESERVED, NULL, 0);
1633 if (cmd[0] >= MCAP_MD_SYNC_CAP_REQ &&
1634 cmd[0] <= MCAP_MD_SYNC_INFO_IND) {
1635 proc_sync_cmd(mcl, cmd, len);
1639 if (!(mcl->ctrl & MCAP_CTRL_STD_OP)) {
1640 /* In case the remote device doesn't work correctly */
1641 error("Remote device does not support opcodes, cmd ignored");
1645 if (mcl->req == MCL_WAITING_RSP) {
1646 if (cmd[0] & 0x01) {
1647 /* Request arrived when a response is expected */
1648 if (mcl->role == MCL_INITIATOR)
1651 /* Initiator will ignore our last request */
1653 mcl->req = MCL_AVAILABLE;
1654 g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_REQ_IGNORED,
1655 "Initiator sent a request with more priority");
1656 mcap_notify_error(mcl, gerr);
1657 proc_req[mcl->state](mcl, cmd, len);
1660 proc_response(mcl, cmd, len);
1661 } else if (cmd[0] & 0x01)
1662 proc_req[mcl->state](mcl, cmd, len);
/* Watch callback for a data channel: invoked on error/hangup conditions.
 * Closes the MDL, refreshes the owning MCL state and notifies the upper
 * layer only if the channel had reached the connected state. */
1665 static gboolean mdl_event_cb(GIOChannel *chan, GIOCondition cond, gpointer data)
1668 struct mcap_mdl *mdl = data;
1671 DBG("Close MDL %d", mdl->mdlid);
/* Upper layer is told about closure only for fully connected channels */
1673 notify = (mdl->state == MDL_CONNECTED);
1676 update_mcl_state(mdl->mcl);
1679 /* Callback to upper layer */
1680 mdl->mcl->cb->mdl_closed(mdl, mdl->mcl->cb->user_data);
/* bt_io_connect() completion callback for an outgoing data channel.  On
 * failure the MDL is marked closed and its channel reference dropped; on
 * success a watch is installed to detect remote closure.  The user's
 * operation callback runs in both cases with the connection error. */
1686 static void mcap_connect_mdl_cb(GIOChannel *chan, GError *conn_err,
1689 struct mcap_mdl_op_cb *con = data;
1690 struct mcap_mdl *mdl = con->mdl;
1691 mcap_mdl_operation_cb cb = con->cb.op;
1692 gpointer user_data = con->user_data;
1694 DBG("mdl connect callback");
1697 DBG("ERROR: mdl connect callback");
1698 mdl->state = MDL_CLOSED;
1699 g_io_channel_unref(mdl->dc);
1701 cb(mdl, conn_err, user_data);
1705 mdl->state = MDL_CONNECTED;
/* Watch only error conditions; data traffic is handled by upper layer */
1706 mdl->wid = g_io_add_watch_full(mdl->dc, G_PRIORITY_DEFAULT,
1707 G_IO_ERR | G_IO_HUP | G_IO_NVAL,
1708 (GIOFunc) mdl_event_cb,
1710 (GDestroyNotify) mcap_mdl_unref);
1712 cb(mdl, conn_err, user_data);
/* Open the L2CAP data channel for an MDL previously negotiated on the
 * control channel.  The MDL must be in MDL_WAITING state and the mode
 * must be ERTM or streaming.  Ownership of 'con' passes to bt_io_connect
 * (freed via free_mcap_mdl_op); connect_cb fires on completion.
 * Returns TRUE if the connection attempt was started, FALSE with *err
 * set otherwise. */
1715 gboolean mcap_connect_mdl(struct mcap_mdl *mdl, uint8_t mode,
1717 mcap_mdl_operation_cb connect_cb,
1719 GDestroyNotify destroy,
1722 struct mcap_mdl_op_cb *con;
1724 if (mdl->state != MDL_WAITING) {
1725 g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_MDL,
1726 "%s", error2str(MCAP_INVALID_MDL));
1730 if ((mode != BT_IO_MODE_ERTM) && (mode != BT_IO_MODE_STREAMING)) {
1731 g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
1732 "Invalid MDL configuration");
1736 con = g_new0(struct mcap_mdl_op_cb, 1);
1737 con->mdl = mcap_mdl_ref(mdl);
1738 con->cb.op = connect_cb;
1739 con->destroy = destroy;
1740 con->user_data = user_data;
1742 mdl->dc = bt_io_connect(mcap_connect_mdl_cb, con,
1743 (GDestroyNotify) free_mcap_mdl_op, err,
1744 BT_IO_OPT_SOURCE_BDADDR, &mdl->mcl->mi->src,
1745 BT_IO_OPT_DEST_BDADDR, &mdl->mcl->addr,
1746 BT_IO_OPT_PSM, dcpsm,
1747 BT_IO_OPT_MTU, MCAP_DC_MTU,
1748 BT_IO_OPT_SEC_LEVEL, mdl->mcl->mi->sec,
1749 BT_IO_OPT_MODE, mode,
/* Connection could not even be attempted: undo the ref taken above */
1752 DBG("MDL Connection error");
1753 mdl->state = MDL_CLOSED;
1754 mcap_mdl_unref(con->mdl);
/* Watch callback for the MCL control channel: reads incoming PDUs and
 * feeds them to proc_cmd().  On error/hangup the MCL is torn down, any
 * pending request callback is failed, and the MCL is cached. */
1762 static gboolean mcl_control_cb(GIOChannel *chan, GIOCondition cond,
1765 GError *gerr = NULL;
1766 struct mcap_mcl *mcl = data;
1768 uint8_t buf[MCAP_CC_MTU];
1770 if (cond & (G_IO_ERR | G_IO_HUP | G_IO_NVAL))
1773 sk = g_io_channel_unix_get_fd(chan);
1774 len = read(sk, buf, sizeof(buf));
1778 proc_cmd(mcl, buf, (uint32_t) len);
/* Channel is going down: fail any pending request before caching */
1782 if (mcl->state != MCL_IDLE) {
1783 if (mcl->req == MCL_WAITING_RSP) {
1784 /* notify error in pending callback */
1785 g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_MCL_CLOSED,
1787 mcap_notify_error(mcl, gerr);
1790 mcl->mi->mcl_disconnected_cb(mcl, mcl->mi->user_data);
1792 mcap_cache_mcl(mcl);
/* bt_io_connect() completion callback for an outgoing MCL control
 * channel.  Handles release-while-connecting, duplicate connections to
 * the same remote address, promotes cached MCLs, installs the control
 * channel watch and finally runs the user's connect callback. */
1796 static void mcap_connect_mcl_cb(GIOChannel *chan, GError *conn_err,
1800 struct connect_mcl *con = user_data;
1801 struct mcap_mcl *aux, *mcl = con->mcl;
1802 mcap_mcl_connect_cb connect_cb = con->connect_cb;
1803 gpointer data = con->user_data;
1804 GError *gerr = NULL;
1806 mcl->ctrl &= ~MCAP_CTRL_CONN;
/* The MCL was released while the connection was in flight */
1809 if (mcl->ctrl & MCAP_CTRL_FREE) {
1810 mcap_mcl_release(mcl);
1811 mcl->mi->mcl_uncached_cb(mcl, mcl->mi->user_data);
1813 connect_cb(NULL, conn_err, data);
1817 ba2str(&mcl->addr, dstaddr);
1819 aux = find_mcl(mcl->mi->mcls, &mcl->addr);
1821 /* Double MCL connection case */
1822 error("MCL error: Device %s is already connected", dstaddr);
1823 g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_ALREADY_EXISTS,
1824 "MCL %s is already connected", dstaddr);
1825 connect_cb(NULL, gerr, data);
1830 mcl->state = MCL_CONNECTED;
1831 mcl->role = MCL_INITIATOR;
1832 mcl->req = MCL_AVAILABLE;
1833 mcl->ctrl |= MCAP_CTRL_STD_OP;
1835 mcap_sync_init(mcl);
/* A cached MCL is promoted back into the active list */
1837 if (mcl->ctrl & MCAP_CTRL_CACHED)
1838 mcap_uncache_mcl(mcl);
1840 mcl->ctrl &= ~MCAP_CTRL_FREE;
1841 mcl->mi->mcls = g_slist_prepend(mcl->mi->mcls,
1845 mcl->wid = g_io_add_watch_full(mcl->cc, G_PRIORITY_DEFAULT,
1846 G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL,
1847 (GIOFunc) mcl_control_cb,
1849 (GDestroyNotify) mcap_mcl_unref);
1850 connect_cb(mcl, gerr, data);
/* Bind an accepted incoming data channel to a waiting MDL: mark it
 * connected, install the closure watch and notify the upper layer. */
1853 static void set_mdl_properties(GIOChannel *chan, struct mcap_mdl *mdl)
1855 struct mcap_mcl *mcl = mdl->mcl;
1857 mdl->state = MDL_CONNECTED;
1858 mdl->dc = g_io_channel_ref(chan);
/* Only error conditions are watched; data I/O belongs to upper layer */
1859 mdl->wid = g_io_add_watch_full(mdl->dc, G_PRIORITY_DEFAULT,
1860 G_IO_ERR | G_IO_HUP | G_IO_NVAL,
1861 (GIOFunc) mdl_event_cb,
1863 (GDestroyNotify) mcap_mdl_unref);
1865 mcl->state = MCL_ACTIVE;
1866 mcl->cb->mdl_connected(mdl, mcl->cb->user_data);
/* GDestroyNotify for connect_mcl data: drops the MCL reference held for
 * the duration of the connect and chains the user's destroy callback. */
1869 static void mcl_io_destroy(gpointer data)
1871 struct connect_mcl *con = data;
1873 mcap_mcl_unref(con->mcl);
1875 con->destroy(con->user_data);
/* Create (or revive from cache) an MCL to the given remote address and
 * start the control-channel connection.  Fails if an MCL to that address
 * already exists.  Returns TRUE when the connect was started, FALSE with
 * *err set otherwise. */
1879 gboolean mcap_create_mcl(struct mcap_instance *mi,
1880 const bdaddr_t *addr,
1882 mcap_mcl_connect_cb connect_cb,
1884 GDestroyNotify destroy,
1887 struct mcap_mcl *mcl;
1888 struct connect_mcl *con;
1890 mcl = find_mcl(mi->mcls, addr);
1892 g_set_error(err, MCAP_ERROR, MCAP_ERROR_ALREADY_EXISTS,
1893 "MCL is already connected.");
/* Reuse a cached MCL for this address if one exists, else allocate */
1897 mcl = find_mcl(mi->cached, addr);
1899 mcl = g_new0(struct mcap_mcl, 1);
1900 mcl->mi = mcap_instance_ref(mi);
1901 mcl->state = MCL_IDLE;
1902 bacpy(&mcl->addr, addr);
1903 set_default_cb(mcl);
/* First MDL id is randomized to avoid predictable id sequences */
1904 mcl->next_mdl = (rand() % MCAP_MDLID_FINAL) + 1;
1907 mcl->ctrl |= MCAP_CTRL_CONN;
1909 con = g_new0(struct connect_mcl, 1);
1910 con->mcl = mcap_mcl_ref(mcl);
1911 con->connect_cb = connect_cb;
1912 con->destroy = destroy;
1913 con->user_data = user_data;
1915 mcl->cc = bt_io_connect(mcap_connect_mcl_cb, con,
1916 mcl_io_destroy, err,
1917 BT_IO_OPT_SOURCE_BDADDR, &mi->src,
1918 BT_IO_OPT_DEST_BDADDR, addr,
1919 BT_IO_OPT_PSM, ccpsm,
1920 BT_IO_OPT_MTU, MCAP_CC_MTU,
1921 BT_IO_OPT_SEC_LEVEL, mi->sec,
1922 BT_IO_OPT_MODE, BT_IO_MODE_ERTM,
/* Connect could not be started: roll back flags and references */
1925 mcl->ctrl &= ~MCAP_CTRL_CONN;
1926 if (mcl->ctrl & MCAP_CTRL_FREE) {
1927 mcap_mcl_release(mcl);
1928 mcl->mi->mcl_uncached_cb(mcl, mcl->mi->user_data);
1930 mcap_mcl_unref(con->mcl);
/* Listener callback for incoming data-channel connections: accept the
 * channel only if an MCL in MCL_PENDING state exists for the remote
 * address and it has an MDL waiting; otherwise shut the channel down. */
1938 static void connect_dc_event_cb(GIOChannel *chan, GError *gerr,
1941 struct mcap_instance *mi = user_data;
1942 struct mcap_mcl *mcl;
1943 struct mcap_mdl *mdl;
1951 bt_io_get(chan, &err, BT_IO_OPT_DEST_BDADDR, &dst, BT_IO_OPT_INVALID);
1953 error("%s", err->message);
1958 mcl = find_mcl(mi->mcls, &dst);
1959 if (!mcl || mcl->state != MCL_PENDING)
/* Bind the channel to the first MDL awaiting a data connection */
1962 for (l = mcl->mdls; l; l = l->next) {
1964 if (mdl->state == MDL_WAITING) {
1965 set_mdl_properties(chan, mdl);
/* No eligible MDL/MCL: refuse the incoming data channel */
1971 g_io_channel_shutdown(chan, TRUE, NULL);
/* Configure an MCL for an accepted incoming control channel: we are the
 * acceptor.  Promotes a cached MCL if this is a reconnection, installs
 * the control watch, and reports the MCL via the (re)connected callback. */
1974 static void set_mcl_conf(GIOChannel *chan, struct mcap_mcl *mcl)
1978 mcl->state = MCL_CONNECTED;
1979 mcl->role = MCL_ACCEPTOR;
1980 mcl->req = MCL_AVAILABLE;
1981 mcl->cc = g_io_channel_ref(chan);
1982 mcl->ctrl |= MCAP_CTRL_STD_OP;
1984 mcap_sync_init(mcl);
/* A cached MCL means the remote reconnected; report it differently */
1986 reconn = (mcl->ctrl & MCAP_CTRL_CACHED);
1988 mcap_uncache_mcl(mcl);
1990 mcl->mi->mcls = g_slist_prepend(mcl->mi->mcls,
1993 mcl->wid = g_io_add_watch_full(mcl->cc, G_PRIORITY_DEFAULT,
1994 G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL,
1995 (GIOFunc) mcl_control_cb,
1997 (GDestroyNotify) mcap_mcl_unref);
1999 /* Callback to report new MCL */
2001 mcl->mi->mcl_reconnected_cb(mcl, mcl->mi->user_data);
2003 mcl->mi->mcl_connected_cb(mcl, mcl->mi->user_data);
/* Listener callback for incoming control-channel connections: reject the
 * channel if an MCL with this remote address already exists, otherwise
 * reuse a cached MCL or allocate a fresh one and configure it. */
2006 static void connect_mcl_event_cb(GIOChannel *chan, GError *gerr,
2009 struct mcap_instance *mi = user_data;
2010 struct mcap_mcl *mcl;
2012 char address[18], srcstr[18];
2018 bt_io_get(chan, &err,
2019 BT_IO_OPT_DEST_BDADDR, &dst,
2020 BT_IO_OPT_DEST, address,
2023 error("%s", err->message);
2028 ba2str(&mi->src, srcstr);
2029 mcl = find_mcl(mi->mcls, &dst);
/* Duplicate control channel from the same device is not allowed */
2031 error("Control channel already created with %s on adapter %s",
2036 mcl = find_mcl(mi->cached, &dst);
2038 mcl = g_new0(struct mcap_mcl, 1);
2039 mcl->mi = mcap_instance_ref(mi);
2040 bacpy(&mcl->addr, &dst);
2041 set_default_cb(mcl);
/* First MDL id is randomized to avoid predictable id sequences */
2042 mcl->next_mdl = (rand() % MCAP_MDLID_FINAL) + 1;
2045 set_mcl_conf(chan, mcl);
2049 g_io_channel_shutdown(chan, TRUE, NULL);
/* Create an MCAP instance bound to a local adapter address: validates
 * the security level and mandatory callbacks, then starts listening on
 * both control (ERTM) and data channel PSMs.  Returns a referenced
 * instance, or NULL with *gerr set on failure. */
2052 struct mcap_instance *mcap_create_instance(const bdaddr_t *src,
2056 mcap_mcl_event_cb mcl_connected,
2057 mcap_mcl_event_cb mcl_reconnected,
2058 mcap_mcl_event_cb mcl_disconnected,
2059 mcap_mcl_event_cb mcl_uncached,
2060 mcap_info_ind_event_cb mcl_sync_info_ind,
2064 struct mcap_instance *mi;
/* MCAP requires at least medium security on the L2CAP links */
2066 if (sec < BT_IO_SEC_MEDIUM) {
2067 g_set_error(gerr, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
2068 "Security level can't be minor of %d",
2073 if (!(mcl_connected && mcl_reconnected &&
2074 mcl_disconnected && mcl_uncached)) {
2075 g_set_error(gerr, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
2076 "The callbacks can't be null");
2080 mi = g_new0(struct mcap_instance, 1);
2082 bacpy(&mi->src, src);
2085 mi->mcl_connected_cb = mcl_connected;
2086 mi->mcl_reconnected_cb = mcl_reconnected;
2087 mi->mcl_disconnected_cb = mcl_disconnected;
2088 mi->mcl_uncached_cb = mcl_uncached;
2089 mi->mcl_sync_infoind_cb = mcl_sync_info_ind;
2090 mi->user_data = user_data;
/* CSP is opt-in; see mcap_enable_csp() */
2091 mi->csp_enabled = FALSE;
2093 /* Listen incoming connections in control channel */
2094 mi->ccio = bt_io_listen(connect_mcl_event_cb, NULL, mi,
2096 BT_IO_OPT_SOURCE_BDADDR, &mi->src,
2097 BT_IO_OPT_PSM, ccpsm,
2098 BT_IO_OPT_MTU, MCAP_CC_MTU,
2099 BT_IO_OPT_SEC_LEVEL, sec,
2100 BT_IO_OPT_MODE, BT_IO_MODE_ERTM,
2103 error("%s", (*gerr)->message);
2108 /* Listen incoming connections in data channels */
2109 mi->dcio = bt_io_listen(connect_dc_event_cb, NULL, mi,
2111 BT_IO_OPT_SOURCE_BDADDR, &mi->src,
2112 BT_IO_OPT_PSM, dcpsm,
2113 BT_IO_OPT_MTU, MCAP_DC_MTU,
2114 BT_IO_OPT_SEC_LEVEL, sec,
/* Data listener failed: tear down the control listener as well */
2117 g_io_channel_shutdown(mi->ccio, TRUE, NULL);
2118 g_io_channel_unref(mi->ccio);
2120 error("%s", (*gerr)->message);
2125 /* Initialize random seed to generate mdlids for this instance */
2128 return mcap_instance_ref(mi);
/* Release an MCAP instance: close both listeners and release/unref every
 * active and cached MCL.  Does not free the instance itself; that is
 * done when the last instance reference is dropped. */
2131 void mcap_release_instance(struct mcap_instance *mi)
2139 g_io_channel_shutdown(mi->ccio, TRUE, NULL);
2140 g_io_channel_unref(mi->ccio);
2145 g_io_channel_shutdown(mi->dcio, TRUE, NULL);
2146 g_io_channel_unref(mi->dcio);
2150 for (l = mi->mcls; l; l = l->next) {
2151 mcap_mcl_release(l->data);
2152 mcap_mcl_unref(l->data);
2155 g_slist_free(mi->mcls);
2158 for (l = mi->cached; l; l = l->next) {
2159 mcap_mcl_release(l->data);
2160 mcap_mcl_unref(l->data);
2163 g_slist_free(mi->cached);
/* Take a reference on the instance; returns the instance for chaining. */
2167 struct mcap_instance *mcap_instance_ref(struct mcap_instance *mi)
2171 DBG("mcap_instance_ref(%p): ref=%d", mi, mi->ref);
/* Drop a reference on the instance; releases it when the count hits 0. */
2176 void mcap_instance_unref(struct mcap_instance *mi)
2180 DBG("mcap_instance_unref(%p): ref=%d", mi, mi->ref);
2185 mcap_release_instance(mi);
/* Return the PSM the control-channel listener is bound to, or fail with
 * *err set if the instance or its control listener is missing. */
2189 uint16_t mcap_get_ctrl_psm(struct mcap_instance *mi, GError **err)
2193 if (!(mi && mi->ccio)) {
2194 g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
2195 "Invalid MCAP instance");
2199 if (!bt_io_get(mi->ccio, err, BT_IO_OPT_PSM, &lpsm, BT_IO_OPT_INVALID))
/* Return the PSM the data-channel listener is bound to, or fail with
 * *err set if the instance or its data listener is missing. */
2205 uint16_t mcap_get_data_psm(struct mcap_instance *mi, GError **err)
2209 if (!(mi && mi->dcio)) {
2210 g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
2211 "Invalid MCAP instance");
2215 if (!bt_io_get(mi->dcio, err, BT_IO_OPT_PSM, &lpsm, BT_IO_OPT_INVALID))
/* Change the L2CAP mode used for newly accepted data channels. */
2221 gboolean mcap_set_data_chan_mode(struct mcap_instance *mi, uint8_t mode,
2224 if (!(mi && mi->dcio)) {
2225 g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
2226 "Invalid MCAP instance");
2230 return bt_io_set(mi->dcio, err, BT_IO_OPT_MODE, mode,
/* Take a reference on the MDL; returns the MDL for chaining. */
2234 struct mcap_mdl *mcap_mdl_ref(struct mcap_mdl *mdl)
2238 DBG("mcap_mdl_ref(%p): ref=%d", mdl, mdl->ref);
/* Drop a reference on the MDL; frees it when the count reaches zero. */
2243 void mcap_mdl_unref(struct mcap_mdl *mdl)
2247 DBG("mcap_mdl_unref(%p): ref=%d", mdl, mdl->ref);
/* Write a raw CSP PDU to the MCL control channel socket.  Fails if the
 * control channel is gone; otherwise returns mcap_send_data()'s result. */
2256 static int send_sync_cmd(struct mcap_mcl *mcl, const void *buf, uint32_t size)
2260 if (mcl->cc == NULL)
2263 sock = g_io_channel_unix_get_fd(mcl->cc);
2264 return mcap_send_data(sock, buf, size);
/* Reply to a sync-capabilities request with REQUEST_NOT_SUPPORTED; used
 * when CSP is disabled for this instance. */
2267 static int send_unsupported_cap_req(struct mcap_mcl *mcl)
2269 mcap_md_sync_cap_rsp *cmd;
2272 cmd = g_new0(mcap_md_sync_cap_rsp, 1);
2273 cmd->op = MCAP_MD_SYNC_CAP_RSP;
2274 cmd->rc = MCAP_REQUEST_NOT_SUPPORTED;
2276 sent = send_sync_cmd(mcl, cmd, sizeof(*cmd));
/* Reply to a sync-set request with REQUEST_NOT_SUPPORTED; used when CSP
 * is disabled for this instance. */
2282 static int send_unsupported_set_req(struct mcap_mcl *mcl)
2284 mcap_md_sync_set_rsp *cmd;
2287 cmd = g_new0(mcap_md_sync_set_rsp, 1);
2288 cmd->op = MCAP_MD_SYNC_SET_RSP;
2289 cmd->rc = MCAP_REQUEST_NOT_SUPPORTED;
2291 sent = send_sync_cmd(mcl, cmd, sizeof(*cmd));
/* Rebase the CSP timestamp: record the new timestamp value and the wall
 * reference it corresponds to.  If base_time is NULL the current
 * monotonic time is sampled instead. */
2297 static void reset_tmstamp(struct mcap_csp *csp, struct timespec *base_time,
2298 uint64_t new_tmstamp)
2300 csp->base_tmstamp = new_tmstamp;
2302 csp->base_time = *base_time;
2304 clock_gettime(CLK, &csp->base_time);
/* Allocate and initialize per-MCL CSP state (no-op when CSP disabled).
 * The timestamp base starts at zero against the current monotonic time. */
2307 void mcap_sync_init(struct mcap_mcl *mcl)
2309 if (!mcl->mi->csp_enabled) {
2314 mcl->csp = g_new0(struct mcap_csp, 1);
2316 mcl->csp->rem_req_acc = 10000; /* safe divisor */
2317 mcl->csp->set_data = NULL;
2318 mcl->csp->csp_priv_data = NULL;
2320 reset_tmstamp(mcl->csp, NULL, 0);
/* Tear down per-MCL CSP state: cancel the indication and delayed-set
 * timers and free any pending set/request data. */
2323 void mcap_sync_stop(struct mcap_mcl *mcl)
2328 if (mcl->csp->ind_timer)
2329 g_source_remove(mcl->csp->ind_timer);
2331 if (mcl->csp->set_timer)
2332 g_source_remove(mcl->csp->set_timer);
2334 if (mcl->csp->set_data)
2335 g_free(mcl->csp->set_data);
2337 if (mcl->csp->csp_priv_data)
2338 g_free(mcl->csp->csp_priv_data);
/* Clear the fields so a later stop/free cannot double-release them */
2340 mcl->csp->ind_timer = 0;
2341 mcl->csp->set_timer = 0;
2342 mcl->csp->set_data = NULL;
2343 mcl->csp->csp_priv_data = NULL;
/* Express a timespec as an absolute number of microseconds. */
static uint64_t time_us(struct timespec *tv)
{
	int64_t usec = tv->tv_sec * 1000000ll;

	usec += tv->tv_nsec / 1000ll;

	return usec;
}
/* Convert Bluetooth clock ticks to microseconds (312.5 us per tick,
 * consistent with bt2ms() below) — body not visible in this view. */
2354 static int64_t bt2us(int bt)
/* Convert Bluetooth clock ticks (312.5 us each) into whole milliseconds,
 * truncating toward zero exactly like the original expression. */
static int bt2ms(int bt)
{
	double ms = (bt * 312.5) / 1000;

	return ms;
}
2364 static int btoffset(uint32_t btclk1, uint32_t btclk2)
2366 int offset = btclk2 - btclk1;
2368 if (offset <= -MCAP_BTCLOCK_HALF)
2369 offset += MCAP_BTCLOCK_FIELD;
2370 else if (offset > MCAP_BTCLOCK_HALF)
2371 offset -= MCAP_BTCLOCK_FIELD;
/* Thin alias for btoffset(): wrapped BT-clock difference btclk2-btclk1. */
2376 static int btdiff(uint32_t btclk1, uint32_t btclk2)
2378 return btoffset(btclk1, btclk2);
/* A Bluetooth clock value is valid if it fits in the 28-bit field. */
2381 static gboolean valid_btclock(uint32_t btclk)
2383 return btclk <= MCAP_BTCLOCK_MAX;
2386 /* This call may fail; either deal with retry or use read_btclock_retry */
2387 static gboolean read_btclock(struct mcap_mcl *mcl, uint32_t *btclock,
2388 uint16_t *btaccuracy)
2391 * FIXME: btd_adapter_read_clock(...) always return FALSE, current
2392 * code doesn't support CSP (Clock Synchronization Protocol). To avoid
2393 * build dependency on struct 'btd_adapter', removing this code.
/* Retry wrapper around read_btclock(): attempts the read a bounded
 * number of times before giving up. */
2399 static gboolean read_btclock_retry(struct mcap_mcl *mcl, uint32_t *btclock,
2400 uint16_t *btaccuracy)
2404 while (--retries >= 0) {
2405 if (read_btclock(mcl, btclock, btaccuracy))
2407 DBG("CSP: retrying to read bt clock...");
2413 static gboolean get_btrole(struct mcap_mcl *mcl)
2418 if (mcl->cc == NULL)
2421 sock = g_io_channel_unix_get_fd(mcl->cc);
2422 len = sizeof(flags);
2424 if (getsockopt(sock, SOL_L2CAP, L2CAP_LM, &flags, &len))
2425 DBG("CSP: could not read role");
2427 return flags & L2CAP_LM_MASTER;
/* Return the current CSP timestamp: elapsed monotonic time since the
 * base reference plus the base timestamp.  If given_time is supplied it
 * is used as "now"; otherwise CLOCK_MONOTONIC is sampled.  Returns
 * MCAP_TMSTAMP_DONTSET when CSP state or the clock is unavailable. */
2430 uint64_t mcap_get_timestamp(struct mcap_mcl *mcl,
2431 struct timespec *given_time)
2433 struct timespec now;
2437 return MCAP_TMSTAMP_DONTSET;
2442 if (clock_gettime(CLK, &now) < 0)
2443 return MCAP_TMSTAMP_DONTSET;
2445 tmstamp = time_us(&now) - time_us(&mcl->csp->base_time)
2446 + mcl->csp->base_tmstamp;
/* Read the adapter's Bluetooth clock, retrying on transient failure.
 * Returns MCAP_BTCLOCK_IMMEDIATE when unavailable and 0xffffffff when
 * the retried read ultimately fails. */
2451 uint32_t mcap_get_btclock(struct mcap_mcl *mcl)
2457 return MCAP_BTCLOCK_IMMEDIATE;
2459 if (!read_btclock_retry(mcl, &btclock, &accuracy))
2460 btclock = 0xffffffff;
/* Measure local CSP capabilities once per process: timestamp resolution
 * from clock_getres(), then BT-clock read latency sampled SAMPLE_COUNT
 * times, averaged with outlier ('freak' latency) rejection.  Results
 * are stored in the file-scope _caps; sets csp_caps_initialized. */
2465 static gboolean initialize_caps(struct mcap_mcl *mcl)
2467 struct timespec t1, t2;
2468 int latencies[SAMPLE_COUNT];
2469 int latency, avg, dev;
2471 uint16_t btaccuracy;
2475 clock_getres(CLK, &t1);
2477 _caps.ts_res = time_us(&t1);
2478 if (_caps.ts_res < 1)
2481 _caps.ts_acc = 20; /* ppm, estimated */
2483 /* A little exercise before measuring latency */
2484 clock_gettime(CLK, &t1);
2485 read_btclock_retry(mcl, &btclock, &btaccuracy);
2487 /* Read clock a number of times and measure latency */
2490 retries = MAX_RETRIES;
2491 while (i < SAMPLE_COUNT && retries > 0) {
2492 clock_gettime(CLK, &t1);
2493 if (!read_btclock(mcl, &btclock, &btaccuracy)) {
2497 clock_gettime(CLK, &t2);
2499 latency = time_us(&t2) - time_us(&t1);
2500 latencies[i] = latency;
2508 /* Calculate average and deviation */
2509 avg /= SAMPLE_COUNT;
2511 for (i = 0; i < SAMPLE_COUNT; ++i)
2512 dev += abs(latencies[i] - avg);
2513 dev /= SAMPLE_COUNT;
2515 /* Calculate corrected average, without 'freak' latencies */
2517 for (i = 0; i < SAMPLE_COUNT; ++i) {
/* Samples more than 6 mean deviations above average are discarded */
2518 if (latencies[i] > (avg + dev * 6))
2521 latency += latencies[i];
2523 latency /= SAMPLE_COUNT;
2525 _caps.latency = latency;
2526 _caps.preempt_thresh = latency * 4;
2527 _caps.syncleadtime_ms = latency * 50 / 1000;
2529 csp_caps_initialized = TRUE;
/* Lazy accessor for the measured CSP capabilities; triggers a one-time
 * initialize_caps() on first use. */
2533 static struct csp_caps *caps(struct mcap_mcl *mcl)
2535 if (!csp_caps_initialized)
2536 if (!initialize_caps(mcl)) {
2537 /* Temporary failure in reading BT clock */
/* Build and send an MD_SYNC_CAP_RSP PDU with the given response code and
 * capability fields; multi-byte fields go out in network byte order. */
2544 static int send_sync_cap_rsp(struct mcap_mcl *mcl, uint8_t rspcode,
2545 uint8_t btclockres, uint16_t synclead,
2546 uint16_t tmstampres, uint16_t tmstampacc)
2548 mcap_md_sync_cap_rsp *rsp;
2551 rsp = g_new0(mcap_md_sync_cap_rsp, 1);
2553 rsp->op = MCAP_MD_SYNC_CAP_RSP;
2556 rsp->btclock = btclockres;
2557 rsp->sltime = htons(synclead);
2558 rsp->timestnr = htons(tmstampres);
2559 rsp->timestna = htons(tmstampacc);
2561 sent = send_sync_cmd(mcl, rsp, sizeof(*rsp));
/* Handle an incoming MD_SYNC_CAP_REQ (we are the CSP peripheral):
 * validate length and requested accuracy against our own, read the BT
 * clock, record the requirement, and answer with our capabilities. */
2567 static void proc_sync_cap_req(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
2569 mcap_md_sync_cap_req *req;
2570 uint16_t required_accuracy;
2571 uint16_t our_accuracy;
2575 if (len != sizeof(mcap_md_sync_cap_req)) {
2576 send_sync_cap_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
2582 send_sync_cap_rsp(mcl, MCAP_RESOURCE_UNAVAILABLE,
2587 req = (mcap_md_sync_cap_req *) cmd;
2588 required_accuracy = ntohs(req->timest);
2589 our_accuracy = caps(mcl)->ts_acc;
/* We cannot serve a requirement tighter than our own accuracy */
2592 if (required_accuracy < our_accuracy || required_accuracy < 1) {
2593 send_sync_cap_rsp(mcl, MCAP_RESOURCE_UNAVAILABLE,
2598 if (!read_btclock_retry(mcl, &btclock, &btres)) {
2599 send_sync_cap_rsp(mcl, MCAP_RESOURCE_UNAVAILABLE,
/* Remember that the central has our caps and what accuracy it needs */
2604 mcl->csp->remote_caps = 1;
2605 mcl->csp->rem_req_acc = required_accuracy;
2607 send_sync_cap_rsp(mcl, MCAP_SUCCESS, btres,
2608 caps(mcl)->syncleadtime_ms,
2609 caps(mcl)->ts_res, our_accuracy);
/* Build and send an MD_SYNC_SET_RSP PDU with the given response code,
 * clock/timestamp snapshot and accuracy, in network byte order. */
2612 static int send_sync_set_rsp(struct mcap_mcl *mcl, uint8_t rspcode,
2613 uint32_t btclock, uint64_t timestamp,
2614 uint16_t tmstampres)
2616 mcap_md_sync_set_rsp *rsp;
2619 rsp = g_new0(mcap_md_sync_set_rsp, 1);
2621 rsp->op = MCAP_MD_SYNC_SET_RSP;
2623 rsp->btclock = htonl(btclock);
2624 rsp->timestst = hton64(timestamp);
2625 rsp->timestsa = htons(tmstampres);
2627 sent = send_sync_cmd(mcl, rsp, sizeof(*rsp));
/* Take a coherent snapshot of BT clock, monotonic base time and CSP
 * timestamp.  Retries when the measured transaction latency exceeds the
 * preemption threshold, which indicates the reads were not atomic. */
2633 static gboolean get_all_clocks(struct mcap_mcl *mcl, uint32_t *btclock,
2634 struct timespec *base_time,
2635 uint64_t *timestamp)
/* Start above threshold so the loop body executes at least once */
2645 latency = caps(mcl)->preempt_thresh + 1;
2647 while (latency > caps(mcl)->preempt_thresh && --retry >= 0) {
2649 if (clock_gettime(CLK, &t0) < 0)
2652 if (!read_btclock(mcl, btclock, &btres))
2655 if (clock_gettime(CLK, base_time) < 0)
2659 * Tries to detect preemption between clock_gettime
2660 * and read_btclock by measuring transaction time
2662 latency = time_us(base_time) - time_us(&t0);
2668 *timestamp = mcap_get_timestamp(mcl, base_time);
/* Timer callback (CSP peripheral): send an MD_SYNC_INFO_IND carrying a
 * fresh coherent clock/timestamp snapshot to the central. */
2673 static gboolean sync_send_indication(gpointer user_data)
2675 struct mcap_mcl *mcl;
2676 mcap_md_sync_info_ind *cmd;
2679 struct timespec base_time;
2691 if (!get_all_clocks(mcl, &btclock, &base_time, &tmstamp))
2694 cmd = g_new0(mcap_md_sync_info_ind, 1);
2696 cmd->op = MCAP_MD_SYNC_INFO_IND;
2697 cmd->btclock = htonl(btclock);
2698 cmd->timestst = hton64(tmstamp);
/* Reported accuracy is our measured clock-read latency */
2699 cmd->timestsa = htons(caps(mcl)->latency);
2701 sent = send_sync_cmd(mcl, cmd, sizeof(*cmd));
/* Phase 2 of handling MD_SYNC_SET_REQ (runs immediately or from the
 * delayed-set timer): apply the timestamp reset at the scheduled BT
 * clock, answer with MD_SYNC_SET_RSP, and (re)arm periodic indications. */
2707 static gboolean proc_sync_set_req_phase2(gpointer user_data)
2709 struct mcap_mcl *mcl;
2710 struct sync_set_data *data;
2712 uint32_t sched_btclock;
2713 uint64_t new_tmstamp;
2718 struct timespec base_time;
2719 uint16_t tmstampacc;
2728 if (!mcl->csp->set_data)
2732 data = mcl->csp->set_data;
2733 update = data->update;
2734 sched_btclock = data->sched_btclock;
2735 new_tmstamp = data->timestamp;
2736 ind_freq = data->ind_freq;
2740 send_sync_set_rsp(mcl, MCAP_UNSPECIFIED_ERROR, 0, 0, 0);
2744 if (!get_all_clocks(mcl, &btclock, &base_time, &tmstamp)) {
2745 send_sync_set_rsp(mcl, MCAP_UNSPECIFIED_ERROR, 0, 0, 0);
/* A role switch since phase 1 invalidates the scheduled operation */
2749 if (get_btrole(mcl) != role) {
2750 send_sync_set_rsp(mcl, MCAP_INVALID_OPERATION, 0, 0, 0);
2754 reset = (new_tmstamp != MCAP_TMSTAMP_DONTSET);
/* Compensate the new timestamp for timer scheduling jitter relative to
 * the requested BT clock instant */
2757 if (sched_btclock != MCAP_BTCLOCK_IMMEDIATE) {
2758 delay = bt2us(btdiff(sched_btclock, btclock));
2759 if (delay >= 0 || ((new_tmstamp - delay) > 0)) {
2760 new_tmstamp += delay;
2761 DBG("CSP: reset w/ delay %dus, compensated",
2764 DBG("CSP: reset w/ delay %dus, uncompensated",
2768 reset_tmstamp(mcl->csp, &base_time, new_tmstamp);
2769 tmstamp = new_tmstamp;
2772 tmstampacc = caps(mcl)->latency + caps(mcl)->ts_acc;
2774 if (mcl->csp->ind_timer) {
2775 g_source_remove(mcl->csp->ind_timer);
2776 mcl->csp->ind_timer = 0;
2780 int when = ind_freq + caps(mcl)->syncleadtime_ms;
2781 mcl->csp->ind_timer = g_timeout_add(when,
2782 sync_send_indication,
2786 send_sync_set_rsp(mcl, MCAP_SUCCESS, btclock, tmstamp, tmstampacc);
2788 /* First indication after set is immediate */
2790 sync_send_indication(mcl);
/* Handle an incoming MD_SYNC_SET_REQ (we are the CSP peripheral):
 * validate the PDU and requested schedule, compute the indication
 * frequency from the central's required accuracy, and either run phase 2
 * immediately or arm a timer for the scheduled BT clock instant. */
2795 static void proc_sync_set_req(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
2797 mcap_md_sync_set_req *req;
2798 uint32_t sched_btclock, cur_btclock;
2802 struct sync_set_data *set_data;
2803 int phase2_delay, ind_freq, when;
2805 if (len != sizeof(mcap_md_sync_set_req)) {
2806 send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE, 0, 0, 0);
2810 req = (mcap_md_sync_set_req *) cmd;
2811 sched_btclock = ntohl(req->btclock);
2812 update = req->timestui;
2813 timestamp = ntoh64(req->timestst);
2816 if (sched_btclock != MCAP_BTCLOCK_IMMEDIATE &&
2817 !valid_btclock(sched_btclock)) {
2818 send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE, 0, 0, 0);
2823 send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE, 0, 0, 0);
2827 if (!mcl->csp->remote_caps) {
2828 /* Remote side did not ask our capabilities yet */
2829 send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE, 0, 0, 0);
2834 send_sync_set_rsp(mcl, MCAP_UNSPECIFIED_ERROR, 0, 0, 0);
2838 if (!read_btclock_retry(mcl, &cur_btclock, &btres)) {
2839 send_sync_set_rsp(mcl, MCAP_UNSPECIFIED_ERROR, 0, 0, 0);
2843 if (sched_btclock == MCAP_BTCLOCK_IMMEDIATE)
2846 phase2_delay = btdiff(cur_btclock, sched_btclock);
2848 if (phase2_delay < 0) {
2849 /* can not reset in the past tense */
2850 send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
2855 /* Convert to milliseconds */
2856 phase2_delay = bt2ms(phase2_delay);
2858 if (phase2_delay > 61*1000) {
2859 /* More than 60 seconds in the future */
2860 send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
2863 } else if (phase2_delay < caps(mcl)->latency / 1000) {
2864 /* Too fast for us to do in time */
2865 send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
2873 * Indication frequency: required accuracy divided by ours
2874 * Converted to milliseconds
2876 ind_freq = (1000 * mcl->csp->rem_req_acc) / caps(mcl)->ts_acc;
2878 if (ind_freq < MAX(caps(mcl)->latency * 2 / 1000, 100)) {
2879 /* Too frequent, we can't handle */
2880 send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
2885 DBG("CSP: indication every %dms", ind_freq);
2889 if (mcl->csp->ind_timer) {
2890 /* Old indications are no longer sent */
2891 g_source_remove(mcl->csp->ind_timer);
2892 mcl->csp->ind_timer = 0;
/* Stash the request parameters for phase 2 (reused across requests) */
2895 if (!mcl->csp->set_data)
2896 mcl->csp->set_data = g_new0(struct sync_set_data, 1);
2898 set_data = (struct sync_set_data *) mcl->csp->set_data;
2900 set_data->update = update;
2901 set_data->sched_btclock = sched_btclock;
2902 set_data->timestamp = timestamp;
2903 set_data->ind_freq = ind_freq;
2904 set_data->role = get_btrole(mcl);
2907 * TODO is there some way to schedule a call based directly on
2908 * a BT clock value, instead of this estimation that uses
2912 if (phase2_delay > 0) {
2913 when = phase2_delay + caps(mcl)->syncleadtime_ms;
2914 mcl->csp->set_timer = g_timeout_add(when,
2915 proc_sync_set_req_phase2,
2918 proc_sync_set_req_phase2(mcl);
2920 /* First indication is immediate */
2922 sync_send_indication(mcl);
/* Handle an incoming MD_SYNC_CAP_RSP (we are the CSP central): match it
 * against our in-flight request, consume the stashed callback data, and
 * deliver the peripheral's capabilities to the user callback. */
2925 static void proc_sync_cap_rsp(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
2927 mcap_md_sync_cap_rsp *rsp;
2931 uint16_t tmstampres;
2932 uint16_t tmstampacc;
2933 struct mcap_sync_cap_cbdata *cbdata;
2934 mcap_sync_cap_cb cb;
2937 if (mcl->csp->csp_req != MCAP_MD_SYNC_CAP_REQ) {
2938 DBG("CSP: got unexpected cap respose");
2942 if (!mcl->csp->csp_priv_data) {
2943 DBG("CSP: no priv data for cap respose");
2947 cbdata = mcl->csp->csp_priv_data;
2949 user_data = cbdata->user_data;
/* Request is consumed: clear the in-flight state before the callback */
2952 mcl->csp->csp_priv_data = NULL;
2953 mcl->csp->csp_req = 0;
2955 if (len != sizeof(mcap_md_sync_cap_rsp)) {
2956 DBG("CSP: got corrupted cap respose");
2960 rsp = (mcap_md_sync_cap_rsp *) cmd;
2962 btclockres = rsp->btclock;
2963 synclead = ntohs(rsp->sltime);
2964 tmstampres = ntohs(rsp->timestnr);
2965 tmstampacc = ntohs(rsp->timestna);
/* Remote caps received: sync-set requests are now allowed */
2968 mcl->csp->local_caps = TRUE;
2970 cb(mcl, mcap_err, btclockres, synclead, tmstampres, tmstampacc, NULL,
/* Handle an incoming MD_SYNC_SET_RSP (we are the CSP central): match it
 * against our in-flight set request, validate the returned BT clock,
 * and deliver the result to the user callback. */
2974 static void proc_sync_set_rsp(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
2976 mcap_md_sync_set_rsp *rsp;
2981 struct mcap_sync_set_cbdata *cbdata;
2982 mcap_sync_set_cb cb;
2985 if (mcl->csp->csp_req != MCAP_MD_SYNC_SET_REQ) {
2986 DBG("CSP: got unexpected set respose");
2990 if (!mcl->csp->csp_priv_data) {
2991 DBG("CSP: no priv data for set respose");
2995 cbdata = mcl->csp->csp_priv_data;
2997 user_data = cbdata->user_data;
/* Request is consumed: clear the in-flight state before the callback */
3000 mcl->csp->csp_priv_data = NULL;
3001 mcl->csp->csp_req = 0;
3003 if (len != sizeof(mcap_md_sync_set_rsp)) {
3004 DBG("CSP: got corrupted set respose");
3008 rsp = (mcap_md_sync_set_rsp *) cmd;
3010 btclock = ntohl(rsp->btclock);
3011 timestamp = ntoh64(rsp->timestst);
3012 accuracy = ntohs(rsp->timestsa);
3014 if (!mcap_err && !valid_btclock(btclock))
3015 mcap_err = MCAP_ERROR_INVALID_ARGS;
3017 cb(mcl, mcap_err, btclock, timestamp, accuracy, NULL, user_data);
/* Handle an incoming MD_SYNC_INFO_IND (we are the CSP central): drop it
 * unless indications were requested, validate the BT clock, and forward
 * the clock/timestamp/accuracy triple to the registered callback. */
3020 static void proc_sync_info_ind(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
3022 mcap_md_sync_info_ind *req;
3023 struct sync_info_ind_data data;
3026 if (!mcl->csp->ind_expected) {
3027 DBG("CSP: received unexpected info indication");
3031 if (len != sizeof(mcap_md_sync_info_ind))
3034 req = (mcap_md_sync_info_ind *) cmd;
3036 btclock = ntohl(req->btclock);
3038 if (!valid_btclock(btclock))
3041 data.btclock = btclock;
3042 data.timestamp = ntoh64(req->timestst);
3043 data.accuracy = ntohs(req->timestsa);
3045 if (mcl->mi->mcl_sync_infoind_cb)
3046 mcl->mi->mcl_sync_infoind_cb(mcl, &data);
/* Dispatch a CSP-range PDU.  If CSP is disabled for the instance (or no
 * per-MCL CSP state exists), requests are refused with NOT_SUPPORTED and
 * everything else is ignored; otherwise route by opcode. */
3049 void proc_sync_cmd(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
3051 if (!mcl->mi->csp_enabled || !mcl->csp) {
3053 case MCAP_MD_SYNC_CAP_REQ:
3054 send_unsupported_cap_req(mcl);
3056 case MCAP_MD_SYNC_SET_REQ:
3057 send_unsupported_set_req(mcl);
3064 case MCAP_MD_SYNC_CAP_REQ:
3065 proc_sync_cap_req(mcl, cmd, len);
3067 case MCAP_MD_SYNC_CAP_RSP:
3068 proc_sync_cap_rsp(mcl, cmd, len);
3070 case MCAP_MD_SYNC_SET_REQ:
3071 proc_sync_set_req(mcl, cmd, len);
3073 case MCAP_MD_SYNC_SET_RSP:
3074 proc_sync_set_rsp(mcl, cmd, len);
3076 case MCAP_MD_SYNC_INFO_IND:
3077 proc_sync_info_ind(mcl, cmd, len);
/* Send an MD_SYNC_CAP_REQ to the peripheral asking for the given
 * required timestamp accuracy.  Fails if CSP is disabled or another CSP
 * request is already in flight; cb runs when the response arrives. */
3082 void mcap_sync_cap_req(struct mcap_mcl *mcl, uint16_t reqacc,
3083 mcap_sync_cap_cb cb, gpointer user_data,
3086 struct mcap_sync_cap_cbdata *cbdata;
3087 mcap_md_sync_cap_req *cmd;
3089 if (!mcl->mi->csp_enabled || !mcl->csp) {
3092 MCAP_ERROR_RESOURCE_UNAVAILABLE,
3093 "CSP not enabled for the instance");
/* Only one CSP request may be outstanding per MCL */
3097 if (mcl->csp->csp_req) {
3100 MCAP_ERROR_RESOURCE_UNAVAILABLE,
3101 "Pending CSP request");
3105 mcl->csp->csp_req = MCAP_MD_SYNC_CAP_REQ;
3106 cmd = g_new0(mcap_md_sync_cap_req, 1);
3108 cmd->op = MCAP_MD_SYNC_CAP_REQ;
3109 cmd->timest = htons(reqacc);
3111 cbdata = g_new0(struct mcap_sync_cap_cbdata, 1);
3113 cbdata->user_data = user_data;
3114 mcl->csp->csp_priv_data = cbdata;
3116 send_sync_cmd(mcl, cmd, sizeof(*cmd));
/* Send an MD_SYNC_SET_REQ to the peripheral scheduling a timestamp
 * (re)set at the given BT clock, optionally requesting update
 * indications.  Requires that capabilities were already exchanged and no
 * other CSP request is in flight; cb runs when the response arrives. */
3121 void mcap_sync_set_req(struct mcap_mcl *mcl, uint8_t update, uint32_t btclock,
3122 uint64_t timestamp, mcap_sync_set_cb cb,
3123 gpointer user_data, GError **err)
3125 mcap_md_sync_set_req *cmd;
3126 struct mcap_sync_set_cbdata *cbdata;
3128 if (!mcl->mi->csp_enabled || !mcl->csp) {
3131 MCAP_ERROR_RESOURCE_UNAVAILABLE,
3132 "CSP not enabled for the instance");
/* A set request is only valid after a successful capabilities exchange */
3136 if (!mcl->csp->local_caps) {
3139 MCAP_ERROR_RESOURCE_UNAVAILABLE,
3140 "Did not get CSP caps from peripheral yet");
3144 if (mcl->csp->csp_req) {
3147 MCAP_ERROR_RESOURCE_UNAVAILABLE,
3148 "Pending CSP request");
3152 mcl->csp->csp_req = MCAP_MD_SYNC_SET_REQ;
3153 cmd = g_new0(mcap_md_sync_set_req, 1);
3155 cmd->op = MCAP_MD_SYNC_SET_REQ;
3156 cmd->timestui = update;
3157 cmd->btclock = htonl(btclock);
3158 cmd->timestst = hton64(timestamp);
/* If updates were requested, start expecting info indications */
3160 mcl->csp->ind_expected = update;
3162 cbdata = g_new0(struct mcap_sync_set_cbdata, 1);
3164 cbdata->user_data = user_data;
3165 mcl->csp->csp_priv_data = cbdata;
3167 send_sync_cmd(mcl, cmd, sizeof(*cmd));
/* Enable the Clock Synchronization Protocol for this instance; affects
 * MCLs whose sync state is initialized afterwards. */
3172 void mcap_enable_csp(struct mcap_instance *mi)
3174 mi->csp_enabled = TRUE;
/* Disable the Clock Synchronization Protocol for this instance. */
3177 void mcap_disable_csp(struct mcap_instance *mi)
3179 mi->csp_enabled = FALSE;