4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * Implements upper edge functions for Bridge message module.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 #include <linux/types.h>
20 /* ----------------------------------- DSP/BIOS Bridge */
21 #include <dspbridge/dbdefs.h>
23 /* ----------------------------------- Trace & Debug */
24 #include <dspbridge/dbc.h>
26 /* ----------------------------------- OS Adaptation Layer */
27 #include <dspbridge/list.h>
28 #include <dspbridge/sync.h>
30 /* ----------------------------------- Platform Manager */
31 #include <dspbridge/dev.h>
33 /* ----------------------------------- Others */
34 #include <dspbridge/io_sm.h>
36 /* ----------------------------------- This */
38 #include <dspbridge/dspmsg.h>
40 /* ----------------------------------- Function Prototypes */
41 static int add_new_msg(struct lst_list *msg_list);
42 static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
43 static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
44 static void free_msg_list(struct lst_list *msg_list);
47 * ======== bridge_msg_create ========
48 * Create an object to manage message queues. Only one of these objects
49 * can exist per device object.
51 int bridge_msg_create(struct msg_mgr **msg_man,
52 struct dev_object *hdev_obj,
53 msg_onexit msg_callback)
55 struct msg_mgr *msg_mgr_obj;
56 struct io_mgr *hio_mgr;
59 if (!msg_man || !msg_callback || !hdev_obj) {
63 dev_get_io_mgr(hdev_obj, &hio_mgr);
69 /* Allocate msg_ctrl manager object */
70 msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
73 msg_mgr_obj->on_exit = msg_callback;
74 msg_mgr_obj->hio_mgr = hio_mgr;
75 /* List of MSG_QUEUEs */
76 msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list),
78 /* Queues of message frames for messages to the DSP. Message
79 * frames will only be added to the free queue when a
80 * msg_queue object is created. */
81 msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list),
83 msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list),
85 if (msg_mgr_obj->queue_list == NULL ||
86 msg_mgr_obj->msg_free_list == NULL ||
87 msg_mgr_obj->msg_used_list == NULL) {
90 INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
91 INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
92 INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
93 spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
96 /* Create an event to be used by bridge_msg_put() in waiting
97 * for an available free frame from the message manager. */
98 msg_mgr_obj->sync_event =
99 kzalloc(sizeof(struct sync_object), GFP_KERNEL);
100 if (!msg_mgr_obj->sync_event)
103 sync_init_event(msg_mgr_obj->sync_event);
106 *msg_man = msg_mgr_obj;
108 delete_msg_mgr(msg_mgr_obj);
118 * ======== bridge_msg_create_queue ========
119 * Create a msg_queue for sending/receiving messages to/from a node
122 int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
123 struct msg_queue **msgq,
124 u32 msgq_id, u32 max_msgs, void *arg)
127 u32 num_allocated = 0;
128 struct msg_queue *msg_q;
131 if (!hmsg_mgr || msgq == NULL || !hmsg_mgr->msg_free_list) {
137 /* Allocate msg_queue object */
138 msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
143 lst_init_elem((struct list_head *)msg_q);
144 msg_q->max_msgs = max_msgs;
145 msg_q->hmsg_mgr = hmsg_mgr;
146 msg_q->arg = arg; /* Node handle */
147 msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
148 /* Queues of Message frames for messages from the DSP */
149 msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
150 msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
151 if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
154 INIT_LIST_HEAD(&msg_q->msg_free_list->head);
155 INIT_LIST_HEAD(&msg_q->msg_used_list->head);
158 /* Create event that will be signalled when a message from
159 * the DSP is available. */
161 msg_q->sync_event = kzalloc(sizeof(struct sync_object),
163 if (msg_q->sync_event)
164 sync_init_event(msg_q->sync_event);
169 /* Create a notification list for message ready notification. */
171 msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
174 ntfy_init(msg_q->ntfy_obj);
179 /* Create events that will be used to synchronize cleanup
180 * when the object is deleted. sync_done will be set to
181 * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
182 * will be set by the unblocked thread to signal that it
183 * is unblocked and will no longer reference the object. */
185 msg_q->sync_done = kzalloc(sizeof(struct sync_object),
187 if (msg_q->sync_done)
188 sync_init_event(msg_q->sync_done);
194 msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
196 if (msg_q->sync_done_ack)
197 sync_init_event(msg_q->sync_done_ack);
203 /* Enter critical section */
204 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
205 /* Initialize message frames and put in appropriate queues */
206 for (i = 0; i < max_msgs && !status; i++) {
207 status = add_new_msg(hmsg_mgr->msg_free_list);
210 status = add_new_msg(msg_q->msg_free_list);
214 /* Stay inside CS to prevent others from taking any
215 * of the newly allocated message frames. */
216 delete_msg_queue(msg_q, num_allocated);
218 lst_put_tail(hmsg_mgr->queue_list,
219 (struct list_head *)msg_q);
221 /* Signal that free frames are now available */
222 if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
223 sync_set_event(hmsg_mgr->sync_event);
226 /* Exit critical section */
227 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
229 delete_msg_queue(msg_q, 0);
/*
 *  ======== bridge_msg_delete ========
 *      Delete a msg_ctrl manager allocated in bridge_msg_create().
 *      Safe to call with a NULL handle.
 */
void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
{
	if (hmsg_mgr)
		delete_msg_mgr(hmsg_mgr);
}
246 * ======== bridge_msg_delete_queue ========
247 * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
249 void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
251 struct msg_mgr *hmsg_mgr;
254 if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
257 hmsg_mgr = msg_queue_obj->hmsg_mgr;
258 msg_queue_obj->done = true;
259 /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
260 io_msg_pend = msg_queue_obj->io_msg_pend;
261 while (io_msg_pend) {
263 sync_set_event(msg_queue_obj->sync_done);
264 /* Wait for acknowledgement */
265 sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
266 io_msg_pend = msg_queue_obj->io_msg_pend;
268 /* Remove message queue from hmsg_mgr->queue_list */
269 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
270 lst_remove_elem(hmsg_mgr->queue_list,
271 (struct list_head *)msg_queue_obj);
272 /* Free the message queue object */
273 delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
274 if (!hmsg_mgr->msg_free_list)
276 if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
277 sync_reset_event(hmsg_mgr->sync_event);
279 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
285 * ======== bridge_msg_get ========
286 * Get a message from a msg_ctrl queue.
288 int bridge_msg_get(struct msg_queue *msg_queue_obj,
289 struct dsp_msg *pmsg, u32 utimeout)
291 struct msg_frame *msg_frame_obj;
292 struct msg_mgr *hmsg_mgr;
293 bool got_msg = false;
294 struct sync_object *syncs[2];
298 if (!msg_queue_obj || pmsg == NULL) {
303 hmsg_mgr = msg_queue_obj->hmsg_mgr;
304 if (!msg_queue_obj->msg_used_list) {
309 /* Enter critical section */
310 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
311 /* If a message is already there, get it */
312 if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
313 msg_frame_obj = (struct msg_frame *)
314 lst_get_head(msg_queue_obj->msg_used_list);
315 if (msg_frame_obj != NULL) {
316 *pmsg = msg_frame_obj->msg_data.msg;
317 lst_put_tail(msg_queue_obj->msg_free_list,
318 (struct list_head *)msg_frame_obj);
319 if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
320 sync_reset_event(msg_queue_obj->sync_event);
325 if (msg_queue_obj->done)
328 msg_queue_obj->io_msg_pend++;
331 /* Exit critical section */
332 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
333 if (!status && !got_msg) {
334 /* Wait til message is available, timeout, or done. We don't
335 * have to schedule the DPC, since the DSP will send messages
336 * when they are available. */
337 syncs[0] = msg_queue_obj->sync_event;
338 syncs[1] = msg_queue_obj->sync_done;
339 status = sync_wait_on_multiple_events(syncs, 2, utimeout,
341 /* Enter critical section */
342 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
343 if (msg_queue_obj->done) {
344 msg_queue_obj->io_msg_pend--;
345 /* Exit critical section */
346 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
347 /* Signal that we're not going to access msg_queue_obj
348 * anymore, so it can be deleted. */
349 (void)sync_set_event(msg_queue_obj->sync_done_ack);
353 DBC_ASSERT(!LST_IS_EMPTY
354 (msg_queue_obj->msg_used_list));
355 /* Get msg from used list */
356 msg_frame_obj = (struct msg_frame *)
357 lst_get_head(msg_queue_obj->msg_used_list);
358 /* Copy message into pmsg and put frame on the
360 if (msg_frame_obj != NULL) {
361 *pmsg = msg_frame_obj->msg_data.msg;
363 (msg_queue_obj->msg_free_list,
368 msg_queue_obj->io_msg_pend--;
369 /* Reset the event if there are still queued messages */
370 if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list))
371 sync_set_event(msg_queue_obj->sync_event);
373 /* Exit critical section */
374 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
382 * ======== bridge_msg_put ========
383 * Put a message onto a msg_ctrl queue.
385 int bridge_msg_put(struct msg_queue *msg_queue_obj,
386 const struct dsp_msg *pmsg, u32 utimeout)
388 struct msg_frame *msg_frame_obj;
389 struct msg_mgr *hmsg_mgr;
390 bool put_msg = false;
391 struct sync_object *syncs[2];
395 if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
399 hmsg_mgr = msg_queue_obj->hmsg_mgr;
400 if (!hmsg_mgr->msg_free_list) {
405 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
407 /* If a message frame is available, use it */
408 if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
410 (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
411 if (msg_frame_obj != NULL) {
412 msg_frame_obj->msg_data.msg = *pmsg;
413 msg_frame_obj->msg_data.msgq_id =
414 msg_queue_obj->msgq_id;
415 lst_put_tail(hmsg_mgr->msg_used_list,
416 (struct list_head *)msg_frame_obj);
417 hmsg_mgr->msgs_pending++;
420 if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
421 sync_reset_event(hmsg_mgr->sync_event);
423 /* Release critical section before scheduling DPC */
424 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
425 /* Schedule a DPC, to do the actual data transfer: */
426 iosm_schedule(hmsg_mgr->hio_mgr);
428 if (msg_queue_obj->done)
431 msg_queue_obj->io_msg_pend++;
433 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
435 if (!status && !put_msg) {
436 /* Wait til a free message frame is available, timeout,
438 syncs[0] = hmsg_mgr->sync_event;
439 syncs[1] = msg_queue_obj->sync_done;
440 status = sync_wait_on_multiple_events(syncs, 2, utimeout,
444 /* Enter critical section */
445 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
446 if (msg_queue_obj->done) {
447 msg_queue_obj->io_msg_pend--;
448 /* Exit critical section */
449 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
450 /* Signal that we're not going to access msg_queue_obj
451 * anymore, so it can be deleted. */
452 (void)sync_set_event(msg_queue_obj->sync_done_ack);
455 if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
459 /* Get msg from free list */
460 msg_frame_obj = (struct msg_frame *)
461 lst_get_head(hmsg_mgr->msg_free_list);
463 * Copy message into pmsg and put frame on the
467 msg_frame_obj->msg_data.msg = *pmsg;
468 msg_frame_obj->msg_data.msgq_id =
469 msg_queue_obj->msgq_id;
470 lst_put_tail(hmsg_mgr->msg_used_list,
471 (struct list_head *)msg_frame_obj);
472 hmsg_mgr->msgs_pending++;
474 * Schedule a DPC, to do the actual
477 iosm_schedule(hmsg_mgr->hio_mgr);
480 msg_queue_obj->io_msg_pend--;
481 /* Reset event if there are still frames available */
482 if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
483 sync_set_event(hmsg_mgr->sync_event);
485 /* Exit critical section */
486 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
494 * ======== bridge_msg_register_notify ========
496 int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
497 u32 event_mask, u32 notify_type,
498 struct dsp_notification *hnotification)
502 if (!msg_queue_obj || !hnotification) {
507 if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
512 if (notify_type != DSP_SIGNALEVENT) {
518 status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
519 event_mask, notify_type);
521 status = ntfy_unregister(msg_queue_obj->ntfy_obj,
524 if (status == -EINVAL) {
525 /* Not registered. Ok, since we couldn't have known. Node
526 * notifications are split between node state change handled
527 * by NODE, and message ready handled by msg_ctrl. */
535 * ======== bridge_msg_set_queue_id ========
537 void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
540 * A message queue must be created when a node is allocated,
541 * so that node_register_notify() can be called before the node
542 * is created. Since we don't know the node environment until the
543 * node is created, we need this function to set msg_queue_obj->msgq_id
544 * to the node environment, after the node is created.
547 msg_queue_obj->msgq_id = msgq_id;
551 * ======== add_new_msg ========
552 * Must be called in message manager critical section.
554 static int add_new_msg(struct lst_list *msg_list)
556 struct msg_frame *pmsg;
559 pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
561 lst_init_elem((struct list_head *)pmsg);
562 lst_put_tail(msg_list, (struct list_head *)pmsg);
571 * ======== delete_msg_mgr ========
573 static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
578 if (hmsg_mgr->queue_list) {
579 if (LST_IS_EMPTY(hmsg_mgr->queue_list)) {
580 kfree(hmsg_mgr->queue_list);
581 hmsg_mgr->queue_list = NULL;
585 if (hmsg_mgr->msg_free_list) {
586 free_msg_list(hmsg_mgr->msg_free_list);
587 hmsg_mgr->msg_free_list = NULL;
590 if (hmsg_mgr->msg_used_list) {
591 free_msg_list(hmsg_mgr->msg_used_list);
592 hmsg_mgr->msg_used_list = NULL;
595 kfree(hmsg_mgr->sync_event);
603 * ======== delete_msg_queue ========
605 static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
607 struct msg_mgr *hmsg_mgr;
608 struct msg_frame *pmsg;
611 if (!msg_queue_obj ||
612 !msg_queue_obj->hmsg_mgr || !msg_queue_obj->hmsg_mgr->msg_free_list)
615 hmsg_mgr = msg_queue_obj->hmsg_mgr;
617 /* Pull off num_to_dsp message frames from Msg manager and free */
618 for (i = 0; i < num_to_dsp; i++) {
620 if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
621 pmsg = (struct msg_frame *)
622 lst_get_head(hmsg_mgr->msg_free_list);
625 /* Cannot free all of the message frames */
630 if (msg_queue_obj->msg_free_list) {
631 free_msg_list(msg_queue_obj->msg_free_list);
632 msg_queue_obj->msg_free_list = NULL;
635 if (msg_queue_obj->msg_used_list) {
636 free_msg_list(msg_queue_obj->msg_used_list);
637 msg_queue_obj->msg_used_list = NULL;
640 if (msg_queue_obj->ntfy_obj) {
641 ntfy_delete(msg_queue_obj->ntfy_obj);
642 kfree(msg_queue_obj->ntfy_obj);
645 kfree(msg_queue_obj->sync_event);
646 kfree(msg_queue_obj->sync_done);
647 kfree(msg_queue_obj->sync_done_ack);
649 kfree(msg_queue_obj);
656 * ======== free_msg_list ========
658 static void free_msg_list(struct lst_list *msg_list)
660 struct msg_frame *pmsg;
665 while ((pmsg = (struct msg_frame *)lst_get_head(msg_list)) != NULL)
668 DBC_ASSERT(LST_IS_EMPTY(msg_list));