/*
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by an SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *   Va: Virtual address.
 *   Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/list.h>
#include <dspbridge/sync.h>
#include <dspbridge/utildefs.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/* ----------------------------------- This */
#include <dspbridge/cmm.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)	((pnode)->dw_pa + (pnode)->ul_size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)	((x) + (y))
#define GPPPA2DSPPA(base, x, y)	((x) - (y))
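/*
 * Illustrative use of the macros above (hypothetical values, assuming
 * CMM_ADDTODSPPA is +1; not part of the original file):
 *
 *	y = dw_dsp_phys_addr_offset * c_factor = 0x20000000 * 1
 *	DSPPA2GPPPA(base, 0x11000000, y) == 0x31000000
 *	GPPPA2DSPPA(base, 0x31000000, y) == 0x11000000
 *
 * The offset is added going DSP Pa -> GPP Pa and subtracted going back;
 * the 'base' argument is unused by both macros.
 */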
/*
 * Allocators define a block of contiguous memory used for future allocations.
 *
 *	sma - shared memory allocator.
 *	vma - virtual memory allocator (not used).
 */
struct cmm_allocator {	/* sma */
	unsigned int shm_base;	/* Start of physical SM block */
	u32 ul_sm_size;	/* Size of SM block in bytes */
	unsigned int dw_vm_base;	/* Start of VM block. (Dev driver
					 * context for 'sma') */
	u32 dw_dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
					 * SM space */
	s8 c_factor;	/* DSPPa to GPPPa conversion factor */
	unsigned int dw_dsp_base;	/* DSP virt base byte address */
	u32 ul_dsp_size;	/* DSP seg size in bytes */
	struct cmm_object *hcmm_mgr;	/* back ref to parent mgr */
	/* node list of available memory */
	struct lst_list *free_list_head;
	/* node list of memory in use */
	struct lst_list *in_use_list_head;
};
struct cmm_xlator {	/* Pa<->Va translator object */
	/* CMM object this translator is associated with */
	struct cmm_object *hcmm_mgr;
	/*
	 * Client process virtual base address that corresponds to phys SM
	 * base address for translator's ul_seg_id.
	 * Only 1 segment ID currently supported.
	 */
	unsigned int dw_virt_base;	/* virtual base address */
	u32 ul_virt_size;	/* size of virt space in bytes */
	u32 ul_seg_id;	/* Segment Id */
};
/* CMM management object */
struct cmm_object {
	/*
	 * The CMM lock serializes access to the memory manager across
	 * multiple threads.
	 */
	struct mutex cmm_lock;	/* Lock to access cmm mgr */
	struct lst_list *node_free_list_head;	/* Free list of memory nodes */
	u32 ul_min_block_size;	/* Min SM block; default 16 bytes */
	u32 dw_page_size;	/* Memory page size (1k/4k) */
	/* GPP SM segment ptrs */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};
/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* ul_min_block_size, min block size (bytes) allocated by cmm mgr */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* ul_seg_id, default segment Id for allocator */
};
/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
	1,
	0,		/* dw_dsp_buf_size */
};
/* SM node representing a block of memory. */
struct cmm_mnode {
	struct list_head link;	/* must be 1st element */
	u32 dw_pa;	/* Phys addr */
	u32 dw_va;	/* Virtual address in device process context */
	u32 ul_size;	/* SM block size in bytes */
	u32 client_proc;	/* Process that allocated this mem block */
};
/* ----------------------------------- Globals */
static u32 refs;		/* module reference count */

/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);
/*
 *  ======== cmm_calloc_buf ========
 *      Allocate a SM buffer, zero its contents, and return the physical
 *      address and an optional driver-context virtual address (pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request and return the remainder to the
 *      freelist if it is large enough. The kept block is placed on the
 *      in-use list.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte = NULL;
	s32 cnt;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	if (cmm_mgr_obj && (usize != 0)) {
		if (pattrs->ul_seg_id > 0) {
			/* SegId > 0 is SM */
			/* get the allocator object for this segment id */
			allocator =
			    get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
			/* keep block size a multiple of ul_min_block_size */
			usize =
			    ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size - 1))
			    + cmm_mgr_obj->ul_min_block_size;
			mutex_lock(&cmm_mgr_obj->cmm_lock);
			pnode = get_free_block(allocator, usize);
			if (pnode) {
				delta_size = (pnode->ul_size - usize);
				if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
					/* create a new block with the leftovers
					 * and add it to the freelist */
					new_node =
					    get_node(cmm_mgr_obj,
						     pnode->dw_pa + usize,
						     pnode->dw_va + usize,
						     delta_size);
					/* leftovers go free */
					add_to_free_list(allocator, new_node);
					/* adjust our node's size */
					pnode->ul_size = usize;
				}
				/* Tag node with the client process requesting
				 * the allocation. We'll need to free up a
				 * process's alloc'd SM if the client process
				 * goes away.
				 */
				/* Return TGID instead of process handle */
				pnode->client_proc = current->tgid;

				/* put our node on InUse list */
				lst_put_tail(allocator->in_use_list_head,
					     (struct list_head *)pnode);
				buf_pa = (void *)pnode->dw_pa;	/* physical address */
				/* clear mem */
				pbyte = (u8 *) pnode->dw_va;
				for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
					*pbyte = 0;

				if (pp_buf_va != NULL) {
					/* Virtual address */
					*pp_buf_va = (void *)pnode->dw_va;
				}
			}
			mutex_unlock(&cmm_mgr_obj->cmm_lock);
		}
	}
	return buf_pa;
}
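/*
 * Illustrative usage (hypothetical manager handle, error handling
 * elided; not part of the original file):
 *
 *	void *buf_va;
 *	void *buf_pa = cmm_calloc_buf(hcmm_mgr, 100, NULL, &buf_va);
 *
 * With the default 16-byte ul_min_block_size the 100-byte request is
 * rounded to 112 (((100 - 1) & ~15) + 16); a NULL pattrs selects
 * cmm_dfltalctattrs, i.e. SM segment 1.
 */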
/*
 *  ======== cmm_create ========
 *      Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
	       struct dev_object *hdev_obj,
	       const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;
	int status = 0;
	struct util_sysinfo sys_info;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);

	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (cmm_obj != NULL) {
		if (mgr_attrts == NULL)
			mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */

		/* 4 bytes minimum */
		DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
		/* save away smallest block allocation for this cmm mgr */
		cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
		/* save away the system's memory page size */
		sys_info.dw_page_size = PAGE_SIZE;
		sys_info.dw_allocation_granularity = PAGE_SIZE;
		sys_info.dw_number_of_processors = 1;

		cmm_obj->dw_page_size = sys_info.dw_page_size;

		/* Note: the DSP SM seg table (pa_gppsm_seg_tab[]) is
		 * zeroed by the kzalloc above */

		/* create node free list */
		cmm_obj->node_free_list_head =
		    kzalloc(sizeof(struct lst_list), GFP_KERNEL);
		if (cmm_obj->node_free_list_head == NULL) {
			status = -ENOMEM;
			cmm_destroy(cmm_obj, true);
		} else {
			INIT_LIST_HEAD(&cmm_obj->node_free_list_head->head);
			mutex_init(&cmm_obj->cmm_lock);
			*ph_cmm_mgr = cmm_obj;
		}
	} else {
		status = -ENOMEM;
	}
	return status;
}
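/*
 * Illustrative creation (hypothetical device object; not part of the
 * original file):
 *
 *	struct cmm_object *cmm_mgr;
 *	int status = cmm_create(&cmm_mgr, hdev_obj, NULL);
 *
 * A NULL mgr_attrts selects cmm_dfltmgrattrs and hence the documented
 * 16-byte minimum block size.
 */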
/*
 *  ======== cmm_destroy ========
 *      Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *pnode;

	DBC_REQUIRE(refs > 0);
	if (!hcmm_mgr)
		return -EFAULT;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	/* If not force then fail if outstanding allocations exist */
	if (!force) {
		/* Check for outstanding memory allocations */
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (!status) {
			if (temp_info.ul_total_in_use_cnt > 0) {
				/* outstanding allocations */
				status = -EPERM;
			}
		}
	}
	if (!status) {
		/* Unregister SM allocators */
		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
				un_register_gppsm_seg
				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
				/* Set slot to NULL for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
			}
		}
	}
	if (cmm_mgr_obj->node_free_list_head != NULL) {
		/* Free the free nodes */
		while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
			pnode = (struct cmm_mnode *)
			    lst_get_head(cmm_mgr_obj->node_free_list_head);
			kfree(pnode);
		}
		/* delete NodeFreeList list */
		kfree(cmm_mgr_obj->node_free_list_head);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* delete CS & cmm mgr object */
		mutex_destroy(&cmm_mgr_obj->cmm_lock);
		kfree(cmm_mgr_obj);
	}
	return status;
}
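/*
 * Illustrative teardown (not part of the original file):
 *
 *	status = cmm_destroy(hcmm_mgr, false);
 *
 * With force == false the call fails while any allocation is still
 * outstanding; force == true tears the manager down regardless.
 */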
/*
 *  ======== cmm_exit ========
 *      Discontinue usage of module; free resources when reference count
 *      reaches 0.
 */
void cmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}
/*
 *  ======== cmm_free_buf ========
 *      Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
		 u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;
	struct cmm_mnode *mnode_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	struct cmm_attrs *pattrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_pa != NULL);

	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->ul_seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0))
		return status;

	/* get the allocator for this segment id */
	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (allocator != NULL) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		mnode_obj =
		    (struct cmm_mnode *)lst_first(allocator->in_use_list_head);
		while (mnode_obj) {
			if ((u32) buf_pa == mnode_obj->dw_pa) {
				/* Found it */
				lst_remove_elem(allocator->in_use_list_head,
						(struct list_head *)mnode_obj);
				/* back to freelist */
				add_to_free_list(allocator, mnode_obj);
				status = 0;	/* all right! */
				break;
			}
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->in_use_list_head,
				     (struct list_head *)mnode_obj);
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return status;
}
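/*
 * Illustrative free (continues the cmm_calloc_buf example; not part of
 * the original file):
 *
 *	status = cmm_free_buf(hcmm_mgr, buf_pa, 0);
 *
 * Passing ul_seg_id == 0 selects the default segment from
 * cmm_dfltalctattrs; the block is coalesced back onto the freelist.
 */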
/*
 *  ======== cmm_get_handle ========
 *      Return the communication memory manager object for this device.
 *      This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}
/*
 *  ======== cmm_get_info ========
 *      Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
		 struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *mnode_obj = NULL;

	DBC_REQUIRE(cmm_info_obj != NULL);

	if (!hcmm_mgr)
		return -EFAULT;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->ul_num_gppsm_segs = 0;	/* # of SM segments */
	/* Total # of outstanding alloc */
	cmm_info_obj->ul_total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (altr != NULL) {
			cmm_info_obj->ul_num_gppsm_segs++;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
			    altr->shm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
			    altr->ul_dsp_size + altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
			    altr->shm_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
			    altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
			    altr->dw_dsp_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
			    altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
			    altr->dw_vm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
			mnode_obj = (struct cmm_mnode *)
			    lst_first(altr->in_use_list_head);
			/* Count inUse blocks */
			while (mnode_obj) {
				cmm_info_obj->ul_total_in_use_cnt++;
				cmm_info_obj->seg_info[ul_seg -
						       1].ul_in_use_cnt++;
				/* next node. */
				mnode_obj = (struct cmm_mnode *)
				    lst_next(altr->in_use_list_head,
					     (struct list_head *)mnode_obj);
			}
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}
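/*
 * Illustrative query (not part of the original file):
 *
 *	struct cmm_info info;
 *	if (!cmm_get_info(hcmm_mgr, &info))
 *		pr_info("segs %u, blocks in use %u\n",
 *			info.ul_num_gppsm_segs, info.ul_total_in_use_cnt);
 */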
/*
 *  ======== cmm_init ========
 *      Initializes private state of the CMM module.
 */
bool cmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);
	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}
/*
 *  ======== cmm_register_gppsm_seg ========
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			   u32 dw_gpp_base_pa, u32 ul_size,
			   u32 dsp_addr_offset, s8 c_factor,
			   u32 dw_dsp_base, u32 ul_dsp_size,
			   u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	DBC_REQUIRE(ul_size > 0);
	DBC_REQUIRE(sgmt_id != NULL);
	DBC_REQUIRE(dw_gpp_base_pa != 0);
	DBC_REQUIRE(gpp_base_va != 0);
	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
		    (c_factor >= CMM_SUBFROMDSPPA));
	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
		"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__,
		dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base,
		ul_dsp_size, gpp_base_va);

	if (!hcmm_mgr)
		return -EFAULT;

	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	/* get a slot number */
	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		status = -EPERM;
		goto func_end;
	}
	/* Check if input ul_size is big enough to alloc at least one block */
	if (ul_size < cmm_mgr_obj->ul_min_block_size) {
		status = -EINVAL;
		goto func_end;
	}

	/* create, zero, and tag an SM allocator object */
	psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	if (psma == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	psma->hcmm_mgr = hcmm_mgr;	/* ref to parent */
	psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
	psma->ul_sm_size = ul_size;	/* SM segment size in bytes */
	psma->dw_vm_base = gpp_base_va;
	psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
	psma->c_factor = c_factor;
	psma->dw_dsp_base = dw_dsp_base;
	psma->ul_dsp_size = ul_dsp_size;
	if (psma->dw_vm_base == 0) {
		status = -EPERM;
		goto func_end;
	}
	/* return the actual segment identifier */
	*sgmt_id = (u32) slot_seg + 1;
	/* create memory free list */
	psma->free_list_head = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	if (psma->free_list_head == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	INIT_LIST_HEAD(&psma->free_list_head->head);

	/* create memory in-use list */
	psma->in_use_list_head = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	if (psma->in_use_list_head == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	INIT_LIST_HEAD(&psma->in_use_list_head->head);

	/* Get a mem node for this hunk-o-memory */
	new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
			    psma->dw_vm_base, ul_size);
	/* Place node on the SM allocator's free list */
	if (new_node) {
		lst_put_tail(psma->free_list_head,
			     (struct list_head *)new_node);
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry in the SM segment table */
	cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	if (status && psma) {
		/* Cleanup allocator */
		un_register_gppsm_seg(psma);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}
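/*
 * Illustrative registration (hypothetical addresses and sizes; not part
 * of the original file):
 *
 *	u32 seg_id;
 *	status = cmm_register_gppsm_seg(hcmm_mgr, 0x87000000, 0x10000,
 *					0x0, CMM_ADDTODSPPA, 0x21000000,
 *					0x1000, &seg_id, gpp_base_va);
 *
 * On success seg_id receives slot + 1 (1..CMM_MAXGPPSEGS) and can be
 * used as the ul_seg_id in cmm_calloc_buf()/cmm_free_buf() attributes.
 */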
/*
 *  ======== cmm_un_register_gppsm_seg ========
 *      Unregister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			      u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	DBC_REQUIRE(ul_seg_id > 0);
	if (!hcmm_mgr)
		return -EFAULT;

	if (ul_seg_id == CMM_ALLSEGMENTS)
		ul_id = 1;	/* unregister all segments, starting at id 1 */

	if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
		while (ul_id <= CMM_MAXGPPSEGS) {
			mutex_lock(&cmm_mgr_obj->cmm_lock);
			/* slot = seg_id-1 */
			psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
			if (psma != NULL) {
				un_register_gppsm_seg(psma);
				/* Set alctr ptr to NULL for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
			} else if (ul_seg_id != CMM_ALLSEGMENTS) {
				status = -EPERM;
			}
			mutex_unlock(&cmm_mgr_obj->cmm_lock);
			if (ul_seg_id != CMM_ALLSEGMENTS)
				break;	/* only one segment was requested */

			ul_id++;
		}
	} else {
		status = -EINVAL;
	}
	return status;
}
/*
 *  ======== un_register_gppsm_seg ========
 *      Unregister the SM allocator by freeing all its resources and
 *      nulling the cmm mgr table entry.
 *  Note:
 *      This routine is always called within the CMM lock critical section.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *mnode_obj = NULL;
	struct cmm_mnode *next_node = NULL;

	DBC_REQUIRE(psma != NULL);
	if (psma->free_list_head != NULL) {
		/* free nodes on free list */
		mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head);
		while (mnode_obj) {
			next_node =
			    (struct cmm_mnode *)lst_next(psma->free_list_head,
							 (struct list_head *)
							 mnode_obj);
			lst_remove_elem(psma->free_list_head,
					(struct list_head *)mnode_obj);
			kfree((void *)mnode_obj);
			/* next node. */
			mnode_obj = next_node;
		}
		kfree(psma->free_list_head);	/* delete freelist */
	}
	if (psma->in_use_list_head != NULL) {
		/* free nodes on InUse list */
		mnode_obj =
		    (struct cmm_mnode *)lst_first(psma->in_use_list_head);
		while (mnode_obj) {
			next_node =
			    (struct cmm_mnode *)lst_next(psma->in_use_list_head,
							 (struct list_head *)
							 mnode_obj);
			lst_remove_elem(psma->in_use_list_head,
					(struct list_head *)mnode_obj);
			kfree((void *)mnode_obj);
			/* next node. */
			mnode_obj = next_node;
		}
		kfree(psma->in_use_list_head);	/* delete InUse list */
	}
	if ((void *)psma->dw_vm_base != NULL)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);

	/* Free allocator itself */
	kfree(psma);
}
/*
 *  ======== get_slot ========
 *      Return an available slot number; negative on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg = -1;	/* neg on failure */

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	/* get first available slot in cmm mgr's pa_gppsm_seg_tab[] */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}
	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* failed */

	return slot_seg;
}
/*
 *  ======== get_node ========
 *      Get a memory node from the freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode = NULL;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE(dw_pa != 0);
	DBC_REQUIRE(dw_va != 0);
	DBC_REQUIRE(ul_size != 0);
	/* Check cmm mgr's node freelist */
	if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
	} else {
		/* surely a valid element */
		pnode = (struct cmm_mnode *)
		    lst_get_head(cmm_mgr_obj->node_free_list_head);
	}
	if (pnode) {
		lst_init_elem((struct list_head *)pnode);	/* set self */
		pnode->dw_pa = dw_pa;	/* Physical addr of start of block */
		pnode->dw_va = dw_va;	/* Virtual addr of start of block */
		pnode->ul_size = ul_size;	/* Size of block */
	}
	return pnode;
}
/*
 *  ======== delete_node ========
 *      Put a memory node back on the cmm nodelist for later use.
 *      Doesn't actually delete the node; this avoids heap thrashing.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	DBC_REQUIRE(pnode != NULL);
	lst_init_elem((struct list_head *)pnode);	/* init .self ptr */
	lst_put_tail(cmm_mgr_obj->node_free_list_head,
		     (struct list_head *)pnode);
}
/*
 *  ======== get_free_block ========
 *      Scan the free block list and return the first block that satisfies
 *      the requested size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	if (allocator) {
		struct cmm_mnode *mnode_obj = (struct cmm_mnode *)
		    lst_first(allocator->free_list_head);
		while (mnode_obj) {
			if (usize <= (u32) mnode_obj->ul_size) {
				lst_remove_elem(allocator->free_list_head,
						(struct list_head *)mnode_obj);
				return mnode_obj;
			}
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->free_list_head,
				     (struct list_head *)mnode_obj);
		}
	}
	return NULL;
}
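/*
 * Illustrative first fit (hypothetical sizes; not part of the original
 * file): with a size-ordered freelist of 16-, 48- and 128-byte blocks,
 * a 32-byte request skips the 16-byte node and takes the 48-byte one;
 * cmm_calloc_buf() then returns the 16-byte remainder to the freelist.
 */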
/*
 *  ======== add_to_free_list ========
 *      Coalesce the node into the freelist in ascending size order.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode)
{
	struct cmm_mnode *node_prev = NULL;
	struct cmm_mnode *node_next = NULL;
	struct cmm_mnode *mnode_obj;
	u32 dw_this_pa;
	u32 dw_next_pa;

	DBC_REQUIRE(pnode != NULL);
	DBC_REQUIRE(allocator != NULL);
	dw_this_pa = pnode->dw_pa;
	dw_next_pa = NEXT_PA(pnode);
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (dw_this_pa == NEXT_PA(mnode_obj)) {
			/* found the block ahead of this one */
			node_prev = mnode_obj;
		} else if (dw_next_pa == mnode_obj->dw_pa) {
			node_next = mnode_obj;
		}
		if ((node_prev == NULL) || (node_next == NULL)) {
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->free_list_head,
				     (struct list_head *)mnode_obj);
		} else {
			/* found neighbours on both sides, quit the search */
			break;
		}
	}
	if (node_prev != NULL) {
		/* combine with previous block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_prev);
		/* grow node to hold both */
		pnode->ul_size += node_prev->ul_size;
		pnode->dw_pa = node_prev->dw_pa;
		pnode->dw_va = node_prev->dw_va;
		/* place node on mgr nodeFreeList */
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_prev);
	}
	if (node_next != NULL) {
		/* combine with next block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_next);
		/* grow node to hold both */
		pnode->ul_size += node_next->ul_size;
		/* place node on mgr nodeFreeList */
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_next);
	}
	/* Now, let's add to the freelist in increasing size order */
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (pnode->ul_size <= mnode_obj->ul_size)
			break;

		/* next node. */
		mnode_obj =
		    (struct cmm_mnode *)lst_next(allocator->free_list_head,
						 (struct list_head *)mnode_obj);
	}
	/* if mnode_obj is NULL then add our pnode to the end of the freelist */
	if (mnode_obj == NULL) {
		lst_put_tail(allocator->free_list_head,
			     (struct list_head *)pnode);
	} else {
		/* insert our node before the current traversed node */
		lst_insert_before(allocator->free_list_head,
				  (struct list_head *)pnode,
				  (struct list_head *)mnode_obj);
	}
}
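/*
 * Illustrative coalescing (hypothetical addresses; not part of the
 * original file): freeing a 16-byte block at Pa 0x1010 while the
 * freelist holds blocks [0x1000, 16 bytes] and [0x1020, 32 bytes]
 * merges all three into one 64-byte block at Pa 0x1000, which is then
 * reinserted in size order.
 */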
/*
 *  ======== get_allocator ========
 *      Return the allocator for the given SM segment id.
 *      Segment ids: 1, 2, 3, ..., CMM_MAXGPPSEGS.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	struct cmm_allocator *allocator = NULL;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
	/* may be NULL if no allocator is registered in this slot;
	 * callers must check (make sure it's for real) */
	return allocator;
}
/*
 * The cmm_xlator_[xxx] routines below are used by Node and Stream
 * to perform SM address translation to the client process address space.
 * A "translator" object is created by a node/stream for each SM seg used.
 */
/*
 *  ======== cmm_xlator_create ========
 *      Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
		      struct cmm_object *hcmm_mgr,
		      struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(hcmm_mgr != NULL);

	*xlator = NULL;
	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */

	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->hcmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM segment id */
		xlator_object->ul_seg_id = xlator_attrs->ul_seg_id;
	} else {
		status = -ENOMEM;
	}
	if (!status)
		*xlator = (struct cmm_xlatorobject *)xlator_object;

	return status;
}
/*
 *  ======== cmm_xlator_delete ========
 *      Free the translator resources.
 *      VM gets freed later.
 */
int cmm_xlator_delete(struct cmm_xlatorobject *xlator, bool force)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;

	DBC_REQUIRE(refs > 0);

	kfree(xlator_obj);

	return 0;
}
/*
 *  ======== cmm_xlator_alloc_buf ========
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	void *tmp_va_buff;
	struct cmm_attrs attrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
	DBC_REQUIRE(va_buf != NULL);
	DBC_REQUIRE(pa_size > 0);
	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

	if (xlator_obj) {
		attrs.ul_seg_id = xlator_obj->ul_seg_id;
		__raw_writel(0, va_buf);
		/* Alloc SM buffer */
		pbuf =
		    cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL);
		if (pbuf) {
			/* convert to translator (node/strm) process virtual
			 * address */
			tmp_va_buff = cmm_xlator_translate(xlator,
							   pbuf, CMM_PA2VA);
			__raw_writel((u32)tmp_va_buff, va_buf);
		}
	}
	return pbuf;
}
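/*
 * Illustrative round trip (hypothetical translator handle; not part of
 * the original file):
 *
 *	u32 va_slot;
 *	void *pa = cmm_xlator_alloc_buf(xlator, &va_slot, 64);
 *	status = cmm_xlator_free_buf(xlator, (void *)va_slot);
 *
 * alloc_buf stores the translated Va in va_slot via __raw_writel();
 * free_buf translates that Va back to a Pa before calling
 * cmm_free_buf().
 */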
/*
 *  ======== cmm_xlator_free_buf ========
 *      Free the given SM buffer and descriptor.
 *      Does not free the virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa = NULL;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_va != NULL);
	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

	if (xlator_obj) {
		/* convert Va to Pa so we can free it. */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
					      xlator_obj->ul_seg_id);
			if (status) {
				/* Uh oh, this shouldn't happen. Descriptor
				 * gone! */
				DBC_ASSERT(false);	/* CMM is leaking mem */
			}
		}
	}
	return status;
}
/*
 *  ======== cmm_xlator_info ========
 *      Set/get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
		    u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));

	if (xlator_obj) {
		if (set_info) {
			/* set translator's virtual address range */
			xlator_obj->dw_virt_base = (u32) *paddr;
			xlator_obj->ul_virt_size = ul_size;
		} else {	/* return virt base address */
			*paddr = (u8 *) xlator_obj->dw_virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}
/*
 *  ======== cmm_xlator_translate ========
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));

	if (!xlator_obj)
		goto loop_cont;

	cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
	/* get this translator's default SM allocator */
	DBC_ASSERT(xlator_obj->ul_seg_id > 0);
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
	if (!allocator)
		goto loop_cont;

	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* Gpp Va = Va Base + offset */
			dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
							   allocator->
							   ul_dsp_size);
			dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
			/* Check if translated Va base is in range */
			if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->dw_virt_base +
			      xlator_obj->ul_virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* Gpp Pa = Gpp Base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->ul_dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* Now convert the address to the proper target physical address
	 * if needed */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* Got Gpp Pa now, convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
				dw_addr_xlate,
				allocator->dw_dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* Got DSP Pa, convert to GPP Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
				(u32) paddr,
				allocator->dw_dsp_phys_addr_offset *
				allocator->c_factor);
	}
loop_cont:
	return (void *)dw_addr_xlate;
}
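/*
 * Illustrative translation (hypothetical layout, assuming the address
 * falls within the translator's mapped range; not part of the original
 * file): with shm_base 0x87001000, ul_dsp_size 0x1000 and dw_virt_base
 * 0x40000000, CMM_PA2VA maps Pa 0x87000010 to Va 0x40000010 (offset
 * 0x10 from the segment base 0x87000000), and CMM_VA2PA maps it back.
 */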