4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
7 * space that can be directly mapped to any MPU buffer or memory region
10 * Region: Generic memory entity having a start address and a size
11 * Chunk: Reserved region
13 * Copyright (C) 2005-2006 Texas Instruments, Inc.
15 * This package is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
19 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
23 #include <linux/types.h>
25 /* ----------------------------------- Host OS */
26 #include <dspbridge/host_os.h>
28 /* ----------------------------------- DSP/BIOS Bridge */
29 #include <dspbridge/dbdefs.h>
31 /* ----------------------------------- OS Adaptation Layer */
32 #include <dspbridge/sync.h>
34 /* ----------------------------------- Platform Manager */
35 #include <dspbridge/dev.h>
36 #include <dspbridge/proc.h>
38 /* ----------------------------------- This */
39 #include <dspbridge/dmm.h>
41 /* ----------------------------------- Defines, Data Structures, Typedefs */
/*
 * Convert a virtual_mapping_table entry pointer back to the DSP virtual
 * address it represents: (entry index) * 4K page size, offset from the
 * start of the managed region.
 * NOTE(review): the macro body continues on a line elided from this
 * listing (presumably "+ dyn_mem_map_beg") — confirm against full source.
 */
42 #define DMM_ADDR_VIRTUAL(a) \
43 (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
/* Inverse mapping: DSP virtual address -> table index, 4K granularity. */
45 #define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
49 /* The DMM lock is used to serialize access to the memory manager for
 * multi-process use. NOTE(review): this file-scope dmm_lock appears
 * unused by the visible code, which locks the per-object
 * dmm_obj->dmm_lock instead — confirm whether it is dead. */
51 spinlock_t dmm_lock; /* Lock to access dmm mgr */
61 /* Table of per-4K-page mapping records covering the whole DSP virtual
 * region; allocated in dmm_create_tables(), freed in dmm_delete_tables(). */
62 static struct map_page *virtual_mapping_table;
63 static u32 free_region; /* The index of free region */
65 static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */
66 static u32 table_size; /* The size of virt and phys pages tables */
68 /* ----------------------------------- Function Prototypes */
69 static struct map_page *get_region(u32 addr);
70 static struct map_page *get_free_region(u32 len);
71 static struct map_page *get_mapped_region(u32 addrs);
73 /* ======== dmm_create_tables ========
75 * Create table to hold the information of physical address
76 * the buffer pages that is passed by the user, and the table
77 * to hold the information of the virtual memory that is reserved
 * for the DSP, starting at @addr and spanning @size bytes.
 * Returns 0 on success; a negative status (presumably -ENOMEM on
 * allocation failure — elided lines, confirm) otherwise.
 */
80 int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
82 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
/* Drop any previous table before building a new one (idempotent init). */
85 status = dmm_delete_tables(dmm_obj);
87 dyn_mem_map_beg = addr;
/* One map_page entry per 4K page; round the region size up to 4K. */
88 table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
89 /* Create the free list */
90 virtual_mapping_table = __vmalloc(table_size *
91 sizeof(struct map_page), GFP_KERNEL |
92 __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
93 if (virtual_mapping_table == NULL)
96 /* On successful allocation,
97 * all entries are zero ('free') */
/* Entire region starts as one free chunk anchored at entry 0. */
99 free_size = table_size * PG_SIZE4K;
100 virtual_mapping_table[0].region_size = table_size;
105 pr_err("%s: failure, status 0x%x\n", __func__, status);
111 * ======== dmm_create ========
113 * Create a dynamic memory manager object.
 * On success *dmm_manager points at the new object; hdev_obj and
 * mgr_attrts are unused by the visible lines.
 * NOTE(review): the kzalloc-failure else branch is elided here —
 * presumably returns -ENOMEM; confirm against full source.
 */
115 int dmm_create(struct dmm_object **dmm_manager,
116 struct dev_object *hdev_obj,
117 const struct dmm_mgrattrs *mgr_attrts)
119 struct dmm_object *dmm_obj = NULL;
123 /* create, zero, and tag a cmm mgr object */
124 dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
125 if (dmm_obj != NULL) {
/* Per-object lock guarding all table mutations below. */
126 spin_lock_init(&dmm_obj->dmm_lock);
127 *dmm_manager = dmm_obj;
136 * ======== dmm_destroy ========
138 * Release the communication memory manager resources.
 * Frees the mapping tables; the object itself is presumably kfree'd
 * on an elided line — confirm against full source.
 */
140 int dmm_destroy(struct dmm_object *dmm_mgr)
142 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
146 status = dmm_delete_tables(dmm_obj);
156 * ======== dmm_delete_tables ========
 * Free the virtual mapping table. vfree(NULL) is a safe no-op, so this
 * is callable before the table exists (dmm_create_tables relies on that).
 * NOTE(review): visible lines do not reset virtual_mapping_table to
 * NULL — confirm an elided line does, else a dangling pointer remains.
 */
160 int dmm_delete_tables(struct dmm_object *dmm_mgr)
164 /* Delete all DMM tables */
166 vfree(virtual_mapping_table);
173 * ======== dmm_get_handle ========
175 * Return the dynamic memory manager object for this device.
176 * This is typically called from the client process.
 * Resolves the dev object from @hprocessor when given, otherwise falls
 * back to the first registered device, then queries its DMM manager.
 */
178 int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
181 struct dev_object *hdev_obj;
183 if (hprocessor != NULL)
184 status = proc_get_dev_object(hprocessor, &hdev_obj);
/* NOTE(review): this line pairs with an elided 'else' — confirm. */
186 hdev_obj = dev_get_first(); /* default */
189 status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
195 * ======== dmm_map_memory ========
197 * Add a mapping block to the reserved chunk. DMM assumes that this block
198 * will be mapped in the DSP/IVA's address space. DMM returns an error if a
199 * mapping overlaps another one. This function stores the info that will be
200 * required later while unmapping the block.
 * @addr is the DSP virtual start address, @size is in bytes (multiple
 * of 4K pages per the division below).
 */
202 int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
204 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
205 struct map_page *chunk;
208 spin_lock(&dmm_obj->dmm_lock);
209 /* Find the Reserved memory chunk containing the DSP block to
 * be mapped. NOTE(review): the NULL/"not reserved" guard on the
 * result is on elided lines — confirm chunk is validated before use. */
211 chunk = (struct map_page *)get_region(addr);
213 /* Mark the region 'mapped', leave the 'reserved' info as-is */
214 chunk->mapped = true;
215 chunk->mapped_size = (size / PG_SIZE4K);
218 spin_unlock(&dmm_obj->dmm_lock);
220 dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
221 "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
227 * ======== dmm_reserve_memory ========
229 * Reserve a chunk of virtually contiguous DSP/IVA address space.
 * First-fit allocation: takes a free region, splits off the unused
 * tail as a new free region, and returns the chunk's DSP virtual
 * start address through *prsv_addr.
 */
231 int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
235 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
236 struct map_page *node;
240 spin_lock(&dmm_obj->dmm_lock);
242 /* Try to get a DSP chunk from the free list */
243 node = get_free_region(size);
245 /* DSP chunk of given size is available. */
246 rsv_addr = DMM_ADDR_VIRTUAL(node);
247 /* Calculate the number entries to use */
248 rsv_size = size / PG_SIZE4K;
249 if (rsv_size < node->region_size) {
250 /* Mark remainder of free region */
/* Split: the entry just past our reservation anchors the leftover
 * free region; its size is the old region minus what we took. */
251 node[rsv_size].mapped = false;
252 node[rsv_size].reserved = false;
253 node[rsv_size].region_size =
254 node->region_size - rsv_size;
255 node[rsv_size].mapped_size = 0;
257 /* get_region will return first fit chunk. But we only use what
 * is asked for: shrink this chunk to exactly rsv_size pages. */
259 node->mapped = false;
260 node->reserved = true;
261 node->region_size = rsv_size;
262 node->mapped_size = 0;
263 /* Return the chunk's starting address */
264 *prsv_addr = rsv_addr;
266 /* DSP chunk of given size is not available */
269 spin_unlock(&dmm_obj->dmm_lock);
271 dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
272 "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
273 prsv_addr, status, rsv_addr, rsv_size);
279 * ======== dmm_un_map_memory ========
281 * Remove the mapped block from the reserved chunk.
 * Looks up the chunk mapped at @addr, reports the unmapped byte count
 * through *psize, and clears the 'mapped' state. The chunk stays
 * reserved until dmm_un_reserve_memory().
 */
283 int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
285 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
286 struct map_page *chunk;
289 spin_lock(&dmm_obj->dmm_lock);
/* NOTE(review): the not-found error branch for chunk == NULL sits on
 * elided lines — confirm before trusting the dereferences below. */
290 chunk = get_mapped_region(addr);
295 /* Unmap the region */
296 *psize = chunk->mapped_size * PG_SIZE4K;
297 chunk->mapped = false;
298 chunk->mapped_size = 0;
300 spin_unlock(&dmm_obj->dmm_lock);
302 dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
303 "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
309 * ======== dmm_un_reserve_memory ========
311 * Free a chunk of reserved DSP/IVA address space.
 * Clears any mappings still live inside the reserved chunk, then marks
 * the chunk itself free. Coalescing with neighbours is deferred.
 */
313 int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
315 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
316 struct map_page *chunk;
321 spin_lock(&dmm_obj->dmm_lock);
323 /* Find the chunk containing the reserved address */
324 chunk = get_mapped_region(rsv_addr);
329 /* Free all the mapped pages for this reserved region */
/* NOTE(review): the loop-index advance (presumably i += chunk_size or
 * i++) is on elided lines — confirm the loop terminates as expected. */
331 while (i < chunk->region_size) {
332 if (chunk[i].mapped) {
333 /* Remove mapping from the page tables. */
334 chunk_size = chunk[i].mapped_size;
335 /* Clear the mapping flags */
336 chunk[i].mapped = false;
337 chunk[i].mapped_size = 0;
342 /* Clear the flags (mark the region 'free') */
343 chunk->reserved = false;
344 /* NOTE: We do NOT coalesce free regions here.
345 * Free regions are coalesced in get_region(), as it traverses
346 * the whole mapping table
 */
349 spin_unlock(&dmm_obj->dmm_lock);
351 dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
352 __func__, dmm_mgr, rsv_addr, status, chunk);
358 * ======== get_region ========
360 * Returns a region containing the specified memory region
 * Translates @addr to a table index via DMM_ADDR_TO_INDEX and returns
 * the corresponding map_page entry; NULL if the table does not exist.
 * NOTE(review): no visible bounds check of i against table_size —
 * confirm one exists on elided lines, or callers guarantee range.
 */
362 static struct map_page *get_region(u32 addr)
364 struct map_page *curr_region = NULL;
367 if (virtual_mapping_table != NULL) {
368 /* find page mapped by this address */
369 i = DMM_ADDR_TO_INDEX(addr);
371 curr_region = virtual_mapping_table + i;
374 dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
375 __func__, curr_region, free_region, free_size);
380 * ======== get_free_region ========
382 * Returns the requested free region
 * If the cached free region (free_region/free_size) is too small,
 * re-scans the whole table, coalescing adjacent unreserved regions and
 * tracking the largest one found. Returns NULL if still too small.
 */
384 static struct map_page *get_free_region(u32 len)
386 struct map_page *curr_region = NULL;
391 if (virtual_mapping_table == NULL)
393 if (len > free_size) {
394 /* Find the largest free region
395 * (coalesce during the traversal) */
396 while (i < table_size) {
397 region_size = virtual_mapping_table[i].region_size;
398 next_i = i + region_size;
399 if (virtual_mapping_table[i].reserved == false) {
400 /* Coalesce, if possible */
/* NOTE(review): the 'next region also unreserved' half of this
 * condition is on an elided line — confirm. */
401 if (next_i < table_size &&
402 virtual_mapping_table[next_i].reserved
404 virtual_mapping_table[i].region_size +=
405 virtual_mapping_table
406 [next_i].region_size;
/* Convert pages -> bytes to compare against free_size/len. */
409 region_size *= PG_SIZE4K;
410 if (region_size > free_size) {
/* NOTE(review): free_region is presumably updated to i here
 * on an elided line — confirm. */
412 free_size = region_size;
418 if (len <= free_size) {
419 curr_region = virtual_mapping_table + free_region;
/* Advance the cached free cursor past the pages handed out. */
420 free_region += (len / PG_SIZE4K);
427 * ======== get_mapped_region ========
429 * Returns the requested mapped region
 * Returns the map_page entry for @addrs only if that entry is mapped
 * or reserved; NULL otherwise (including out-of-range or no table).
 */
431 static struct map_page *get_mapped_region(u32 addrs)
434 struct map_page *curr_region = NULL;
436 if (virtual_mapping_table == NULL)
439 i = DMM_ADDR_TO_INDEX(addrs);
440 if (i < table_size && (virtual_mapping_table[i].mapped ||
441 virtual_mapping_table[i].reserved))
442 curr_region = virtual_mapping_table + i;
/*
 * ======== dmm_mem_map_dump ========
 * Debug helper: walk the mapping table region by region, total the free
 * bytes and the largest free block, and print a summary via printk.
 */
447 u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
449 struct map_page *curr_node = NULL;
454 spin_lock(&dmm_mgr->dmm_lock);
456 if (virtual_mapping_table != NULL) {
/* Step by region_size so each iteration lands on a region head. */
457 for (i = 0; i < table_size; i +=
458 virtual_mapping_table[i].region_size) {
459 curr_node = virtual_mapping_table + i;
460 if (curr_node->reserved) {
461 /*printk("RESERVED size = 0x%x, "
463 (curr_node->region_size * PG_SIZE4K),
464 (curr_node->mapped == false) ? 0 :
465 (curr_node->mapped_size * PG_SIZE4K));
 */
468 /* printk("UNRESERVED size = 0x%x\n",
469 (curr_node->region_size * PG_SIZE4K));
 */
/* Unreserved region: add to free total, track the biggest. */
471 freemem += (curr_node->region_size * PG_SIZE4K);
472 if (curr_node->region_size > bigsize)
473 bigsize = curr_node->region_size;
477 spin_unlock(&dmm_mgr->dmm_lock);
478 printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
479 freemem / (1024 * 1024));
480 printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
481 (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
482 printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
483 (bigsize * PG_SIZE4K / (1024 * 1024)));