2 *-----------------------------------------------------------------------------
5 *-----------------------------------------------------------------------------
6 * Copyright (c) 2002-2010, Intel Corporation.
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 *-----------------------------------------------------------------------------
28 * Very basic video memory managment functions required by HAL.
29 *-----------------------------------------------------------------------------
32 #define MODULE_NAME hal.gmm
34 #include <igd_debug.h>
40 #include <linux/module.h>
41 #include <linux/init.h>
45 #define AGP_PHYS_MEMORY 2 /* Physical contigous memory */
/* Bookkeeping entry for one mapped CI (camera/video input) surface. */
46 struct emgd_ci_surface_t{
48 unsigned int v4l2_offset;
51 unsigned long gtt_offset;
/* Fixed-size table of CI surfaces; slots are recycled via the "used" flag. */
53 #define MAX_CI_LIST_SIZE 14
54 struct emgd_ci_surface_t ci_surfaces[MAX_CI_LIST_SIZE];
/* Global memory-manager state: holds the head/tail of the flat chunk list
 * and a back-pointer to the driver context (set in gmm_init()). */
57 gmm_context_t gmm_context;
/* Forward declarations for the static helpers and the page-allocation /
 * GTT-manipulation routines implemented elsewhere in the driver. */
59 gmm_chunk_t *gmm_get_chunk(igd_context_t *context, unsigned long offset);
60 static int gmm_flush_cache(void);
61 static int gmm_alloc_linear_surface(unsigned long *offset,
62 unsigned long pixel_format,
71 static int gmm_alloc_chunk_space(gmm_context_t *gmm_context,
72 unsigned long *offset,
77 static int gmm_import_pages(void **pagelist,
78 unsigned long *gtt_offset,
79 unsigned long numpages);
81 static int gmm_get_page_list(unsigned long offset,
82 unsigned long **pages,
83 unsigned long *page_cnt);
85 gmm_mem_buffer_t *emgd_alloc_pages(unsigned long num_pages, int type);
86 void emgd_free_pages(gmm_mem_buffer_t *mem);
87 void emgd_gtt_remove(igd_context_t *context, gmm_mem_buffer_t *mem,
88 unsigned long offset);
89 void emgd_gtt_insert(igd_context_t *context, gmm_mem_buffer_t *mem,
90 unsigned long offset);
93 static int gmm_map_ci(unsigned long *gtt_offset,
94 unsigned long ci_param,
95 unsigned long *virt_addr,
96 unsigned int map_method,
100 static int gmm_unmap_ci(unsigned long virt_addr);
/*
 * gmm_free() - return a driver-allocated chunk to the free pool.
 *
 * Walks the flat chunk list looking for the chunk whose GTT offset
 * matches `offset` and marks it FREE_ALLOCATED so that
 * gmm_alloc_chunk_space() can later reuse both the backing pages and the
 * GTT address range.  The chunk is never unlinked from the list; final
 * teardown happens in gmm_shutdown().
 */
102 static void gmm_free(unsigned long offset)
106 EMGD_DEBUG("Enter gmm_free(0x%lx)", offset);
108 /* Walk the chunk list */
109 chunk = gmm_context.head_chunk;
111 if (chunk->offset == offset) {
112 switch (chunk->usage) {
/* Double-free and foreign (imported) chunks are warnings, not errors. */
114 EMGD_DEBUG("WARNING: The chunk 0x%lx is already freed", offset);
118 EMGD_DEBUG("WARNING: The chunk 0x%lx was allocated externally", offset);
120 case INUSE_ALLOCATED:
121 EMGD_DEBUG("Freeing the chunk 0x%lx", offset);
124 EMGD_DEBUG("Unknown usage %d for chunk 0x%lx. Memory manager corrupt?",
125 chunk->usage, offset);
130 * What to do if the ref count is > 0? Unmapping is
131 * probably the right thing since nothing should try
132 * to use this. If something does, it should probably
/* A still-mapped chunk is only warned about; the kernel mapping stays
 * alive and is reclaimed in gmm_shutdown(). */
135 if (chunk->ref_cnt > 0 && chunk->addr) {
136 EMGD_DEBUG("WARNING: The chunk 0x%lx is mapped", offset);
137 /* chunk->addr will be freed by gmm_shutdown */
141 /* Free the array of page address, if applicable: */
142 if (chunk->page_addresses != NULL) {
143 EMGD_DEBUG("About to free chunk->page_addresses = 0x%p",
144 chunk->page_addresses);
145 OS_FREE(chunk->page_addresses);
146 chunk->page_addresses = NULL;
149 chunk->usage = FREE_ALLOCATED; /* mark as free */
155 EMGD_ERROR("gmm_free() did not find the chunk 0x%lx to free", offset);
/*
 * gmm_release_import() - release a chunk whose backing pages were
 * imported from an external allocator (see gmm_import_pages()).
 *
 * The GTT address range stays on the chunk list, marked FREE_IMPORTED,
 * so future imports can reuse it.  The pages themselves are owned and
 * freed by the importer, which is why only the gmm_mem_buffer_t is
 * zeroed here rather than freed.
 */
159 static void gmm_release_import(unsigned long offset)
163 EMGD_DEBUG("Enter gmm_release_import(0x%lx)", offset);
165 /* Walk the chunk list */
166 chunk = gmm_context.head_chunk;
168 if (chunk->offset == offset) {
169 switch (chunk->usage) {
171 case INUSE_ALLOCATED:
172 EMGD_DEBUG("WARNING: The chunk 0x%lx was not an imported chunk", offset);
175 EMGD_DEBUG("Releasing the chunk 0x%lx", offset);
178 EMGD_DEBUG("WARNING: The chunk 0x%lx has already been released", offset);
181 EMGD_DEBUG("Unknown usage %d for chunk 0x%lx. Memory manager corrupt?",
182 chunk->usage, offset);
187 * What to do if the ref count is > 0? Unmapping is
188 * probably the right thing since nothing should try
189 * to use this. If something does, it should probably
192 if (chunk->ref_cnt > 0) {
193 EMGD_DEBUG("WARNING: The chunk 0x%lx is mapped", offset);
198 /* Free the array of page address, if applicable: */
199 if (chunk->page_addresses != NULL) {
200 EMGD_DEBUG("About to free chunk->page_addresses = 0x%p",
201 chunk->page_addresses);
202 OS_FREE(chunk->page_addresses);
203 chunk->page_addresses = NULL;
206 /* Zero out the gmm_mem_buffer_t */
207 OS_MEMSET(chunk->gtt_mem, 0, sizeof(gmm_mem_buffer_t));
209 /* Mark address space as free */
210 chunk->usage = FREE_IMPORTED;
/* Fixed: this message previously said "gmm_free()" - a copy/paste error
 * from the function above that made log output misleading. */
216 EMGD_ERROR("gmm_release_import() did not find the chunk 0x%lx to free", offset);
/*
 * gmm_alloc_region() - allocate a non-surface region (overlay registers,
 * HW status page, DMA buffers, contexts, ...) from the chunk allocator.
 *
 * Each region type ORs the alignment flags it requires into `flags`,
 * the requested size is rounded up to a whole number of 4KB pages, and
 * the request is handed to gmm_alloc_chunk_space().  On -IGD_ERROR_NOMEM
 * the cache is flushed and the allocation retried; note the retry only
 * helps if gmm_flush_cache() can reclaim memory (it is currently a stub).
 */
220 static int gmm_alloc_region(unsigned long *offset,
226 unsigned long aligned_size;
227 unsigned long phys = 0;
230 EMGD_DEBUG("Parameters: size=%lu, type=%d, flags=0x%lx", *size, type, flags);
235 case IGD_GMM_REGION_TYPE_OVLREG:
236 flags |= IGD_GMM_REGION_ALIGN_MMAP;
239 case IGD_GMM_REGION_TYPE_OVLREG64:
240 flags |= IGD_GMM_REGION_ALIGN_64K;
243 case IGD_GMM_REGION_TYPE_HWSTATUS:
244 flags |= IGD_GMM_REGION_ALIGN_MMAP;
248 case IGD_GMM_REGION_TYPE_DMA:
249 flags |= IGD_GMM_REGION_ALIGN_MMAP;
251 case IGD_GMM_REGION_TYPE_PERSISTENT:
252 flags |= IGD_GMM_REGION_ALIGN_MMAP;
254 case IGD_GMM_REGION_TYPE_BPL:
255 flags |= IGD_GMM_REGION_ALIGN_MMAP;
258 case IGD_GMM_REGION_TYPE_CONTEXT:
259 flags |= IGD_GMM_REGION_ALIGN_CONTEXT | IGD_GMM_REGION_ALIGN_MMAP;
264 EMGD_ERROR_EXIT("Invalid Region type requested: 0x%8.8x", type);
/* Round up to a 4KB page multiple. */
268 aligned_size = (*size + 4095) & ~4095;
269 EMGD_DEBUG("aligned_size=%lu", aligned_size);
272 ret = gmm_alloc_chunk_space(&gmm_context, offset, aligned_size, phys,
274 } while ((ret == -IGD_ERROR_NOMEM) && gmm_flush_cache());
276 EMGD_DEBUG("EXIT Returning %d", ret);
/* gmm_get_num_surface() - walk the chunk list, counting entries, and
 * store the total in *count. */
280 static int gmm_get_num_surface(unsigned long *count)
286 /* Walk the chunk list */
287 chunk = gmm_context.head_chunk;
/*
 * gmm_get_surface_list() - build a vmalloc'd array of {offset, size}
 * records, one per chunk on the chunk list.  *list_size receives the
 * entry count (via gmm_get_num_surface()) and the caller owns the
 * returned array.
 *
 * NOTE(review): the vmalloc() result does not appear to be NULL-checked
 * before the list walk writes through it - confirm against full source.
 */
298 static int gmm_get_surface_list(unsigned long allocated_size,
299 unsigned long *list_size,
300 igd_surface_list_t **surface_list)
303 igd_surface_list_t *tmp_list;
306 gmm_get_num_surface(list_size);
309 *surface_list = vmalloc(*list_size * sizeof(igd_surface_list_t));
311 /* Walk the chunk list */
312 chunk = gmm_context.head_chunk;
313 tmp_list = *surface_list;
316 tmp_list->offset = chunk->offset;
317 tmp_list->size = chunk->size;
/*
 * gmm_alloc_surface() - dispatch-table entry point for surface
 * allocation.  Applies minimum-pitch and cursor-specific adjustments
 * (when the corresponding flags are set) and forwards the request to
 * gmm_alloc_linear_surface().
 */
327 static int gmm_alloc_surface(unsigned long *offset,
328 unsigned long pixel_format,
330 unsigned int *height,
334 unsigned long *flags)
340 EMGD_DEBUG("Parameters: pixel_format=0x%08lx,", pixel_format);
341 EMGD_DEBUG(" width=%u, height=%u", *width, *height);
342 EMGD_DEBUG(" pitch=%u, type=%d, flags=0x%08lx", *pitch, type, *flags);
346 if (! (*flags & IGD_MIN_PITCH)) {
350 if (*flags & IGD_SURFACE_CURSOR) {
356 ret = gmm_alloc_linear_surface(offset, pixel_format, width, height, pitch,
357 size, type, *flags, phys);
359 EMGD_DEBUG("EXIT Returning %d", ret);
365 * Given an offset, find the chunk and return the physical address.
367 static int gmm_virt_to_phys(unsigned long offset,
368 unsigned long *physical)
373 EMGD_DEBUG("Looking for offset=0x%lx", offset);
375 /* Walk the chunk list */
376 chunk = gmm_context.head_chunk;
378 if (chunk->offset == offset) {
/* NOTE(review): per the comments further down this file, gtt_mem->physical
 * is only meaningful for single-page or physically-contiguous
 * allocations - callers should not rely on it for scatter allocations. */
379 *physical = chunk->gtt_mem->physical;
380 EMGD_DEBUG("Physical address = 0x%08lx", *physical);
387 /* offset not found */
388 EMGD_ERROR_EXIT("Did not find offset (0x%lx); returning %d",
389 offset, -IGD_ERROR_NOMEM);
390 return -IGD_ERROR_NOMEM;
/* Stub implementations: cache flush and power-management save/restore.
 * Each only logs its invocation; no state is flushed, saved or restored. */
394 static int gmm_flush_cache(void)
396 EMGD_DEBUG("Enter gmm_flush_cache(), which is stubbed");
400 static void gmm_save(igd_context_t *context, void **state)
402 EMGD_DEBUG("Enter gmm_save(), which is stubbed");
406 static void gmm_restore(igd_context_t *context, void *state)
408 EMGD_DEBUG("Enter gmm_restore(), which is stubbed");
413 * Create a virtual address mapping for a block of video memory.
415 static void *gmm_map(unsigned long offset)
418 struct page **page_map;
421 unsigned long num_pages;
424 EMGD_DEBUG("Parameter: offset=0x%lx", offset);
426 chunk = gmm_get_chunk(gmm_context.context, offset);
429 printk(KERN_ERR"[EMGD] gmm_map: Failed to find chunk: 0x%lx\n", offset);
434 * Check if this as been mapped already and return that map instead
439 EMGD_DEBUG("This chunk is already mapped!");
444 * Read the physical addresses of the allocation from the GTT
445 * and convert that to a page list.
448 num_pages = chunk->gtt_mem->page_count;
/* Temporary array of struct page* handed to vmap().
 * NOTE(review): whether page_map is vfree()d after vmap() succeeds is not
 * visible here - confirm it is not leaked. */
449 page_map = vmalloc(num_pages * sizeof(struct page *));
450 if (page_map == NULL) {
451 printk(KERN_ERR"[EMGD] gmm_map: vmalloc failed.\n");
455 for (i = 0; i < num_pages; i++) {
456 page_map[i] = chunk->gtt_mem->pages[i];
/* Mapped with UC-minus page protection - presumably to keep CPU accesses
 * coherent with the display engine; confirm against platform docs. */
459 addr = vmap(page_map, num_pages, VM_MAP, PAGE_KERNEL_UC_MINUS);
464 EMGD_DEBUG("Mapped address = 0x%p", addr);
/*
 * gmm_unmap() - drop a kernel mapping created by gmm_map().  Finds the
 * owning chunk by its mapped virtual address; the vunmap() happens only
 * once the chunk's reference count reaches zero.
 */
471 static void gmm_unmap(void *addr)
476 EMGD_DEBUG("Parameter: addr=0x%p", addr);
478 /* Look up the chunk that was mapped to this address */
479 chunk = gmm_context.head_chunk;
481 if (chunk->addr == addr) {
482 EMGD_DEBUG("The chunk with addr=0x%p has the offset = 0x%08lx", addr,
485 if (chunk->ref_cnt == 0) {
486 EMGD_DEBUG("About to call vunmap(0x%p)", addr);
/*
 * gmm_init() - initialize the graphics memory manager.
 *
 * Publishes the GMM entry points through the driver's dispatch and
 * mod_dispatch tables and resets the global chunk list.  Entries set to
 * NULL are features this (kernel-mode) memory manager does not implement.
 *
 * @param context     driver context whose dispatch tables are populated
 * @param scratch_mem scratch memory parameter (currently only logged)
 * @param max_fb_mem  maximum framebuffer memory (currently only logged)
 * @return 0 on success
 */
498 int gmm_init(igd_context_t *context,
499 unsigned long scratch_mem,
500 unsigned long max_fb_mem)
503 EMGD_DEBUG("Parameters: scratch_mem=0x%lx, max_fb_mem=%lu",
504 scratch_mem, max_fb_mem);
506 context->dispatch.gmm_alloc_surface = gmm_alloc_surface;
507 context->dispatch.gmm_alloc_region = gmm_alloc_region;
508 context->dispatch.gmm_import_pages = gmm_import_pages;
509 context->dispatch.gmm_virt_to_phys = gmm_virt_to_phys;
510 context->dispatch.gmm_free = gmm_free;
511 context->dispatch.gmm_release_import = gmm_release_import;
512 context->dispatch.gmm_memstat = NULL;
513 context->dispatch.gmm_alloc_cached = NULL;
514 context->dispatch.gmm_free_cached = NULL;
515 context->dispatch.gmm_alloc_cached_region = NULL;
516 context->dispatch.gmm_free_cached_region = NULL;
517 context->dispatch.gmm_flush_cache = gmm_flush_cache;
518 context->dispatch.gmm_alloc_reservation = NULL;
519 context->dispatch.gmm_alloc_heap = NULL;
520 context->dispatch.gmm_alloc_heap_block = NULL;
521 context->dispatch.gmm_free_heap_block = NULL;
522 context->dispatch.gmm_get_heap_from_block = NULL;
523 context->dispatch.gmm_get_pvtheap_size = NULL;
524 context->dispatch.gmm_get_cache_mem = NULL;
525 context->dispatch.gmm_alloc_persistent_region = NULL;
526 context->dispatch.gmm_free_persistent_region = NULL;
527 context->dispatch.gmm_map = gmm_map;
528 context->dispatch.gmm_unmap = gmm_unmap;
529 context->dispatch.gmm_get_page_list = gmm_get_page_list;
530 context->dispatch.gmm_get_num_surface = gmm_get_num_surface;
531 context->dispatch.gmm_get_surface_list = gmm_get_surface_list;
532 context->dispatch.gmm_map_ci = gmm_map_ci;
533 context->dispatch.gmm_unmap_ci = gmm_unmap_ci;
535 context->mod_dispatch.gmm_save = gmm_save;
536 context->mod_dispatch.gmm_restore = gmm_restore;
/* Start with an empty chunk list; chunks are appended on first alloc. */
538 gmm_context.context = context;
539 gmm_context.head_chunk = NULL;
540 gmm_context.tail_chunk = NULL;
542 /* Reserve memory for framebuffer ??? */
544 EMGD_DEBUG("EXIT Returning %d", 0);
/*
 * gmm_shutdown() - tear down the chunk list at driver unload.
 *
 * Walks every chunk: warns about chunks that were never freed, removes
 * GTT entries, frees driver-allocated backing pages, and releases the
 * per-chunk page-address bookkeeping.
 */
549 void gmm_shutdown(igd_context_t *context)
551 gmm_chunk_t *chunk, *del;
552 struct drm_device *dev;
556 dev = (struct drm_device *)context->drm_dev;
558 /* Walk the chunk list */
559 chunk = gmm_context.head_chunk;
561 EMGD_DEBUG("process chunk at 0x%lx", chunk->offset);
562 if (chunk->usage == INUSE_ALLOCATED || chunk->usage == INUSE_IMPORTED) {
563 EMGD_ERROR("Chunk at 0x%lx not properly freed", chunk->offset);
566 if (chunk->addr != NULL) {
571 emgd_gtt_remove(context, chunk->gtt_mem, chunk->offset);
/* Only pages this driver allocated are freed here; imported pages
 * remain the property of the external allocator. */
574 if (chunk->usage == INUSE_ALLOCATED || chunk->usage == FREE_ALLOCATED) {
575 emgd_free_pages(chunk->gtt_mem);
578 /* Free the array of page address, if applicable: */
579 if (chunk->page_addresses != NULL) {
580 EMGD_DEBUG("About to free chunk->page_addresses = 0x%p",
581 chunk->page_addresses);
582 OS_FREE(chunk->page_addresses);
/* gmm_get_chunk() - linear search of the global chunk list by GTT
 * offset; logs an error when the offset is not found. */
595 gmm_chunk_t *gmm_get_chunk(igd_context_t *context, unsigned long offset)
599 chunk = gmm_context.head_chunk;
601 if (chunk->offset == offset) {
607 printk(KERN_ERR "[EMGD] gmm_get_chunk: Failed to find chunk 0x%lx\n",
/*
 * gmm_alloc_linear_surface() - compute the pitch and byte size for a
 * linear surface from its pixel format and dimensions, then allocate
 * backing memory via gmm_alloc_chunk_space() (retrying once after a
 * cache flush on -IGD_ERROR_NOMEM).
 */
614 static int gmm_alloc_linear_surface(unsigned long *offset,
615 unsigned long pixel_format,
617 unsigned int *height,
626 unsigned long min_pitch;
629 EMGD_DEBUG("Parameters: pixel_format=0x%08lx,", pixel_format);
630 EMGD_DEBUG(" width=%u, height=%u", *width, *height);
631 EMGD_DEBUG(" pitch=%u, size=%lu, type=%lu", *pitch, *size, type);
632 EMGD_DEBUG(" flags=0x%08lx; phys=%lu", flags, phys);
634 /* Validate surface */
643 /* Set the minimum surface pitch */
/* min_pitch = bytes per row: bits-per-pixel * width, converted to bytes. */
644 min_pitch = (IGD_PF_DEPTH(pixel_format) * *width) >> 3;
645 if (min_pitch < *pitch) {
649 /* Pitch for both PLB and TNC requires 64-byte alignment */
650 min_pitch = ALIGN(min_pitch, 64);
653 * Size should be based on pixel format and pitch, not just pitch.
654 * For YUV surfaces, it is smaller than RGB surfaces.
656 switch (IGD_PF_TYPE(pixel_format)) {
657 case PF_TYPE_YUV_PLANAR:
/* Planar YUV: full-height luma plane plus half-height chroma data. */
658 *size = min_pitch * (*height + (*height>>1));
660 case PF_TYPE_YUV_PACKED:
661 /* FIXME: What should this really be? */
662 *size = min_pitch * *height;
665 *size = min_pitch * *height;
671 /* Page align size */
672 align = (*size + 4095) & ~4095;
675 * Flags provide information on the type of surface being requested
676 * 0x04 = cursor surface
677 * 0x08 = overlay surface
678 * 0x10 = display surface
679 * 0x40 = video surface
/* NOTE(review): the call below passes *size rather than the page-aligned
 * "align" value computed above - confirm which is intended. */
682 ret = gmm_alloc_chunk_space(&gmm_context, offset, *size, phys, flags);
683 } while ((ret == -IGD_ERROR_NOMEM) && gmm_flush_cache());
685 EMGD_DEBUG("EXIT Returning %d", ret);
693 * gmm_contig_page_list(): Create the page list for a previously-allocated
694 * block of contiguous memory. (This is needed for GTT insertion, and normally
695 * created by the emgd_alloc_pages() function.)
697 static gmm_mem_buffer_t *gmm_contig_page_list(unsigned long num_pages,
698 unsigned long phys_addr)
700 gmm_mem_buffer_t *mem;
/* NOTE(review): phys_to_virt()/virt_to_page() are only valid for
 * direct-mapped (lowmem) physical addresses - confirm callers never pass
 * highmem or device addresses here. */
703 void *virt_addr = phys_to_virt(phys_addr);
705 mem = (gmm_mem_buffer_t *)kzalloc(sizeof(gmm_mem_buffer_t), GFP_KERNEL);
707 printk(KERN_ERR "[EMGD] Cannot allocate gmm_mem_buffer_t ");
708 EMGD_ERROR_EXIT("Returning NULL\n");
712 /* First allocate page array */
713 list_size = num_pages * sizeof(struct page *);
714 mem->vmalloc_flag = false;
/* Small page arrays come from kmalloc (no retry); larger ones, or a
 * failed kmalloc, fall back to vmalloc and set vmalloc_flag so the
 * matching free routine can pick the right deallocator. */
716 if (list_size <= (2 * PAGE_SIZE)) {
717 mem->pages = kmalloc(list_size, GFP_KERNEL | __GFP_NORETRY);
720 if (mem->pages == NULL) {
721 mem->pages = vmalloc(list_size);
722 mem->vmalloc_flag = true;
725 if (mem->pages == NULL) {
727 printk(KERN_ERR "Failed to allocate memory info struct.\n");
728 EMGD_ERROR_EXIT("Returning NULL\n");
/* The block is physically contiguous, so successive pages are simply
 * successive struct page entries after the first. */
732 mem->pages[0] = virt_to_page(virt_addr);
734 for (i = 1; i < num_pages; i++) {
735 mem->pages[i] = mem->pages[i-1] + 1;
738 mem->physical = page_to_phys(mem->pages[0]);
739 mem->page_count = num_pages;
745 * gmm_map_contig_buffer(): Map a previously-allocated contiguous SDRAM memory
746 * block into a graphics-accessible memory.
749 static int gmm_map_contig_buffer(gmm_context_t *gmm_context,
750 unsigned long phys_addr,
752 unsigned long *offset)
759 /* Check for a free contiguous chunk of sufficent size */
760 chunk = gmm_context->head_chunk;
762 if ((chunk->usage== FREE_ALLOCATED) && (chunk->size >= size)
763 && (chunk->type == AGP_PHYS_MEMORY)) {
764 /* Re-use this chunk */
765 chunk->usage = INUSE_ALLOCATED;
766 EMGD_DEBUG("Re-using old chunk with offset=0x%lx",
768 EMGD_DEBUG("EXIT Returning %d", 0);
773 /* Allocate a new chunk list element */
775 chunk = (gmm_chunk_t *)OS_ALLOC(sizeof(gmm_chunk_t));
777 printk(KERN_ERR "[EMGD] Cannot allocate gmm_chunk_t element");
778 EMGD_ERROR_EXIT("Returning %d", -IGD_ERROR_NOMEM);
779 return -IGD_ERROR_NOMEM;
782 /*if want to reuse the chunk, no need memset it here.
783 * we need the previous gtt offset of chunk.
785 OS_MEMSET(chunk, 0, sizeof(gmm_chunk_t));
789 /* Determine the offset value for this chunk
790 * if this chunk is new allocated, then we set tailk_chunk->offset+size to chunk->offset
791 * if this chunk is reused, then no need to assign the chunk->offset again.
792 * also no need to insert this chunk into list again, because this chunk is already in the list.
796 if (gmm_context->tail_chunk == NULL) {
801 chunk->offset = gmm_context->tail_chunk->offset +
802 gmm_context->tail_chunk->size;
806 /* Adjust the offset since display surfaces require 256KB alignment */
807 chunk->offset = (chunk->offset + 0x3ffff) & ~0x3ffff;
809 /* Insert this chunk in the list */
811 if (gmm_context->head_chunk == NULL) {
813 gmm_context->head_chunk = chunk;
815 gmm_context->tail_chunk->next = chunk;
817 gmm_context->tail_chunk = chunk;
818 chunk->usage = INUSE_ALLOCATED;
823 /* Contiguous memory is needed, so set the type to AGP_PHYS_MEMORY */
825 chunk->pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
826 chunk->type = AGP_PHYS_MEMORY;
828 /* Create the GTT page list for this contiguous memory block */
/* NOTE(review): on failure below, the chunk has already been linked into
 * the list with a NULL gtt_mem; also the error text ("gmm_chunk_t
 * element") looks copy/pasted from the allocation above - confirm. */
829 chunk->gtt_mem = gmm_contig_page_list(chunk->pages, phys_addr);
830 if (chunk->gtt_mem == NULL) {
831 printk(KERN_ERR "[EMGD] Cannot allocate gmm_chunk_t element");
832 EMGD_ERROR_EXIT("Returning %d", -IGD_ERROR_NOMEM);
833 return -IGD_ERROR_NOMEM;
836 /* Assign the specified memory block to this chunk */
838 chunk->page_addresses = NULL;
843 /* Now update the GTT so the display HW can access this memory */
844 emgd_gtt_insert(gmm_context->context, chunk->gtt_mem, chunk->offset);
846 /* Bind the gart memory to the offset */
849 /* For contiguous pages, physical is the address of the first allocated page */
850 if (chunk->gtt_mem->physical == 0x0) {
851 chunk->gtt_mem->physical = page_to_phys(chunk->gtt_mem->pages[0]);
854 /* Return the offset associated with this contiguous block */
855 *offset = chunk->offset;
862 * gmm_map_to_graphics(): Facilitates direct display of contiguous video input
863 * buffers by mapping the specified block into the "graphics aperture" via the
866 int gmm_map_to_graphics(unsigned long phys_addr,
868 unsigned long *offset)
/* Reject zero address or zero size up front; otherwise delegate to the
 * contiguous-buffer mapping helper. */
874 if (phys_addr && size) {
875 ret = gmm_map_contig_buffer(&gmm_context, phys_addr, size,
879 printk(KERN_ERR "Invalid address (0x%lx) and/or size (0x%lx) !",
881 printk(KERN_ERR "EXIT Returning %d", -EINVAL);
890 * find gtt_offset and virtual address from ci_surface list according to the same v4l2_offset
893 static int gmm_map_ci(unsigned long *gtt_offset,
894 unsigned long ci_param, /* virtaddr or v4l2_offset */
895 unsigned long *virt_addr,
896 unsigned int map_method,
/* Map path (presumably selected by map_method - confirm): ci_param is a
 * kernel virtual address; translate to physical, insert into the GTT,
 * then record the surface in a free ci_surfaces[] slot. */
904 ret = gmm_map_to_graphics(virt_to_phys((unsigned long *)ci_param),size,gtt_offset);
908 for(i=0;i<MAX_CI_LIST_SIZE;i++){
910 if(!ci_surfaces[i].used){
912 ci_surfaces[i].used = 1;
913 ci_surfaces[i].virt = ci_param;
914 ci_surfaces[i].size = size;
915 ci_surfaces[i].gtt_offset = *gtt_offset;
916 *virt_addr = ci_param;
/* Lookup path: ci_param is a v4l2_offset of an already-mapped surface;
 * return its recorded GTT offset and virtual address. */
924 for(i=0;i<MAX_CI_LIST_SIZE;i++){
925 if(ci_surfaces[i].used && (ci_surfaces[i].v4l2_offset ==ci_param)){
927 *gtt_offset = ci_surfaces[i].gtt_offset;
928 *virt_addr = ci_surfaces[i].virt;
938 * gmm_unmap_contig_buffer(): Un-map a previously-allocated contiguous SDRAM
939 * memory block into graphics memory.
942 static int gmm_unmap_contig_buffer(gmm_context_t *gmm_context,
943 unsigned long offset,
950 /* Locate the specified chunk and mark it as unused */
951 chunk = gmm_context->head_chunk;
953 if ((chunk->usage == INUSE_ALLOCATED) && (chunk->size >= size) &&
954 (chunk->type == AGP_PHYS_MEMORY) &&
955 chunk->offset == offset) {
957 emgd_gtt_remove(gmm_context->context, chunk->gtt_mem, chunk->offset);
959 chunk->usage = FREE_ALLOCATED;
961 /* no need release the chunk from chunk list.
962 * Because need reuse the offset in gtt table of this chunk
/* NOTE(review): gtt_mem is kfree()d here but the pointer is not NULLed,
 * and the chunk stays on the list as FREE_ALLOCATED; gmm_shutdown()
 * calls emgd_free_pages() on FREE_ALLOCATED chunks, which could
 * dereference/free this stale pointer - confirm against full source.
 * (The NULL guard before kfree() is also redundant.) */
964 if(chunk->gtt_mem !=NULL)
965 kfree(chunk->gtt_mem);
967 /* Free the array of page address, if applicable: */
968 if (chunk->page_addresses != NULL) {
969 EMGD_DEBUG("About to free chunk->page_addresses = 0x%p",
970 chunk->page_addresses);
971 OS_FREE(chunk->page_addresses);
978 printk(KERN_ERR "Buffer @ 0x%lx (size 0x%lu) not found !", offset, size);
979 printk(KERN_ERR "EXIT Returning %d", -EINVAL);
986 * gmm_unmap_from_graphics(): Disables direct display of DMA video input buffers
987 * by unmapping the specified block from the "graphics aperture" via the GTT.
989 int gmm_unmap_from_graphics(unsigned long offset, unsigned long size)
/* Reject zero offset or size; otherwise mark the matching GTT chunk free. */
994 if (offset && size) {
995 /* Mark the GTT chunk as currently unused */
996 ret = gmm_unmap_contig_buffer(&gmm_context, offset, size);
998 printk(KERN_ERR "Invalid offset (0x%lx) and/or size (0x%lx) !",
1000 printk(KERN_ERR "EXIT Returning %d", -EINVAL);
1006 EXPORT_SYMBOL(gmm_unmap_from_graphics);
1010 * Maintain a very simple linear linked list of memory allocations. Try
1011 * to re-use freed blocks. No error checking is done and alignment is
1015 static int gmm_alloc_chunk_space(gmm_context_t *gmm_context,
1016 unsigned long *offset,
1019 unsigned long flags)
1022 struct drm_device *dev;
1025 EMGD_DEBUG("Parameters: size=%lu; phys=%lu", size, phys);
1026 EMGD_DEBUG(" flags=0x%08lx", flags);
1028 /* Check for a free chunk of sufficent size */
1029 chunk = gmm_context->head_chunk;
/* First-fit reuse: any freed chunk big enough and of the matching memory
 * type (contiguous vs. normal) is claimed as-is. */
1031 if ((chunk->usage == FREE_ALLOCATED) && (chunk->size >= size) &&
1032 (chunk->type == (phys ? AGP_PHYS_MEMORY : AGP_NORMAL_MEMORY))) {
1033 chunk->usage = INUSE_ALLOCATED;
1034 *offset = chunk->offset;
1035 EMGD_DEBUG("Re-using old chunk with offset=0x%lx", chunk->offset);
1036 EMGD_DEBUG("EXIT Returning %d", 0);
1039 chunk = chunk->next;
1042 /* Allocate a new chunk */
1043 chunk = (gmm_chunk_t *)OS_ALLOC(sizeof(gmm_chunk_t));
1045 printk(KERN_ALERT "[EMGD] Cannot allocate gmm_chunk_t");
1046 EMGD_ERROR_EXIT("Returning %d", -IGD_ERROR_NOMEM);
1047 return -IGD_ERROR_NOMEM;
1049 OS_MEMSET(chunk, 0, sizeof(gmm_chunk_t));
1052 * First allocate the memory from the gart driver. If this failes,
1053 * don't bother allocating a new chunk.
1055 dev = (struct drm_device *)gmm_context->context->drm_dev;
1057 chunk->pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
1059 * If we need phyical contiguous memory, then we need to
1060 * set the type to AGP_PHYS_MEMORY, otherwise use AGP_NORMAL_MEMORY
1063 chunk->type = AGP_PHYS_MEMORY;
1064 EMGD_DEBUG("Allocate AGP_PHYS; size = 0x%08lx", chunk->size);
1066 chunk->type = AGP_NORMAL_MEMORY;
1067 EMGD_DEBUG("Allocate AGP_NORMAL; size = 0x%08lx", chunk->size);
/* NOTE(review): the two error returns below appear to leak the chunk
 * allocated above (no OS_FREE before return) - confirm against the full
 * source, elided lines may release it. */
1072 EMGD_ERROR_EXIT("drm device is NULL; Returning %d", -IGD_ERROR_NOMEM);
1073 return -IGD_ERROR_NOMEM;
1077 /* Allocate memory from the AGPGART */
1078 chunk->gtt_mem = emgd_alloc_pages(chunk->pages, chunk->type);
1079 if (!chunk->gtt_mem) {
1081 printk(KERN_ALERT "[EMGD] Failed to allocated AGP memory.\n");
1082 EMGD_DEBUG("gmm_alloc_chunk_space() returning %d", -IGD_ERROR_NOMEM);
1083 return -IGD_ERROR_NOMEM;
1087 chunk->usage = INUSE_ALLOCATED;
1089 chunk->page_addresses = NULL;
1092 * Get the next available offset that works for this allocation.
1093 * Currently this just uses the next linear offset available. No
1094 * attempt is made to keep track of or utilize gaps introduced
1095 * because of alignments.
1097 * Eventually, this should mainting different "heaps" of offsets
1098 * for different types of allocations. For example, display vs.
1101 * See igd_gmm.h for the different surface types supported. Below
1102 * are the ones of interest
1104 * #define IGD_SURFACE_RENDER 0x00000001
1105 * #define IGD_SURFACE_CURSOR 0x00000004
1106 * #define IGD_SURFACE_OVERLAY 0x00000008
1107 * #define IGD_SURFACE_DISPLAY 0x00000010
1108 * #define IGD_SURFACE_VIDEO 0x00000040
1109 * #define IGD_SURFACE_VIDEO_ENCODE 0x00000080
1110 * #define IGD_SURFACE_SYS_MEM 0x00008000
1111 * #define IGD_SURFACE_PHYS_PTR 0x00010000
1114 if (gmm_context->tail_chunk == NULL) {
1117 chunk->offset = gmm_context->tail_chunk->offset +
1118 gmm_context->tail_chunk->size;
1120 EMGD_DEBUG("- Before alignment: offset=0x%lx", chunk->offset);
1123 * Alignment varies depending on the type of surface being allocated.
/* Display surfaces get 256KB alignment; everything else 4KB (page). */
1125 if (flags & IGD_SURFACE_DISPLAY) {
1127 chunk->offset = (chunk->offset + 0x3ffff) & ~0x3ffff;
1130 chunk->offset = (chunk->offset + 0x0fff) & ~0x0fff;
1133 EMGD_DEBUG("- After alignment: offset=0x%lx", chunk->offset);
1136 if (gmm_context->head_chunk == NULL) {
1137 gmm_context->head_chunk = chunk;
1139 gmm_context->tail_chunk->next = chunk;
1141 gmm_context->tail_chunk = chunk;
1143 /* Bind the gart memory to the offset */
1145 * This updates the GTT table with the actual allocated pages
1146 * so the display hardware can access the memory.
1148 * TODO: Add ability to use other MMU's depending on the
1149 * type of memory requested.
1151 emgd_gtt_insert(gmm_context->context, chunk->gtt_mem, chunk->offset);
1155 * Physical is only meaningfull for single page or contiguous pages.
1156 * It represents the physical address of the first allocated page.
1158 if (chunk->gtt_mem->physical == 0x0) {
1159 chunk->gtt_mem->physical = page_to_phys(chunk->gtt_mem->pages[0]);
1162 *offset = chunk->offset;
1164 EMGD_DEBUG("Allocated chunk @ 0x%lx (0x%lx)", chunk->offset,
1165 (unsigned long)chunk->gtt_mem->physical);
1172 * Imports a list of pages allocated by an external source (i.e., the PVR
1173 * services) into the GMM and maps the pages into the GTT. Note that
1174 * this function is as dumb as gmm_alloc_chunk_space about reusing
1175 * previous allocations that have been freed; it will happily use a large
1176 * hole in the GTT for a tiny allocation if it's the first hole it finds.
1178 * pagelist is a live page list; it should not be modified or freed by
1180 * gtt_offset is an output only; this is the offset of the beginning of
1181 * the first page from the start of the GTT. If the actual surface
1182 * data starts partway through a page, the caller may need to add an
1183 * addition offset to where the surface data starts.
1185 static int gmm_import_pages(void **pagelist,
1186 unsigned long *gtt_offset,
1187 unsigned long numpages)
1193 EMGD_DEBUG("Importing %lu pages into GTT\n", numpages);
1196 * Check for a free chunk of sufficent size that does not have allocated
1197 * pages attached to it (i.e., a chunk from a previous import region that's
1200 chunk = gmm_context.head_chunk;
1202 if ((chunk->usage == FREE_IMPORTED) && (chunk->pages >= numpages))
/* NOTE(review): the reuse path marks the chunk INUSE_ALLOCATED, while
 * a freshly created chunk below is marked INUSE_IMPORTED.  If this value
 * sticks, gmm_release_import() will warn and gmm_shutdown() could wrongly
 * free externally-owned pages - confirm against the full source (elided
 * lines may overwrite usage on the common path). */
1204 chunk->usage = INUSE_ALLOCATED;
1205 EMGD_DEBUG("Re-using old chunk with offset=0x%lx", chunk->offset);
1208 chunk = chunk->next;
1211 /* Allocate a new chunk if we didn't find any that we could reuse */
1213 chunk = (gmm_chunk_t *)OS_ALLOC(sizeof(gmm_chunk_t));
1215 printk(KERN_ALERT "[EMGD] Cannot allocate gmm_chunk_t");
1216 EMGD_ERROR_EXIT("Returning %d", -IGD_ERROR_NOMEM);
1217 return -IGD_ERROR_NOMEM;
1219 OS_MEMSET(chunk, 0, sizeof(gmm_chunk_t));
1221 chunk->pages = numpages;
1222 chunk->size = numpages * PAGE_SIZE;
1225 /* Create a gmm_mem_buffer_t for the imported memory */
1226 chunk->gtt_mem = OS_ALLOC(sizeof(gmm_mem_buffer_t));
1227 if (chunk->gtt_mem == NULL) {
/* NOTE(review): this return appears to leak the chunk allocated above. */
1229 return -IGD_ERROR_NOMEM;
1232 /* Stick this chunk after all other GTT chunks */
1233 if (gmm_context.tail_chunk == NULL) {
1234 /* First chunk ever! */
1235 gmm_context.head_chunk = chunk;
1238 chunk->offset = gmm_context.tail_chunk->offset +
1239 gmm_context.tail_chunk->size;
1240 gmm_context.tail_chunk->next = chunk;
1242 gmm_context.tail_chunk = chunk;
1245 * Since we're making this a displayable surface, we need to make sure
1246 * it's 256k-aligned.
1248 chunk->offset = (chunk->offset + 0x3ffff) & ~0x3ffff;
1250 EMGD_DEBUG("Setting up a new GMM chunk for imported pages");
1253 *gtt_offset = chunk->offset;
1255 chunk->usage = INUSE_IMPORTED;
1257 chunk->page_addresses = NULL;
1260 * Note that the underlying gmm_mem_buffer may have a smaller size and
1261 * number of pages if we're reusing a larger chunk than we really needed.
1263 chunk->gtt_mem->size = numpages * PAGE_SIZE;
1264 chunk->gtt_mem->pages = (struct page**)pagelist;
1265 chunk->gtt_mem->page_count = numpages;
1268 * These fields should never be needed since responsibility for actually
1269 * freeing these pages and the page list itself lies with the external
1270 * code that allocated the pages.
1272 chunk->type = AGP_NORMAL_MEMORY;
1273 chunk->gtt_mem->type = AGP_NORMAL_MEMORY;
1274 chunk->gtt_mem->vmalloc_flag = 0;
1277 * This updates the GTT table with the actual imported pages
1278 * so the display hardware can access the memory.
1280 emgd_gtt_insert(gmm_context.context, chunk->gtt_mem, chunk->offset);
1284 * Physical is only meaningfull for single page or contiguous pages.
1285 * It represents the physical address of the first allocated page.
1287 if (chunk->gtt_mem->physical == 0x0) {
1288 chunk->gtt_mem->physical = page_to_phys(chunk->gtt_mem->pages[0]);
1291 EMGD_DEBUG("Imported chunk @ 0x%lx (0x%lx)", chunk->offset,
1292 (unsigned long)chunk->gtt_mem->physical);
/*
 * gmm_get_page_list() - return (and lazily cache) the physical address
 * of every page backing the chunk at `offset`.
 *
 * The address array is allocated once and stored on the chunk; it is
 * owned by the GMM (freed in gmm_free()/gmm_shutdown()), so the caller
 * must not free *pages.  Returns 0 on success, -IGD_ERROR_NOMEM when the
 * chunk is not found or the array cannot be allocated.
 */
1298 static int gmm_get_page_list(unsigned long offset,
1299 unsigned long **pages,
1300 unsigned long *page_cnt)
1306 EMGD_DEBUG("Parameters: offset=0x%08lx", offset);
1307 EMGD_DEBUG(" pages=0x%p, *pages=0x%p", pages, *pages);
1308 chunk = gmm_get_chunk(gmm_context.context, offset);
1310 if (chunk == NULL) {
1311 printk(KERN_ERR"[EMGD] gmm_get_page_list: Failed to find chunk: "
1313 return -IGD_ERROR_NOMEM;
1316 *page_cnt = chunk->gtt_mem->page_count;
1318 /* Allocate an array of page addresses: */
1319 if (chunk->page_addresses == NULL) {
1320 chunk->page_addresses = OS_ALLOC(sizeof(unsigned long) * *page_cnt);
1321 EMGD_DEBUG("Allocated chunk->page_addresses = 0x%p",
1322 chunk->page_addresses);
1323 if (chunk->page_addresses == NULL) {
1324 printk(KERN_ERR "[EMGD] gmm_get_page_list: failed to allocate the "
1325 "array of page addresses for chunk: 0x%lx\n", offset);
1326 return -IGD_ERROR_NOMEM;
1329 EMGD_DEBUG("Re-using chunk->page_addresses = 0x%p",
1330 chunk->page_addresses);
1333 /* Populate the array with the starting addresses of the pages: */
1334 for (i = 0; i < *page_cnt; i++) {
1335 chunk->page_addresses[i] = page_to_phys(chunk->gtt_mem->pages[i]);
1338 *pages = chunk->page_addresses;
1340 EMGD_DEBUG("*pages=0x%p", *pages);
1341 EMGD_DEBUG("page_count=%lu", *page_cnt);
/* Descriptor for a CI buffer handed in by the V4L2 side; consumed by
 * emgd_map_ci_buf() below. */
1346 struct emgd_ci_meminfo_t {
1347 unsigned long v4l2_offset;
/*
 * emgd_map_ci_buf() - map a contiguous CI (camera input) buffer into the
 * GTT and record it in the ci_surfaces[] table so it can later be
 * released via emgd_unmap_ci_buf().
 *
 * @param ci_meminfo describes the buffer (kernel virtual address + size)
 * @return result of gmm_map_to_graphics() on failure; falls through to
 *         the surface-table bookkeeping on success
 */
1352 int emgd_map_ci_buf(struct emgd_ci_meminfo_t * ci_meminfo)
1355 unsigned long gtt_offset;
/* Fixed: the third argument had been corrupted to ">t_offset" (a mangled
 * "&gtt_offset", likely via broken "&gt;" HTML escaping); pass the
 * address of the local gtt_offset declared above, which the bookkeeping
 * below reads. */
1357 ret = gmm_map_to_graphics(virt_to_phys((unsigned long *)ci_meminfo->virt), ci_meminfo->size, &gtt_offset);
1360 return ret;/*error handling*/
1362 /* save meminfo into our context */
1363 for(i=0;i<MAX_CI_LIST_SIZE;i++){
1364 if(!ci_surfaces[i].used){
1365 ci_surfaces[i].used = 1;
1366 ci_surfaces[i].virt = virt_to_phys((unsigned long *)ci_meminfo->virt);
1367 ci_surfaces[i].size = ci_meminfo->size;
1368 ci_surfaces[i].gtt_offset = gtt_offset;
/*
 * emgd_unmap_ci_buf() - reverse of emgd_map_ci_buf(): locate the CI
 * surface whose recorded (physical) address matches virt_addr, unmap it
 * from the GTT, and release its ci_surfaces[] slot.
 */
1375 int emgd_unmap_ci_buf(unsigned long virt_addr)
1379 for(i=0;i<MAX_CI_LIST_SIZE;i++)
1381 if(ci_surfaces[i].used && (ci_surfaces[i].virt == virt_addr))
1383 ret = gmm_unmap_from_graphics(ci_surfaces[i].gtt_offset, ci_surfaces[i].size);
1384 ci_surfaces[i].used = 0;
1385 ci_surfaces[i].gtt_offset = 0;
1389 printk(KERN_ERR"[gmm]ci unmap failed\n");
1396 static int gmm_unmap_ci(unsigned long virt_addr)
1399 ret =emgd_unmap_ci_buf(virt_addr);