/*
 *-----------------------------------------------------------------------------
 * Filename: gmm.c
 * $Revision: 1.53 $
 *-----------------------------------------------------------------------------
 * Copyright (c) 2002-2010, Intel Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *-----------------------------------------------------------------------------
 * Description:
 *  Very basic video memory management functions required by HAL.
 *-----------------------------------------------------------------------------
 */

#define MODULE_NAME hal.gmm

#include <igd_debug.h>
#include <drmP.h>
#include <memlist.h>
#include <io.h>
#include <memory.h>

#include <linux/module.h>
#include <linux/init.h>

#include <asm/agp.h>

#define AGP_PHYS_MEMORY 2 /* Physically contiguous memory */
struct emgd_ci_surface_t {
        unsigned int used;
        unsigned long v4l2_offset;
        unsigned long virt;
        unsigned long size;
        unsigned long gtt_offset;
};
#define MAX_CI_LIST_SIZE 14
struct emgd_ci_surface_t ci_surfaces[MAX_CI_LIST_SIZE];


gmm_context_t gmm_context;

gmm_chunk_t *gmm_get_chunk(igd_context_t *context, unsigned long offset);
static int gmm_flush_cache(void);
static int gmm_alloc_linear_surface(unsigned long *offset,
                unsigned long pixel_format,
                unsigned int *width,
                unsigned int *height,
                unsigned int *pitch,
                unsigned long *size,
                unsigned long type,
                unsigned long flags,
                unsigned long phys);

static int gmm_alloc_chunk_space(gmm_context_t *gmm_context,
                unsigned long *offset,
                unsigned long size,
                unsigned long phys,
                unsigned long flags);

static int gmm_import_pages(void **pagelist,
                unsigned long *gtt_offset,
                unsigned long numpages);

static int gmm_get_page_list(unsigned long offset,
                unsigned long **pages,
                unsigned long *page_cnt);

gmm_mem_buffer_t *emgd_alloc_pages(unsigned long num_pages, int type);
void emgd_free_pages(gmm_mem_buffer_t *mem);
void emgd_gtt_remove(igd_context_t *context, gmm_mem_buffer_t *mem,
                unsigned long offset);
void emgd_gtt_insert(igd_context_t *context, gmm_mem_buffer_t *mem,
                unsigned long offset);


static int gmm_map_ci(unsigned long *gtt_offset,
                        unsigned long ci_param,
                        unsigned long *virt_addr,
                        unsigned int map_method,
                        unsigned long size);


static int gmm_unmap_ci(unsigned long virt_addr);

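/*
 * gmm_free(): Mark the chunk that starts at the given GTT offset as free.
 * Chunks are never returned to the system here; they stay on the list so
 * their GTT space can be re-used by later allocations (see
 * gmm_alloc_chunk_space()), and the backing pages are finally released in
 * gmm_shutdown().
 */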
static void gmm_free(unsigned long offset)
{
        gmm_chunk_t *chunk;

        EMGD_DEBUG("Enter gmm_free(0x%lx)", offset);

        /* Walk the chunk list */
        chunk = gmm_context.head_chunk;
        while (chunk) {
                if (chunk->offset == offset) {
                        switch (chunk->usage) {
                        case FREE_ALLOCATED:
                                EMGD_DEBUG("WARNING: The chunk 0x%lx is already freed", offset);
                                break;
                        case INUSE_IMPORTED:
                        case FREE_IMPORTED:
                                EMGD_DEBUG("WARNING: The chunk 0x%lx was allocated externally", offset);
                                return;
                        case INUSE_ALLOCATED:
                                EMGD_DEBUG("Freeing the chunk 0x%lx", offset);
                                break;
                        default:
                                EMGD_DEBUG("Unknown usage %d for chunk 0x%lx.  Memory manager corrupt?",
                                        chunk->usage, offset);
                                break;
                        }

                        /*
                         * What to do if the ref count is > 0?  Unmapping is
                         * probably the right thing since nothing should try
                         * to use this. If something does, it should probably
                         * fail.
                         */
                        if (chunk->ref_cnt > 0 && chunk->addr) {
                                EMGD_DEBUG("WARNING: The chunk 0x%lx is mapped", offset);
                                /* chunk->addr will be freed by gmm_shutdown */
                        }

                        /* Free the array of page addresses, if applicable: */
                        if (chunk->page_addresses != NULL) {
                                EMGD_DEBUG("About to free chunk->page_addresses = 0x%p",
                                        chunk->page_addresses);
                                OS_FREE(chunk->page_addresses);
                                chunk->page_addresses = NULL;
                        }

                        chunk->usage = FREE_ALLOCATED;  /* mark as free */
                        return;
                }
                chunk = chunk->next;
        }

        EMGD_ERROR("gmm_free() did not find the chunk 0x%lx to free", offset);
        return;
}

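/*
 * gmm_release_import(): Counterpart of gmm_free() for chunks whose pages
 * were imported via gmm_import_pages().  The GTT space is marked free for
 * re-use, but the pages themselves belong to the external allocator and
 * are not touched here.
 */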
static void gmm_release_import(unsigned long offset)
{
        gmm_chunk_t *chunk;

        EMGD_DEBUG("Enter gmm_release_import(0x%lx)", offset);

        /* Walk the chunk list */
        chunk = gmm_context.head_chunk;
        while (chunk) {
                if (chunk->offset == offset) {
                        switch (chunk->usage) {
                        case FREE_ALLOCATED:
                        case INUSE_ALLOCATED:
                                EMGD_DEBUG("WARNING: The chunk 0x%lx was not an imported chunk", offset);
                                break;
                        case INUSE_IMPORTED:
                                EMGD_DEBUG("Releasing the chunk 0x%lx", offset);
                                break;
                        case FREE_IMPORTED:
                                EMGD_DEBUG("WARNING: The chunk 0x%lx has already been released", offset);
                                return;
                        default:
                                EMGD_DEBUG("Unknown usage %d for chunk 0x%lx.  Memory manager corrupt?",
                                        chunk->usage, offset);
                                break;
                        }

                        /*
                         * What to do if the ref count is > 0?  Unmapping is
                         * probably the right thing since nothing should try
                         * to use this. If something does, it should probably
                         * fail.
                         */
                        if (chunk->ref_cnt > 0) {
                                EMGD_DEBUG("WARNING: The chunk 0x%lx is mapped", offset);
                                chunk->ref_cnt = 0;
                                vunmap(chunk->addr);
                                chunk->addr = NULL;
                        }
                        /* Free the array of page addresses, if applicable: */
                        if (chunk->page_addresses != NULL) {
                                EMGD_DEBUG("About to free chunk->page_addresses = 0x%p",
                                        chunk->page_addresses);
                                OS_FREE(chunk->page_addresses);
                                chunk->page_addresses = NULL;
                        }

                        /* Zero out the gmm_mem_buffer_t */
                        OS_MEMSET(chunk->gtt_mem, 0, sizeof(gmm_mem_buffer_t));

                        /* Mark address space as free */
                        chunk->usage = FREE_IMPORTED;
                        return;
                }
                chunk = chunk->next;
        }

        EMGD_ERROR("gmm_release_import() did not find the chunk 0x%lx to release",
                offset);
        return;
}

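/*
 * gmm_alloc_region(): Allocate a non-surface memory region (overlay
 * registers, hardware status page, DMA buffers, etc.).  The region type
 * determines the alignment flags and whether physically contiguous
 * memory is required.
 */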
static int gmm_alloc_region(unsigned long *offset,
        unsigned long *size,
        unsigned int type,
        unsigned long flags)
{
        int ret;
        unsigned long aligned_size;
        unsigned long phys = 0;

        EMGD_TRACE_ENTER;
        EMGD_DEBUG("Parameters: size=%lu, type=%d, flags=0x%lx", *size, type, flags);

        *offset = 0;

        switch(type) {
        case IGD_GMM_REGION_TYPE_OVLREG:
                flags |= IGD_GMM_REGION_ALIGN_MMAP;
                phys = 1;
                break;
        case IGD_GMM_REGION_TYPE_OVLREG64:
                flags |= IGD_GMM_REGION_ALIGN_64K;
                phys = 1;
                break;
        case IGD_GMM_REGION_TYPE_HWSTATUS:
                flags |= IGD_GMM_REGION_ALIGN_MMAP;
                *size = 4096;
                phys = 1;
                break;
        case IGD_GMM_REGION_TYPE_DMA:
                flags |= IGD_GMM_REGION_ALIGN_MMAP;
                break;
        case IGD_GMM_REGION_TYPE_PERSISTENT:
                flags |= IGD_GMM_REGION_ALIGN_MMAP;
                break;
        case IGD_GMM_REGION_TYPE_BPL:
                flags |= IGD_GMM_REGION_ALIGN_MMAP;
                phys = 1;
                break;
        case IGD_GMM_REGION_TYPE_CONTEXT:
                flags |= IGD_GMM_REGION_ALIGN_CONTEXT | IGD_GMM_REGION_ALIGN_MMAP;
                *size = 4096;
                phys = 1;
                break;
        default:
                EMGD_ERROR_EXIT("Invalid Region type requested: 0x%8.8x", type);
                return -IGD_INVAL;
        }

        aligned_size = (*size + 4095) & ~4095;
        EMGD_DEBUG("aligned_size=%lu", aligned_size);

        do {
                ret = gmm_alloc_chunk_space(&gmm_context, offset, aligned_size, phys,
                                flags);
        } while ((ret == -IGD_ERROR_NOMEM) && gmm_flush_cache());

        EMGD_DEBUG("EXIT  Returning %d", ret);
        return ret;
}

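/*
 * gmm_get_num_surface(): Return the total number of chunks (surfaces and
 * regions) currently on the allocation list, whether in use or free.
 */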
static int gmm_get_num_surface(unsigned long *count)
{
        gmm_chunk_t *chunk;

        EMGD_TRACE_ENTER;

        /* Walk the chunk list */
        chunk = gmm_context.head_chunk;
        *count = 0;
        while (chunk) {
                (*count)++;
                chunk = chunk->next;
        }

        EMGD_TRACE_EXIT;
        return 0;
}

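/*
 * gmm_get_surface_list(): Return an array of {offset, size} pairs, one per
 * chunk on the allocation list.  The array is vmalloc()'d here, so the
 * caller is expected to free it with vfree().
 */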
static int gmm_get_surface_list(unsigned long allocated_size,
        unsigned long *list_size,
        igd_surface_list_t **surface_list)
{
        gmm_chunk_t *chunk;
        igd_surface_list_t *tmp_list;

        EMGD_TRACE_ENTER;
        gmm_get_num_surface(list_size);

        if (*list_size > 0) {
                *surface_list = vmalloc(*list_size * sizeof(igd_surface_list_t));
                if (*surface_list == NULL) {
                        EMGD_ERROR("Failed to allocate the surface list");
                        return -IGD_ERROR_NOMEM;
                }

                /* Walk the chunk list */
                chunk = gmm_context.head_chunk;
                tmp_list = *surface_list;

                while (chunk) {
                        tmp_list->offset = chunk->offset;
                        tmp_list->size = chunk->size;

                        chunk = chunk->next;
                        tmp_list++;
                }
        }
        EMGD_TRACE_EXIT;
        return 0;
}

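/*
 * gmm_alloc_surface(): Dispatch-table entry point for surface allocation.
 * Cursor surfaces must be physically contiguous; everything else is
 * allocated as a linear surface from normal pages.
 */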
static int gmm_alloc_surface(unsigned long *offset,
        unsigned long pixel_format,
        unsigned int *width,
        unsigned int *height,
        unsigned int *pitch,
        unsigned long *size,
        unsigned int type,
        unsigned long *flags)
{
        int ret;
        unsigned long phys;

        EMGD_TRACE_ENTER;
        EMGD_DEBUG("Parameters: pixel_format=0x%08lx,", pixel_format);
        EMGD_DEBUG("  width=%u, height=%u", *width, *height);
        EMGD_DEBUG("  pitch=%u, type=%d, flags=0x%08lx", *pitch, type, *flags);

        *offset = 0;
        *size = 0;
        if (! (*flags & IGD_MIN_PITCH)) {
                *pitch = 0;
        }

        if (*flags & IGD_SURFACE_CURSOR) {
                phys = 1;
        } else {
                phys = 0;
        }

        ret = gmm_alloc_linear_surface(offset, pixel_format, width, height, pitch,
                        size, type, *flags, phys);

        EMGD_DEBUG("EXIT  Returning %d", ret);
        return ret;
}


/*
 * Given an offset, find the chunk and return the physical address.
 */
static int gmm_virt_to_phys(unsigned long offset,
        unsigned long *physical)
{
        gmm_chunk_t *chunk;

        EMGD_TRACE_ENTER;
        EMGD_DEBUG("Looking for offset=0x%lx", offset);

        /* Walk the chunk list */
        chunk = gmm_context.head_chunk;
        while (chunk) {
                if (chunk->offset == offset) {
                        *physical = chunk->gtt_mem->physical;
                        EMGD_DEBUG("Physical address = 0x%08lx", *physical);
                        EMGD_TRACE_EXIT;
                        return 0;
                }
                chunk = chunk->next;
        }

        /* offset not found */
        EMGD_ERROR_EXIT("Did not find offset (0x%lx); returning %d",
                offset, -IGD_ERROR_NOMEM);
        return -IGD_ERROR_NOMEM;
}


static int gmm_flush_cache(void)
{
        EMGD_DEBUG("Enter gmm_flush_cache(), which is stubbed");
        return 0;
}

static void gmm_save(igd_context_t *context, void **state)
{
        EMGD_DEBUG("Enter gmm_save(), which is stubbed");
        return;
}

static void gmm_restore(igd_context_t *context, void *state)
{
        EMGD_DEBUG("Enter gmm_restore(), which is stubbed");
        return;
}

/*
 * Create a virtual address mapping for a block of video memory.
 */
static void *gmm_map(unsigned long offset)
{
        gmm_chunk_t *chunk;
        struct page **page_map;
        int i;
        void *addr = NULL;
        unsigned long num_pages;

        EMGD_TRACE_ENTER;
        EMGD_DEBUG("Parameter: offset=0x%lx", offset);

        chunk = gmm_get_chunk(gmm_context.context, offset);

        if (chunk == NULL) {
                printk(KERN_ERR "[EMGD] gmm_map: Failed to find chunk: 0x%lx\n", offset);
                return NULL;
        }

        /*
         * Check if this has been mapped already and return that map instead
         * of remapping it.
         */
        chunk->ref_cnt++;
        if (chunk->addr) {
                EMGD_DEBUG("This chunk is already mapped!");
                return chunk->addr;
        }

        /*
         * Build a page list for the allocation so it can be handed to
         * vmap().
         */
        num_pages = chunk->gtt_mem->page_count;
        page_map = vmalloc(num_pages * sizeof(struct page *));
        if (page_map == NULL) {
                chunk->ref_cnt--;
                printk(KERN_ERR "[EMGD] gmm_map: vmalloc failed.\n");
                return NULL;
        }

        for (i = 0; i < num_pages; i++) {
                page_map[i] = chunk->gtt_mem->pages[i];
        }

        addr = vmap(page_map, num_pages, VM_MAP, PAGE_KERNEL_UC_MINUS);

        vfree(page_map);
        chunk->addr = addr;

        EMGD_DEBUG("Mapped address = 0x%p", addr);
        EMGD_TRACE_EXIT;

        return addr;
}

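/*
 * Typical map/unmap usage (illustrative sketch only; "offset" is assumed
 * to come from a prior gmm_alloc_surface() or gmm_alloc_region() call):
 *
 *     void *cpu_ptr = gmm_map(offset);
 *     if (cpu_ptr) {
 *             memset(cpu_ptr, 0, size);   // CPU access to video memory
 *             gmm_unmap(cpu_ptr);
 *     }
 *
 * Mappings are reference counted, so nested gmm_map()/gmm_unmap() pairs
 * on the same chunk are safe; the vmap is torn down when the count
 * reaches zero.
 */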

static void gmm_unmap(void *addr)
{
        gmm_chunk_t *chunk;

        EMGD_TRACE_ENTER;
        EMGD_DEBUG("Parameter: addr=0x%p", addr);

        /* Look up the chunk that was mapped to this address */
        chunk = gmm_context.head_chunk;
        while (chunk) {
                if (chunk->addr == addr) {
                        EMGD_DEBUG("The chunk with addr=0x%p has the offset = 0x%08lx", addr,
                                chunk->offset);
                        chunk->ref_cnt--;
                        if (chunk->ref_cnt == 0) {
                                EMGD_DEBUG("About to call vunmap(0x%p)", addr);
                                vunmap(addr);
                                chunk->addr = NULL;
                        }
                        return;
                }
                chunk = chunk->next;
        }

        EMGD_TRACE_EXIT;
}

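/*
 * gmm_init(): Hook the GMM entry points into the HAL dispatch tables and
 * initialize the (initially empty) chunk list.  Entry points that this
 * simple memory manager does not implement are explicitly set to NULL.
 */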
int gmm_init(igd_context_t *context,
        unsigned long scratch_mem,
        unsigned long max_fb_mem)
{
        EMGD_TRACE_ENTER;
        EMGD_DEBUG("Parameters: scratch_mem=0x%lx, max_fb_mem=%lu",
                scratch_mem, max_fb_mem);

        context->dispatch.gmm_alloc_surface = gmm_alloc_surface;
        context->dispatch.gmm_alloc_region = gmm_alloc_region;
        context->dispatch.gmm_import_pages = gmm_import_pages;
        context->dispatch.gmm_virt_to_phys = gmm_virt_to_phys;
        context->dispatch.gmm_free = gmm_free;
        context->dispatch.gmm_release_import = gmm_release_import;
        context->dispatch.gmm_memstat = NULL;
        context->dispatch.gmm_alloc_cached = NULL;
        context->dispatch.gmm_free_cached = NULL;
        context->dispatch.gmm_alloc_cached_region = NULL;
        context->dispatch.gmm_free_cached_region = NULL;
        context->dispatch.gmm_flush_cache = gmm_flush_cache;
        context->dispatch.gmm_alloc_reservation = NULL;
        context->dispatch.gmm_alloc_heap = NULL;
        context->dispatch.gmm_alloc_heap_block = NULL;
        context->dispatch.gmm_free_heap_block = NULL;
        context->dispatch.gmm_get_heap_from_block = NULL;
        context->dispatch.gmm_get_pvtheap_size = NULL;
        context->dispatch.gmm_get_cache_mem = NULL;
        context->dispatch.gmm_alloc_persistent_region = NULL;
        context->dispatch.gmm_free_persistent_region = NULL;
        context->dispatch.gmm_map = gmm_map;
        context->dispatch.gmm_unmap = gmm_unmap;
        context->dispatch.gmm_get_page_list = gmm_get_page_list;
        context->dispatch.gmm_get_num_surface = gmm_get_num_surface;
        context->dispatch.gmm_get_surface_list = gmm_get_surface_list;
        context->dispatch.gmm_map_ci = gmm_map_ci;
        context->dispatch.gmm_unmap_ci = gmm_unmap_ci;

        context->mod_dispatch.gmm_save = gmm_save;
        context->mod_dispatch.gmm_restore = gmm_restore;

        gmm_context.context = context;
        gmm_context.head_chunk = NULL;
        gmm_context.tail_chunk = NULL;

        /* Reserve memory for framebuffer ??? */

        EMGD_DEBUG("EXIT  Returning %d", 0);
        return 0;
}

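/*
 * gmm_shutdown(): Tear down the memory manager.  Any chunk still marked
 * in-use is reported as a leak, mappings are vunmap()'d, bound chunks are
 * removed from the GTT, driver-allocated pages are released, and the
 * chunk list itself is freed.
 */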
void gmm_shutdown(igd_context_t *context)
{
        gmm_chunk_t *chunk, *del;
        struct drm_device *dev;

        EMGD_TRACE_ENTER;

        dev = (struct drm_device *)context->drm_dev;

        /* Walk the chunk list */
        chunk = gmm_context.head_chunk;
        while (chunk) {
                EMGD_DEBUG("process chunk at 0x%lx", chunk->offset);
                if (chunk->usage == INUSE_ALLOCATED || chunk->usage == INUSE_IMPORTED) {
                        EMGD_ERROR("Chunk at 0x%lx not properly freed", chunk->offset);
                }

                if (chunk->addr != NULL) {
                        vunmap(chunk->addr);
                }

                if (chunk->bound) {
                        emgd_gtt_remove(context, chunk->gtt_mem, chunk->offset);
                }

                if (chunk->usage == INUSE_ALLOCATED || chunk->usage == FREE_ALLOCATED) {
                        emgd_free_pages(chunk->gtt_mem);
                }

                /* Free the array of page addresses, if applicable: */
                if (chunk->page_addresses != NULL) {
                        EMGD_DEBUG("About to free chunk->page_addresses = 0x%p",
                                chunk->page_addresses);
                        OS_FREE(chunk->page_addresses);
                }

                /* Free the chunk */
                del = chunk;
                chunk = chunk->next;
                OS_FREE(del);
        }

        EMGD_TRACE_EXIT;
        return;
}

gmm_chunk_t *gmm_get_chunk(igd_context_t *context, unsigned long offset)
{
        gmm_chunk_t *chunk;

        chunk = gmm_context.head_chunk;
        while (chunk) {
                if (chunk->offset == offset) {
                        return chunk;
                }
                chunk = chunk->next;
        }

        printk(KERN_ERR "[EMGD] gmm_get_chunk: Failed to find chunk 0x%lx\n",
                offset);
        return NULL;
}


static int gmm_alloc_linear_surface(unsigned long *offset,
        unsigned long pixel_format,
        unsigned int *width,
        unsigned int *height,
        unsigned int *pitch,
        unsigned long *size,
        unsigned long type,
        unsigned long flags,
        unsigned long phys)
{
        int ret;
        unsigned long align;
        unsigned long min_pitch;

        EMGD_TRACE_ENTER;
        EMGD_DEBUG("Parameters: pixel_format=0x%08lx,", pixel_format);
        EMGD_DEBUG("  width=%u, height=%u", *width, *height);
        EMGD_DEBUG("  pitch=%u, size=%lu, type=%lu", *pitch, *size, type);
        EMGD_DEBUG("  flags=0x%08lx; phys=%lu", flags, phys);

        /* Validate surface */
        if (! *width) {
                *width = 1;
        }

        if (! *height) {
                *height = 1;
        }

        /* Set the minimum surface pitch */
        min_pitch = (IGD_PF_DEPTH(pixel_format) * *width) >> 3;
        if (min_pitch < *pitch) {
                min_pitch = *pitch;
        }

        /* Pitch for both PLB and TNC requires 64-byte alignment */
        min_pitch = ALIGN(min_pitch, 64);

        /*
         * Size should be based on pixel format and pitch, not just pitch.
         * For YUV surfaces, it is smaller than for RGB surfaces.
         */
        switch (IGD_PF_TYPE(pixel_format)) {
        case PF_TYPE_YUV_PLANAR:
                *size = min_pitch * (*height + (*height>>1));
                break;
        case PF_TYPE_YUV_PACKED:
                /* FIXME: What should this really be? */
                *size = min_pitch * *height;
                break;
        default:
                *size = min_pitch * *height;
                break;
        }
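
        /*
         * Worked example (hypothetical values): a 1366x768 32-bpp RGB
         * surface has IGD_PF_DEPTH = 32, so min_pitch = (32 * 1366) >> 3
         * = 5464 bytes, which ALIGN()s up to 5504.  A planar YUV surface
         * of the same width needs only height + height/2 rows of pitch
         * bytes, which is why the planar case above is smaller.
         */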

        *pitch = min_pitch;

        /* Page align the size */
        align = (*size + 4095) & ~4095;

        /*
         * Flags provide information on the type of surface being requested
         *   0x04 = cursor surface
         *   0x08 = overlay surface
         *   0x10 = display surface
         *   0x40 = video surface
         */
        do {
                ret = gmm_alloc_chunk_space(&gmm_context, offset, align, phys, flags);
        } while ((ret == -IGD_ERROR_NOMEM) && gmm_flush_cache());

        EMGD_DEBUG("EXIT  Returning %d", ret);
        return ret;
}


/*
 * gmm_contig_page_list(): Create the page list for a previously-allocated
 * block of contiguous memory. (This is needed for GTT insertion, and is
 * normally created by the emgd_alloc_pages() function.)
 */
static gmm_mem_buffer_t *gmm_contig_page_list(unsigned long num_pages,
                        unsigned long phys_addr)
{
        gmm_mem_buffer_t *mem;
        size_t list_size;
        int i;
        void *virt_addr = phys_to_virt(phys_addr);

        mem = (gmm_mem_buffer_t *)kzalloc(sizeof(gmm_mem_buffer_t), GFP_KERNEL);
        if (mem == NULL) {
                printk(KERN_ERR "[EMGD] Cannot allocate gmm_mem_buffer_t\n");
                EMGD_ERROR_EXIT("Returning NULL\n");
                return NULL;
        }

        /* First allocate the page array */
        list_size = num_pages * sizeof(struct page *);
        mem->vmalloc_flag = false;

        if (list_size <= (2 * PAGE_SIZE)) {
                mem->pages = kmalloc(list_size, GFP_KERNEL | __GFP_NORETRY);
        }

        if (mem->pages == NULL) {
                mem->pages = vmalloc(list_size);
                mem->vmalloc_flag = true;
        }

        if (mem->pages == NULL) {
                kfree(mem);
                printk(KERN_ERR "Failed to allocate memory info struct.\n");
                EMGD_ERROR_EXIT("Returning NULL\n");
                return NULL;
        }

        /*
         * The block is physically contiguous, so each struct page simply
         * follows the previous one in the kernel's page array.
         */
        mem->pages[0] = virt_to_page(virt_addr);
        if (num_pages > 1) {
                for (i = 1; i < num_pages; i++) {
                        mem->pages[i] = mem->pages[i-1] + 1;
                }
        }
        mem->physical = page_to_phys(mem->pages[0]);
        mem->page_count = num_pages;

        return mem;
}

/*
 * gmm_map_contig_buffer(): Map a previously-allocated contiguous SDRAM
 * memory block into graphics-accessible memory.
 */
static int gmm_map_contig_buffer(gmm_context_t *gmm_context,
                unsigned long phys_addr,
                unsigned long size,
                unsigned long *offset)
{
        gmm_chunk_t *chunk;

        EMGD_TRACE_ENTER;

        /* Check for a free contiguous chunk of sufficient size */
        chunk = gmm_context->head_chunk;
        while (chunk) {
                if ((chunk->usage == FREE_ALLOCATED) && (chunk->size >= size)
                && (chunk->type == AGP_PHYS_MEMORY)) {
                        /* Re-use this chunk */
                        chunk->usage = INUSE_ALLOCATED;
                        EMGD_DEBUG("Re-using old chunk with offset=0x%lx",
                                chunk->offset);
                        EMGD_DEBUG("EXIT  Returning %d", 0);
                        break;
                }
                chunk = chunk->next;
        }

        /* Allocate a new chunk list element */
        if (chunk == NULL) {
                chunk = (gmm_chunk_t *)OS_ALLOC(sizeof(gmm_chunk_t));
                if (!chunk) {
                        printk(KERN_ERR "[EMGD] Cannot allocate gmm_chunk_t element");
                        EMGD_ERROR_EXIT("Returning %d", -IGD_ERROR_NOMEM);
                        return -IGD_ERROR_NOMEM;
                }

                /*
                 * Only a newly-allocated chunk may be memset here; a re-used
                 * chunk must keep its previous GTT offset.
                 */
                OS_MEMSET(chunk, 0, sizeof(gmm_chunk_t));

                chunk->size = size;

                /*
                 * Determine the offset value for this chunk.  A new chunk is
                 * placed at tail_chunk->offset + tail_chunk->size.  A re-used
                 * chunk keeps its old offset, and since it is already on the
                 * list, neither the offset assignment nor the list insertion
                 * is repeated for it.
                 */
                if (gmm_context->tail_chunk == NULL) {
                        chunk->offset = 0;
                } else {
                        chunk->offset = gmm_context->tail_chunk->offset +
                                gmm_context->tail_chunk->size;
                }

                /* Adjust the offset since display surfaces require 256KB alignment */
                chunk->offset = (chunk->offset + 0x3ffff) & ~0x3ffff;

                /* Insert this chunk in the list */
                chunk->next = NULL;
                if (gmm_context->head_chunk == NULL) {
                        gmm_context->head_chunk = chunk;
                } else {
                        gmm_context->tail_chunk->next = chunk;
                }
                gmm_context->tail_chunk = chunk;
                chunk->usage = INUSE_ALLOCATED;
        }

        /* Contiguous memory is needed, so set the type to AGP_PHYS_MEMORY */
        chunk->pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        chunk->type = AGP_PHYS_MEMORY;

        /* Create the GTT page list for this contiguous memory block */
        chunk->gtt_mem = gmm_contig_page_list(chunk->pages, phys_addr);
        if (chunk->gtt_mem == NULL) {
                printk(KERN_ERR "[EMGD] Cannot allocate the GTT page list");
                EMGD_ERROR_EXIT("Returning %d", -IGD_ERROR_NOMEM);
                return -IGD_ERROR_NOMEM;
        }

        /* Assign the specified memory block to this chunk */
        chunk->ref_cnt = 0;
        chunk->page_addresses = NULL;

        /* Now update the GTT so the display HW can access this memory */
        emgd_gtt_insert(gmm_context->context, chunk->gtt_mem, chunk->offset);

        /* Bind the gart memory to the offset */
        chunk->bound = 1;

        /* For contiguous pages, physical is the address of the first allocated page */
        if (chunk->gtt_mem->physical == 0x0) {
                chunk->gtt_mem->physical = page_to_phys(chunk->gtt_mem->pages[0]);
        }

        /* Return the offset associated with this contiguous block */
        *offset = chunk->offset;

        EMGD_TRACE_EXIT;
        return 0;
}

/*
 * gmm_map_to_graphics(): Facilitates direct display of contiguous video input
 * buffers by mapping the specified block into the "graphics aperture" via the
 * GTT.
 */
int gmm_map_to_graphics(unsigned long phys_addr,
        unsigned long size,
        unsigned long *offset)
{
        int ret;

        EMGD_TRACE_ENTER;

        if (phys_addr && size) {
                ret = gmm_map_contig_buffer(&gmm_context, phys_addr, size,
                        offset);
        } else {
                printk(KERN_ERR "Invalid address (0x%lx) and/or size (0x%lx)!\n",
                        phys_addr, size);
                printk(KERN_ERR "EXIT  Returning %d\n", -EINVAL);
                ret = -EINVAL;
        }

        EMGD_TRACE_EXIT;
        return ret;
}
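
/*
 * Example usage (illustrative sketch; a hypothetical caller with a
 * physically contiguous DMA buffer of buf_size bytes at buf_phys):
 *
 *     unsigned long gtt_offset;
 *
 *     if (!gmm_map_to_graphics(buf_phys, buf_size, &gtt_offset)) {
 *             // gtt_offset can now be programmed into a display plane
 *             ...
 *             gmm_unmap_from_graphics(gtt_offset, buf_size);
 *     }
 */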

/*
 * gmm_map_ci(): Map a camera-input (CI) buffer for display.  If map_method
 * is non-zero, ci_param is a kernel virtual address: the buffer is mapped
 * into the GTT and recorded in the ci_surfaces list.  Otherwise, ci_param
 * is a v4l2_offset used to look up the matching entry's gtt_offset and
 * virtual address.
 */
static int gmm_map_ci(unsigned long *gtt_offset,
                        unsigned long ci_param, /* virtaddr or v4l2_offset */
                        unsigned long *virt_addr,
                        unsigned int map_method,
                        unsigned long size)
{
        unsigned char i;
        int ret;

        if (map_method) {
                ret = gmm_map_to_graphics(virt_to_phys((unsigned long *)ci_param),
                        size, gtt_offset);
                if (ret) {
                        return ret;
                } else {
                        /* Record the mapping in the first unused slot */
                        for (i = 0; i < MAX_CI_LIST_SIZE; i++) {
                                if (!ci_surfaces[i].used) {
                                        ci_surfaces[i].used = 1;
                                        ci_surfaces[i].virt = ci_param;
                                        ci_surfaces[i].size = size;
                                        ci_surfaces[i].gtt_offset = *gtt_offset;
                                        *virt_addr = ci_param;
                                        break;
                                }
                        }
                }
        } else {
                /* Look up an existing mapping by v4l2_offset */
                for (i = 0; i < MAX_CI_LIST_SIZE; i++) {
                        if (ci_surfaces[i].used &&
                                (ci_surfaces[i].v4l2_offset == ci_param)) {
                                *gtt_offset = ci_surfaces[i].gtt_offset;
                                *virt_addr = ci_surfaces[i].virt;
                                break;
                        }
                }
        }
        return 0;
}

/*
 * gmm_unmap_contig_buffer(): Un-map a previously-mapped contiguous SDRAM
 * memory block from graphics memory.
 */
static int gmm_unmap_contig_buffer(gmm_context_t *gmm_context,
                unsigned long offset,
                unsigned long size)
{
        gmm_chunk_t *chunk;

        EMGD_TRACE_ENTER;

        /* Locate the specified chunk and mark it as unused */
        chunk = gmm_context->head_chunk;
        while (chunk) {
                if ((chunk->usage == INUSE_ALLOCATED) && (chunk->size >= size) &&
                        (chunk->type == AGP_PHYS_MEMORY) &&
                        chunk->offset == offset) {

                        emgd_gtt_remove(gmm_context->context, chunk->gtt_mem, chunk->offset);

                        chunk->usage = FREE_ALLOCATED;

                        /*
                         * The chunk is not removed from the chunk list,
                         * because its offset in the GTT table will be re-used.
                         */
                        if (chunk->gtt_mem != NULL) {
                                if (chunk->gtt_mem->vmalloc_flag) {
                                        vfree(chunk->gtt_mem->pages);
                                } else {
                                        kfree(chunk->gtt_mem->pages);
                                }
                                kfree(chunk->gtt_mem);
                                chunk->gtt_mem = NULL;
                        }

                        /* Free the array of page addresses, if applicable: */
                        if (chunk->page_addresses != NULL) {
                                EMGD_DEBUG("About to free chunk->page_addresses = 0x%p",
                                        chunk->page_addresses);
                                OS_FREE(chunk->page_addresses);
                                chunk->page_addresses = NULL;
                        }

                        return 0;
                }
                chunk = chunk->next;
        }
        printk(KERN_ERR "Buffer @ 0x%lx (size 0x%lx) not found!\n", offset, size);
        printk(KERN_ERR "EXIT  Returning %d\n", -EINVAL);
        EMGD_TRACE_EXIT;
        return -EINVAL;
}


/*
 * gmm_unmap_from_graphics(): Disables direct display of DMA video input buffers
 * by unmapping the specified block from the "graphics aperture" via the GTT.
 */
int gmm_unmap_from_graphics(unsigned long offset, unsigned long size)
{
        int ret;

        EMGD_TRACE_ENTER;
        if (offset && size) {
                /* Mark the GTT chunk as currently unused */
                ret = gmm_unmap_contig_buffer(&gmm_context, offset, size);
        } else {
                printk(KERN_ERR "Invalid offset (0x%lx) and/or size (0x%lx)!\n",
                        offset, size);
                printk(KERN_ERR "EXIT  Returning %d\n", -EINVAL);
                ret = -EINVAL;
        }
        EMGD_TRACE_EXIT;
        return ret;
}
EXPORT_SYMBOL(gmm_unmap_from_graphics);


/*
 * Maintain a very simple linear linked list of memory allocations. Try
 * to re-use freed blocks.  No error checking is done and alignment is
 * hard coded.
 */
static int gmm_alloc_chunk_space(gmm_context_t *gmm_context,
                unsigned long *offset,
                unsigned long size,
                unsigned long phys,
                unsigned long flags)
{
        gmm_chunk_t *chunk;
        struct drm_device *dev;

        EMGD_TRACE_ENTER;
        EMGD_DEBUG("Parameters: size=%lu; phys=%lu", size, phys);
        EMGD_DEBUG("  flags=0x%08lx", flags);

        /* Check for a free chunk of sufficient size */
        chunk = gmm_context->head_chunk;
        while (chunk) {
                if ((chunk->usage == FREE_ALLOCATED) && (chunk->size >= size) &&
                        (chunk->type == (phys ? AGP_PHYS_MEMORY : AGP_NORMAL_MEMORY))) {
                        chunk->usage = INUSE_ALLOCATED;
                        *offset = chunk->offset;
                        EMGD_DEBUG("Re-using old chunk with offset=0x%lx", chunk->offset);
                        EMGD_DEBUG("EXIT  Returning %d", 0);
                        return 0;
                }
                chunk = chunk->next;
        }

        /* Allocate a new chunk */
        chunk = (gmm_chunk_t *)OS_ALLOC(sizeof(gmm_chunk_t));
        if (!chunk) {
                printk(KERN_ALERT "[EMGD] Cannot allocate gmm_chunk_t");
                EMGD_ERROR_EXIT("Returning %d", -IGD_ERROR_NOMEM);
                return -IGD_ERROR_NOMEM;
        }
        OS_MEMSET(chunk, 0, sizeof(gmm_chunk_t));

        /*
         * First allocate the memory from the gart driver. If this fails,
         * don't bother allocating a new chunk.
         */
        dev = (struct drm_device *)gmm_context->context->drm_dev;
        chunk->size = size;
        chunk->pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        /*
         * If we need physically contiguous memory, then we need to
         * set the type to AGP_PHYS_MEMORY; otherwise use AGP_NORMAL_MEMORY.
         */
        if (phys) {
                chunk->type = AGP_PHYS_MEMORY;
                EMGD_DEBUG("Allocate AGP_PHYS; size = 0x%08lx", chunk->size);
        } else {
                chunk->type = AGP_NORMAL_MEMORY;
                EMGD_DEBUG("Allocate AGP_NORMAL; size = 0x%08lx", chunk->size);
        }

        if (dev == NULL) {
                OS_FREE(chunk);
                EMGD_ERROR_EXIT("drm device is NULL; Returning %d", -IGD_ERROR_NOMEM);
                return -IGD_ERROR_NOMEM;
        }

        /* Allocate memory from the AGPGART */
        chunk->gtt_mem = emgd_alloc_pages(chunk->pages, chunk->type);
        if (!chunk->gtt_mem) {
                OS_FREE(chunk);
                printk(KERN_ALERT "[EMGD] Failed to allocate AGP memory.\n");
                EMGD_DEBUG("gmm_alloc_chunk_space() returning %d", -IGD_ERROR_NOMEM);
                return -IGD_ERROR_NOMEM;
        }

        chunk->usage = INUSE_ALLOCATED;
        chunk->ref_cnt = 0;
        chunk->page_addresses = NULL;

        /*
         * Get the next available offset that works for this allocation.
         * Currently this just uses the next linear offset available. No
         * attempt is made to keep track of or utilize gaps introduced
         * because of alignments.
         *
         * Eventually, this should maintain different "heaps" of offsets
         * for different types of allocations.  For example, display vs.
         * video.
         *
         * See igd_gmm.h for the different surface types supported. Below
         * are the ones of interest:
         *
         * #define IGD_SURFACE_RENDER           0x00000001
         * #define IGD_SURFACE_CURSOR           0x00000004
         * #define IGD_SURFACE_OVERLAY          0x00000008
         * #define IGD_SURFACE_DISPLAY          0x00000010
         * #define IGD_SURFACE_VIDEO            0x00000040
         * #define IGD_SURFACE_VIDEO_ENCODE     0x00000080
         * #define IGD_SURFACE_SYS_MEM          0x00008000
         * #define IGD_SURFACE_PHYS_PTR         0x00010000
         */
        if (gmm_context->tail_chunk == NULL) {
                chunk->offset = 0;
        } else {
                chunk->offset = gmm_context->tail_chunk->offset +
                        gmm_context->tail_chunk->size;
        }
        EMGD_DEBUG("- Before alignment: offset=0x%lx", chunk->offset);

        /*
         * Alignment varies depending on the type of surface being allocated.
         */
        if (flags & IGD_SURFACE_DISPLAY) {
                /* 256KB aligned */
                chunk->offset = (chunk->offset + 0x3ffff) & ~0x3ffff;
        } else {
                /* 4KB aligned */
                chunk->offset = (chunk->offset + 0x0fff) & ~0x0fff;
        }

        EMGD_DEBUG("-  After alignment: offset=0x%lx", chunk->offset);
        chunk->next = NULL;

        if (gmm_context->head_chunk == NULL) {
                gmm_context->head_chunk = chunk;
        } else {
                gmm_context->tail_chunk->next = chunk;
        }
        gmm_context->tail_chunk = chunk;

        /*
         * Bind the gart memory to the offset.  This updates the GTT table
         * with the actual allocated pages so the display hardware can
         * access the memory.
         *
         * TODO: Add ability to use other MMUs depending on the
         * type of memory requested.
         */
        emgd_gtt_insert(gmm_context->context, chunk->gtt_mem, chunk->offset);
        chunk->bound = 1;

        /*
         * Physical is only meaningful for a single page or contiguous pages.
         * It represents the physical address of the first allocated page.
         */
        if (chunk->gtt_mem->physical == 0x0) {
                chunk->gtt_mem->physical = page_to_phys(chunk->gtt_mem->pages[0]);
        }

        *offset = chunk->offset;

        EMGD_DEBUG("Allocated chunk @ 0x%lx (0x%lx)", chunk->offset,
                (unsigned long)chunk->gtt_mem->physical);
        EMGD_TRACE_EXIT;
        return 0;
}


/*
 * Imports a list of pages allocated by an external source (i.e., the PVR
 * services) into the GMM and maps the pages into the GTT.  Note that
 * this function is as dumb as gmm_alloc_chunk_space() about reusing
 * previous allocations that have been freed; it will happily use a large
 * hole in the GTT for a tiny allocation if it's the first hole it finds.
 *
 * pagelist is a live page list; it should not be modified or freed by
 *    the GMM.
 * gtt_offset is an output only; this is the offset of the beginning of
 *    the first page from the start of the GTT.  If the actual surface
 *    data starts partway through a page, the caller may need to add an
 *    additional offset to where the surface data starts.
 */
static int gmm_import_pages(void **pagelist,
                unsigned long *gtt_offset,
                unsigned long numpages)
{
        gmm_chunk_t *chunk;

        EMGD_TRACE_ENTER;

        EMGD_DEBUG("Importing %lu pages into GTT\n", numpages);

        /*
         * Check for a free chunk of sufficient size that does not have
         * allocated pages attached to it (i.e., a chunk from a previous
         * import region that's been freed).
         */
        chunk = gmm_context.head_chunk;
        while (chunk) {
                if ((chunk->usage == FREE_IMPORTED) && (chunk->pages >= numpages)) {
                        chunk->usage = INUSE_ALLOCATED;
                        EMGD_DEBUG("Re-using old chunk with offset=0x%lx", chunk->offset);
                        break;
                }
                chunk = chunk->next;
        }

        /* Allocate a new chunk if we didn't find any that we could reuse */
        if (!chunk) {
                chunk = (gmm_chunk_t *)OS_ALLOC(sizeof(gmm_chunk_t));
                if (!chunk) {
                        printk(KERN_ALERT "[EMGD] Cannot allocate gmm_chunk_t");
                        EMGD_ERROR_EXIT("Returning %d", -IGD_ERROR_NOMEM);
                        return -IGD_ERROR_NOMEM;
                }
                OS_MEMSET(chunk, 0, sizeof(gmm_chunk_t));

                chunk->pages = numpages;
                chunk->size = numpages * PAGE_SIZE;
                chunk->next = NULL;

                /* Create a gmm_mem_buffer_t for the imported memory */
                chunk->gtt_mem = OS_ALLOC(sizeof(gmm_mem_buffer_t));
                if (chunk->gtt_mem == NULL) {
                        OS_FREE(chunk);
                        return -IGD_ERROR_NOMEM;
                }

                /* Stick this chunk after all other GTT chunks */
                if (gmm_context.tail_chunk == NULL) {
                        /* First chunk ever! */
                        gmm_context.head_chunk = chunk;
                        chunk->offset = 0;
                } else {
                        chunk->offset = gmm_context.tail_chunk->offset +
                                gmm_context.tail_chunk->size;
                        gmm_context.tail_chunk->next = chunk;
                }
                gmm_context.tail_chunk = chunk;

                /*
                 * Since we're making this a displayable surface, we need to make sure
                 * it's 256k-aligned.
                 */
                chunk->offset = (chunk->offset + 0x3ffff) & ~0x3ffff;

                EMGD_DEBUG("Setting up a new GMM chunk for imported pages");
        }

        *gtt_offset = chunk->offset;

        chunk->usage = INUSE_IMPORTED;
        chunk->ref_cnt = 0;
        chunk->page_addresses = NULL;

        /*
         * Note that the underlying gmm_mem_buffer may have a smaller size and
         * number of pages if we're reusing a larger chunk than we really needed.
         */
        chunk->gtt_mem->size = numpages * PAGE_SIZE;
        chunk->gtt_mem->pages = (struct page**)pagelist;
        chunk->gtt_mem->page_count = numpages;

        /*
         * These fields should never be needed since responsibility for actually
         * freeing these pages and the page list itself lies with the external
         * code that allocated the pages.
         */
        chunk->type = AGP_NORMAL_MEMORY;
        chunk->gtt_mem->type = AGP_NORMAL_MEMORY;
        chunk->gtt_mem->vmalloc_flag = 0;

        /*
         * This updates the GTT table with the actual imported pages
         * so the display hardware can access the memory.
         */
        emgd_gtt_insert(gmm_context.context, chunk->gtt_mem, chunk->offset);
        chunk->bound = 1;

        /*
         * Physical is only meaningful for a single page or contiguous pages.
         * It represents the physical address of the first allocated page.
         */
        if (chunk->gtt_mem->physical == 0x0) {
                chunk->gtt_mem->physical = page_to_phys(chunk->gtt_mem->pages[0]);
        }

        EMGD_DEBUG("Imported chunk @ 0x%lx (0x%lx)", chunk->offset,
                (unsigned long)chunk->gtt_mem->physical);
        EMGD_TRACE_EXIT;
        return 0;
}

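/*
 * gmm_get_page_list(): Return an array holding the physical address of
 * each page backing the chunk at the given offset.  The array is cached
 * on the chunk and re-used by subsequent calls; it is freed when the
 * chunk is freed or at shutdown.
 */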
static int gmm_get_page_list(unsigned long offset,
                unsigned long **pages,
                unsigned long *page_cnt)
{
        gmm_chunk_t *chunk;
        int i;

        EMGD_TRACE_ENTER;
        EMGD_DEBUG("Parameters: offset=0x%08lx", offset);
        EMGD_DEBUG("  pages=0x%p, *pages=0x%p", pages, *pages);
        chunk = gmm_get_chunk(gmm_context.context, offset);

        if (chunk == NULL) {
                printk(KERN_ERR "[EMGD] gmm_get_page_list: Failed to find chunk: "
                        "0x%lx\n", offset);
                return -IGD_ERROR_NOMEM;
        }

        *page_cnt = chunk->gtt_mem->page_count;

        /* Allocate an array of page addresses: */
        if (chunk->page_addresses == NULL) {
                chunk->page_addresses = OS_ALLOC(sizeof(unsigned long) * *page_cnt);
                EMGD_DEBUG("Allocated chunk->page_addresses = 0x%p",
                        chunk->page_addresses);
                if (chunk->page_addresses == NULL) {
                        printk(KERN_ERR "[EMGD] gmm_get_page_list: failed to allocate the "
                                "array of page addresses for chunk: 0x%lx\n", offset);
                        return -IGD_ERROR_NOMEM;
                }
        } else {
                EMGD_DEBUG("Re-using chunk->page_addresses = 0x%p",
                        chunk->page_addresses);
        }

        /* Populate the array with the starting addresses of the pages: */
        for (i = 0; i < *page_cnt; i++) {
                chunk->page_addresses[i] = page_to_phys(chunk->gtt_mem->pages[i]);
        }

        *pages = chunk->page_addresses;

        EMGD_DEBUG("*pages=0x%p", *pages);
        EMGD_DEBUG("page_count=%lu", *page_cnt);
        EMGD_TRACE_EXIT;
        return 0;
}

struct emgd_ci_meminfo_t {
        unsigned long v4l2_offset;
        unsigned long virt;
        unsigned long size;
};

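/*
 * emgd_map_ci_buf(): Exported hook for the camera-input (CI) driver.  Maps
 * a contiguous CI buffer into the GTT and records it in the ci_surfaces
 * table so that gmm_map_ci() can later look it up by v4l2_offset.
 *
 * Illustrative call from a hypothetical CI driver (buf is a kernel
 * virtual address of a contiguous buffer of len bytes):
 *
 *     struct emgd_ci_meminfo_t mi = { v4l2_off, (unsigned long)buf, len };
 *
 *     if (emgd_map_ci_buf(&mi))
 *             ... handle the mapping error ...
 */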
int emgd_map_ci_buf(struct emgd_ci_meminfo_t *ci_meminfo)
{
        int ret;
        unsigned long gtt_offset;
        unsigned char i;

        ret = gmm_map_to_graphics(virt_to_phys((unsigned long *)ci_meminfo->virt),
                ci_meminfo->size, &gtt_offset);
        if (ret) {
                return ret; /* error handling */
        }

        /* Save the meminfo into our context */
        for (i = 0; i < MAX_CI_LIST_SIZE; i++) {
                if (!ci_surfaces[i].used) {
                        ci_surfaces[i].used = 1;
                        ci_surfaces[i].v4l2_offset = ci_meminfo->v4l2_offset;
                        ci_surfaces[i].virt =
                                virt_to_phys((unsigned long *)ci_meminfo->virt);
                        ci_surfaces[i].size = ci_meminfo->size;
                        ci_surfaces[i].gtt_offset = gtt_offset;
                        return 0;
                }
        }
        return 0;
}
EXPORT_SYMBOL(emgd_map_ci_buf);

int emgd_unmap_ci_buf(unsigned long virt_addr)
{
        unsigned char i;
        int ret;

        for (i = 0; i < MAX_CI_LIST_SIZE; i++) {
                if (ci_surfaces[i].used && (ci_surfaces[i].virt == virt_addr)) {
                        ret = gmm_unmap_from_graphics(ci_surfaces[i].gtt_offset,
                                ci_surfaces[i].size);
                        ci_surfaces[i].used = 0;
                        ci_surfaces[i].gtt_offset = 0;
                        return ret;
                }
        }
        printk(KERN_ERR "[gmm] ci unmap failed\n");
        return 1;
}
EXPORT_SYMBOL(emgd_unmap_ci_buf);


static int gmm_unmap_ci(unsigned long virt_addr)
{
        return emgd_unmap_ci_buf(virt_addr);
}