2 /****************************************************************************
3 * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
5 * All Rights Reserved. *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation on the rights to use, copy, modify, merge,
11 * publish, distribute, sublicense, and/or sell copies of the Software,
12 * and to permit persons to whom the Software is furnished to do so,
13 * subject to the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial
17 * portions of the Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
23 * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 ***************************************************************************/
29 #include "xgi_types.h"
30 #include "xgi_linux.h"
/* File-scope state for the PCIE (GART) heap allocator in this driver. */
36 static struct xgi_pcie_heap *xgi_pcie_heap = NULL;	/* heap bookkeeping: free/used/sort lists + max_freesize */
37 static struct kmem_cache *xgi_pcie_cache_block = NULL;	/* slab cache for struct xgi_pcie_block nodes */
38 static struct xgi_pcie_block *xgi_pcie_vertex_block = NULL;	/* singleton block for the PCIE_3D (vertex) owner */
39 static struct xgi_pcie_block *xgi_pcie_cmdlist_block = NULL;	/* singleton block for the PCIE_3D_CMDLIST owner */
40 static struct xgi_pcie_block *xgi_pcie_scratchpad_block = NULL;	/* singleton block for the PCIE_3D_SCRATCHPAD owner */
41 extern struct list_head xgi_mempid_list;	/* per-process allocation records, defined in another file */
/*
 * xgi_pcie_lut_alloc - allocate 2^page_order physically contiguous pages
 * for the PCIE lookup table and raise each page's reference count.
 *
 * Returns the kernel virtual address of the first page.
 * NOTE(review): this listing is truncated (the embedded line numbers skip);
 * the failure-return and the end of the loop/function are not visible here.
 */
43 static unsigned long xgi_pcie_lut_alloc(unsigned long page_order)
46 unsigned long page_addr = 0;
47 unsigned long page_count = 0;
/* number of pages in the run = 2^page_order */
50 page_count = (1 << page_order);
51 page_addr = __get_free_pages(GFP_KERNEL, page_order);
53 if (page_addr == 0UL) {
54 XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n",
/* pin/account every page in the allocated run */
59 page = virt_to_page(page_addr);
61 for (i = 0; i < page_count; i++, page++) {
62 XGI_INC_PAGE_COUNT(page);
66 XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n",
67 page_count, page_order, page_addr);
/*
 * xgi_pcie_lut_free - release pages obtained by xgi_pcie_lut_alloc():
 * drop each page's reference count, then return the whole 2^page_order
 * run to the page allocator with free_pages().
 * NOTE(review): declarations of 'page' and 'i' fall in lines elided from
 * this truncated listing.
 */
71 static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order)
74 unsigned long page_count = 0;
77 page_count = (1 << page_order);
78 page = virt_to_page(page_addr);
80 for (i = 0; i < page_count; i++, page++) {
81 XGI_DEC_PAGE_COUNT(page);
/* hand the contiguous run back to the kernel page allocator */
85 free_pages(page_addr, page_order);
/*
 * xgi_pcie_lut_init - probe hardware configuration registers, size the PCIE
 * aperture, allocate and zero the GART lookup table (one 32-bit entry per
 * aperture page), and program its physical base address into the chip.
 *
 * Returns nonzero on success, 0 on failure (inferred from the caller's
 * "!xgi_pcie_lut_init(info)" test; the actual return lines are elided in
 * this truncated listing, as are several else-branches and the declaration
 * of 'temp').
 */
88 static int xgi_pcie_lut_init(struct xgi_info * info)
90 unsigned char *page_addr = NULL;
91 unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder;
92 unsigned long count = 0;
96 unsigned long pcie_aperture_size;
/* default aperture size; overridden below from the 3x5(0x27) strap bit */
98 info->pcie.size = 128 * 1024 * 1024;
100 /* Get current FB aperture size */
102 XGI_INFO("In3x5(0x27): 0x%x \n", temp);
104 if (temp & 0x01) {	/* 256MB; Jong 06/05/2006; 0x10000000 */
105 /* Jong 06/06/2006; allocate memory */
106 pcie_aperture_size = 256 * 1024 * 1024;
107 /* info->pcie.base = 256 * 1024 * 1024; *//* pcie base is different from fb base */
108 } else {		/* 128MB; Jong 06/05/2006; 0x08000000 */
110 /* Jong 06/06/2006; allocate memory */
111 pcie_aperture_size = 128 * 1024 * 1024;
112 /* info->pcie.base = 128 * 1024 * 1024; */
115 /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */
116 /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */
117 /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */
118 /* info->pcie.base=ioremap(0x0F000000, 0x10000000); *//* Cause system hang */
/* GE-side base of the PCIE aperture starts right after the FB aperture */
119 info->pcie.base = pcie_aperture_size;	/* works */
120 /* info->pcie.base=info->fb.base + info->fb.size; *//* System hang */
121 /* info->pcie.base=128 * 1024 * 1024; *//* System hang */
123 XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base);
125 /* Get current lookup table page size */
126 temp = bReadReg(0xB00C);
127 if (temp & 0x04) {	/* 8KB */
128 info->lutPageSize = 8 * 1024;
131 info->lutPageSize = 4 * 1024;
134 XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
137 /* Get current lookup table location */
138 temp = bReadReg(0xB00C);
139 if (temp & 0x02) {	/* LFB */
140 info->isLUTInLFB = TRUE;
141 /* Current we only support lookup table in LFB */
/* force the LUT out of LFB; bit layout per XGI register spec — TODO confirm */
143 bWriteReg(0xB00C, temp);
144 info->isLUTInLFB = FALSE;
147 info->isLUTInLFB = FALSE;
150 XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
152 /* Get current SDFB page size */
153 temp = bReadReg(0xB00C);
154 if (temp & 0x08) {	/* 8MB */
155 info->sdfbPageSize = 8 * 1024 * 1024;
158 info->sdfbPageSize = 4 * 1024 * 1024;
/* one LUT entry per aperture page */
161 pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE;
164 * Allocate memory for PCIE GART table;
/* each entry is 4 bytes, so round the table itself up to whole pages */
166 lutEntryNum = pciePageCount;
167 lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE;
169 /* get page_order base on page_count */
170 count = lutPageCount;
171 for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder) ;
/* if lutPageCount was an exact power of two, the loop overshot by one */
173 if ((lutPageCount << 1) == (1 << lutPageOrder)) {
177 XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n",
178 lutEntryNum, lutPageCount, lutPageOrder);
180 info->lutPageOrder = lutPageOrder;
181 page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder);
184 XGI_ERROR("cannot allocate PCIE lut page!\n");
187 info->lut_base = (unsigned long *)page_addr;
189 XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n",
190 page_addr, virt_to_phys(page_addr));
193 ("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n",
194 info->lut_base, __pa(info->lut_base), info->lutPageOrder);
197 * clean all PCIE GART Entry
199 memset(page_addr, 0, PAGE_SIZE << lutPageOrder);
/* flush CPU caches so the device sees the zeroed table in memory */
201 #if defined(__i386__) || defined(__x86_64__)
202 asm volatile ("wbinvd":::"memory");
207 /* Set GART in SFB */
208 bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02);
209 /* Set GART base address to HW */
210 dwWriteReg(0xB034, __pa(info->lut_base));
/*
 * xgi_pcie_lut_cleanup - free the GART lookup-table pages allocated by
 * xgi_pcie_lut_init() and clear the pointer so the call is idempotent.
 */
217 static void xgi_pcie_lut_cleanup(struct xgi_info * info)
219 if (info->lut_base) {
220 XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n",
221 info->lut_base, info->lutPageOrder);
222 xgi_pcie_lut_free((unsigned long)info->lut_base,
/* prevent double-free on a second cleanup call */
224 info->lut_base = NULL;
/*
 * xgi_pcie_new_node - allocate one xgi_pcie_block descriptor from the slab
 * cache and reset every field to a known-empty state.
 * NOTE(review): the kmem_cache_alloc flags argument, the NULL check, and
 * the final "return block;" fall in lines elided from this truncated listing.
 */
228 static struct xgi_pcie_block *xgi_pcie_new_node(void)
230 struct xgi_pcie_block *block =
231 (struct xgi_pcie_block *) kmem_cache_alloc(xgi_pcie_cache_block,
237 block->offset = 0;	/* block's offset in pcie memory, begin from 0 */
238 block->size = 0;	/* The block size. */
239 block->bus_addr = 0;	/* CPU access address/bus address */
240 block->hw_addr = 0;	/* GE access address */
241 block->page_count = 0;
242 block->page_order = 0;
243 block->page_block = NULL;
244 block->page_table = NULL;
245 block->owner = PCIE_INVALID;
/*
 * xgi_pcie_block_stuff_free - release the backing storage attached to a
 * block: walk its xgi_page_block chain, unpinning and freeing each
 * contiguous page run, then free the per-page page_table array.
 * The block descriptor itself is NOT freed here; callers do that.
 * NOTE(review): the loop header over the chain, the declarations of
 * 'page'/'i', and the kfree of each 'free_block' node fall in lines
 * elided from this truncated listing.
 */
250 static void xgi_pcie_block_stuff_free(struct xgi_pcie_block * block)
253 struct xgi_page_block *page_block = block->page_block;
254 struct xgi_page_block *free_block;
255 unsigned long page_count = 0;
258 //XGI_INFO("block->page_block: 0x%p \n", block->page_block);
260 page_count = page_block->page_count;
/* drop the refcount taken at allocation time on every page of this run */
262 page = virt_to_page(page_block->virt_addr);
263 for (i = 0; i < page_count; i++, page++) {
264 XGI_DEC_PAGE_COUNT(page);
267 free_pages(page_block->virt_addr, page_block->page_order);
/* scrub the node before advancing so stale pointers can't be reused */
269 page_block->phys_addr = 0;
270 page_block->virt_addr = 0;
271 page_block->page_count = 0;
272 page_block->page_order = 0;
274 free_block = page_block;
275 page_block = page_block->next;
276 //XGI_INFO("free free_block: 0x%p \n", free_block);
281 if (block->page_table) {
282 //XGI_INFO("free block->page_table: 0x%p \n", block->page_table);
283 kfree(block->page_table);
284 block->page_table = NULL;
/*
 * xgi_pcie_heap_init - set up the PCIE heap: program the GART LUT, allocate
 * the heap descriptor, create the slab cache for block nodes, and seed the
 * free list with one block covering the whole aperture.
 *
 * The tail (from the "if (xgi_pcie_cache_block)" test down) is the error
 * unwind path; its goto labels are in lines elided from this truncated
 * listing, as are the success/failure return statements.
 */
288 int xgi_pcie_heap_init(struct xgi_info * info)
290 struct xgi_pcie_block *block;
292 if (!xgi_pcie_lut_init(info)) {
293 XGI_ERROR("xgi_pcie_lut_init failed\n");
298 (struct xgi_pcie_heap *) kmalloc(sizeof(struct xgi_pcie_heap), GFP_KERNEL);
299 if (!xgi_pcie_heap) {
300 XGI_ERROR("xgi_pcie_heap alloc failed\n");
303 INIT_LIST_HEAD(&xgi_pcie_heap->free_list);
304 INIT_LIST_HEAD(&xgi_pcie_heap->used_list);
305 INIT_LIST_HEAD(&xgi_pcie_heap->sort_list);
/* initially the entire aperture is free */
307 xgi_pcie_heap->max_freesize = info->pcie.size;
309 xgi_pcie_cache_block =
310 kmem_cache_create("xgi_pcie_block", sizeof(struct xgi_pcie_block), 0,
311 SLAB_HWCACHE_ALIGN, NULL, NULL);
313 if (NULL == xgi_pcie_cache_block) {
314 XGI_ERROR("Fail to creat xgi_pcie_block\n");
318 block = (struct xgi_pcie_block *) xgi_pcie_new_node();
320 XGI_ERROR("xgi_pcie_new_node failed\n");
/* one free block spanning [0, pcie.size) */
324 block->offset = 0;	/* block's offset in pcie memory, begin from 0 */
325 block->size = info->pcie.size;
327 list_add(&block->list, &xgi_pcie_heap->free_list);
329 XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n",
330 block->offset, block->size);
/* ---- error unwind: tear down whatever was built above ---- */
333 if (xgi_pcie_cache_block) {
334 kmem_cache_destroy(xgi_pcie_cache_block);
335 xgi_pcie_cache_block = NULL;
340 kfree(xgi_pcie_heap);
341 xgi_pcie_heap = NULL;
344 xgi_pcie_lut_cleanup(info);
/*
 * xgi_pcie_heap_check - debug dump: log the remaining free size and every
 * block on the used list together with the name of its owner.
 * NOTE(review): the assignments to ownerIndex for the PCIE_2D and
 * out-of-range branches are in lines elided from this truncated listing;
 * only the common PCIE_3D-relative index computation is visible.
 */
348 void xgi_pcie_heap_check(void)
351 struct xgi_pcie_block *block;
352 unsigned int ownerIndex;
/* index 0 = "2D"; 3D owners map to indices 1..4; anything else = "ELSE" */
353 static const char *const ownerStr[6] =
354 { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" };
356 if (!xgi_pcie_heap) {
360 XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize);
361 list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
362 if (block->owner == PCIE_2D)
364 else if (block->owner > PCIE_3D_TEXTURE
365 || block->owner < PCIE_2D
366 || block->owner < PCIE_3D)
/* map PCIE_3D..PCIE_3D_TEXTURE onto ownerStr[1..4] */
369 ownerIndex = block->owner - PCIE_3D + 1;
371 XGI_INFO("Allocated by %s, block offset: 0x%lx, size: 0x%lx \n",
372 ownerStr[ownerIndex], block->offset, block->size);
/*
 * xgi_pcie_heap_cleanup - tear down the whole PCIE heap: release the GART
 * LUT, free every block on all three heap lists (backing pages first, then
 * the descriptor), free the heap itself, and destroy the slab cache.
 * NOTE(review): the "j < 3, free_list++" walk assumes free_list, used_list
 * and sort_list are adjacent struct members in struct xgi_pcie_heap —
 * fragile; confirm against the header. Declaration of 'j' and the
 * enclosing "if (xgi_pcie_heap)" are in lines elided from this listing.
 */
377 void xgi_pcie_heap_cleanup(struct xgi_info * info)
379 struct list_head *free_list;
380 struct xgi_pcie_block *block;
381 struct xgi_pcie_block *next;
384 xgi_pcie_lut_cleanup(info);
385 XGI_INFO("xgi_pcie_lut_cleanup scceeded\n");
/* iterate free_list, used_list, sort_list by pointer arithmetic */
388 free_list = &xgi_pcie_heap->free_list;
389 for (j = 0; j < 3; j++, free_list++) {
390 list_for_each_entry_safe(block, next, free_list, list) {
392 ("No. %d block offset: 0x%lx size: 0x%lx\n",
393 j, block->offset, block->size);
394 xgi_pcie_block_stuff_free(block);
398 //XGI_INFO("No. %d free block: 0x%p \n", j, block);
399 kmem_cache_free(xgi_pcie_cache_block, block);
403 XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap);
404 kfree(xgi_pcie_heap);
405 xgi_pcie_heap = NULL;
408 if (xgi_pcie_cache_block) {
409 kmem_cache_destroy(xgi_pcie_cache_block);
410 xgi_pcie_cache_block = NULL;
/*
 * xgi_pcie_mem_alloc - carve a region out of the PCIE heap and back it with
 * system pages mapped through the GART.
 *
 * Steps: (1) return the cached singleton for vertex/cmdlist/scratchpad
 * owners if already built; (2) first-fit search of the free list; (3) split
 * or consume the free block; (4) allocate physical pages in runs of at most
 * 2^XGI_PCIE_ALLOC_MAX_ORDER, recording each run in an xgi_page_block chain
 * and each page in page_table and the hardware LUT; (5) flush caches and
 * the GART, move the block to the used list.
 *
 * Returns the new block, or NULL on failure (the explicit "return NULL;"
 * lines, several else/closing braces, and the error label near the end are
 * elided in this truncated listing). The trailing two statements are the
 * error path that frees a partially built block. Caller must hold
 * info->pcie_sem (see xgi_pcie_alloc).
 */
414 static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info,
415 unsigned long originalSize,
416 enum PcieOwner owner)
418 struct xgi_pcie_block *block, *used_block, *free_block;
419 struct xgi_page_block *page_block, *prev_page_block;
421 unsigned long page_order = 0, count = 0, index = 0;
422 unsigned long page_addr = 0;
423 unsigned long *lut_addr = NULL;
424 unsigned long lut_id = 0;
/* round the request up to a whole number of pages */
425 unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
426 int i, j, page_count = 0;
429 XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n");
430 XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
/* singleton owners: reuse the block built on the first request */
433 if (owner == PCIE_3D) {
434 if (xgi_pcie_vertex_block) {
436 ("PCIE Vertex has been created, return directly.\n");
437 return xgi_pcie_vertex_block;
441 if (owner == PCIE_3D_CMDLIST) {
442 if (xgi_pcie_cmdlist_block) {
444 ("PCIE Cmdlist has been created, return directly.\n");
445 return xgi_pcie_cmdlist_block;
449 if (owner == PCIE_3D_SCRATCHPAD) {
450 if (xgi_pcie_scratchpad_block) {
452 ("PCIE Scratchpad has been created, return directly.\n");
453 return xgi_pcie_scratchpad_block;
458 XGI_ERROR("size == 0 \n");
462 XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize);
463 if (size > xgi_pcie_heap->max_freesize) {
465 ("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n",
466 size, xgi_pcie_heap->max_freesize);
470 /* Jong 05/30/2006; find next free list which has enough space */
471 list_for_each_entry(block, &xgi_pcie_heap->free_list, list) {
472 if (size <= block->size) {
/* loop cursor back at the head means no block was big enough */
477 if (&block->list == &xgi_pcie_heap->free_list) {
478 XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n",
484 XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
485 size, free_block->offset, free_block->size);
/* exact fit: take the free block whole; otherwise split off the front */
487 if (size == free_block->size) {
488 used_block = free_block;
489 XGI_INFO("size==free_block->size: free_block = 0x%p\n",
491 list_del(&free_block->list);
493 used_block = xgi_pcie_new_node();
494 if (used_block == NULL) {
498 if (used_block == free_block) {
499 XGI_ERROR("used_block == free_block = 0x%p\n",
503 used_block->offset = free_block->offset;
504 used_block->size = size;
506 free_block->offset += size;
507 free_block->size -= size;
510 xgi_pcie_heap->max_freesize -= size;
/* CPU and GE both address the block at aperture base + offset */
512 used_block->bus_addr = info->pcie.base + used_block->offset;
513 used_block->hw_addr = info->pcie.base + used_block->offset;
514 used_block->page_count = page_count = size / PAGE_SIZE;
516 /* get page_order base on page_count */
517 for (used_block->page_order = 0; page_count; page_count >>= 1) {
518 ++used_block->page_order;
/* exact power of two: the loop overshot by one */
521 if ((used_block->page_count << 1) == (1 << used_block->page_order)) {
522 used_block->page_order--;
525 ("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n",
526 used_block->offset, used_block->size, used_block->bus_addr,
527 used_block->hw_addr, used_block->page_count,
528 used_block->page_order);
530 used_block->page_block = NULL;
531 //used_block->page_block = (struct xgi_pages_block *)kmalloc(sizeof(struct xgi_pages_block), GFP_KERNEL);
532 //if (!used_block->page_block) return NULL;_t
533 //used_block->page_block->next = NULL;
/* one xgi_pte per page, recording phys and virt addresses */
535 used_block->page_table =
536 (struct xgi_pte *) kmalloc(sizeof(struct xgi_pte) * used_block->page_count,
538 if (used_block->page_table == NULL) {
/* LUT slot for the first page of this block; assumes lut_addr is then
   advanced by lut_id in an elided line — TODO confirm */
542 lut_id = (used_block->offset >> PAGE_SHIFT);
543 lut_addr = info->lut_base;
545 XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id);
547 /* alloc free pages from system */
548 page_count = used_block->page_count;
549 page_block = used_block->page_block;
550 prev_page_block = used_block->page_block;
551 for (i = 0; page_count > 0; i++) {
552 /* if size is bigger than 2M bytes, it should be split */
553 if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) {
554 page_order = XGI_PCIE_ALLOC_MAX_ORDER;
557 for (page_order = 0; count; count >>= 1, ++page_order) ;
559 if ((page_count << 1) == (1 << page_order)) {
564 count = (1 << page_order);
565 page_addr = __get_free_pages(GFP_KERNEL, page_order);
566 XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n",
571 ("No: %d :Can't get free pages: 0x%lx from system memory !\n",
576 /* Jong 05/30/2006; test */
577 memset((unsigned char *)page_addr, 0xFF,
578 PAGE_SIZE << page_order);
579 /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */
/* first run: start the chain; later runs: append to it */
581 if (page_block == NULL) {
583 (struct xgi_page_block *)
584 kmalloc(sizeof(struct xgi_page_block), GFP_KERNEL);
587 ("Can't get memory for page_block! \n");
592 if (prev_page_block == NULL) {
593 used_block->page_block = page_block;
594 prev_page_block = page_block;
596 prev_page_block->next = page_block;
597 prev_page_block = page_block;
600 page_block->next = NULL;
601 page_block->phys_addr = __pa(page_addr);
602 page_block->virt_addr = page_addr;
603 page_block->page_count = count;
604 page_block->page_order = page_order;
607 ("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n",
608 page_block->phys_addr);
610 ("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n",
611 page_block->virt_addr);
613 page = virt_to_page(page_addr);
615 //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p"
616 // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n",
617 // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr);
/* record every page of the run in the page table and the hardware LUT,
   and pin it */
619 for (j = 0; j < count; j++, page++, lut_addr++) {
620 used_block->page_table[index + j].phys_addr =
621 __pa(page_address(page));
622 used_block->page_table[index + j].virt_addr =
623 (unsigned long)page_address(page);
626 ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n",
627 used_block->page_table[index + j].phys_addr);
629 ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n",
630 used_block->page_table[index + j].virt_addr);
632 *lut_addr = __pa(page_address(page));
633 XGI_INC_PAGE_COUNT(page);
638 ("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n",
639 __pa(page_address(page)), lut_addr, j,
645 page_block = page_block->next;
651 used_block->owner = owner;
652 list_add(&used_block->list, &xgi_pcie_heap->used_list);
/* flush CPU caches so the device sees the updated LUT entries */
654 #if defined(__i386__) || defined(__x86_64__)
655 asm volatile ("wbinvd":::"memory");
660 /* Flush GART Table */
661 bWriteReg(0xB03F, 0x40);
662 bWriteReg(0xB03F, 0x00);
/* cache the singleton blocks for reuse on later requests */
664 if (owner == PCIE_3D) {
665 xgi_pcie_vertex_block = used_block;
668 if (owner == PCIE_3D_CMDLIST) {
669 xgi_pcie_cmdlist_block = used_block;
672 if (owner == PCIE_3D_SCRATCHPAD) {
673 xgi_pcie_scratchpad_block = used_block;
676 XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n");
/* error path: undo a partially populated block */
680 xgi_pcie_block_stuff_free(used_block);
681 kmem_cache_free(xgi_pcie_cache_block, used_block);
/*
 * xgi_pcie_mem_free - return the used block at 'offset' to the free list,
 * releasing its backing pages and coalescing with a free neighbor on
 * either side when possible.
 *
 * Four outcomes (some branch heads are elided in this truncated listing):
 * both neighbors free -> merge all three into prev; only prev free ->
 * grow prev; only next free -> grow next downward; no neighbor -> recycle
 * used_block itself onto the free list. Caller must hold info->pcie_sem.
 */
685 static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info,
686 unsigned long offset)
688 struct xgi_pcie_block *used_block, *block;
689 struct xgi_pcie_block *prev, *next;
690 unsigned long upper, lower;
/* locate the used block by its heap offset */
692 list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
693 if (block->offset == offset) {
698 if (&block->list == &xgi_pcie_heap->used_list) {
699 XGI_ERROR("can't find block: 0x%lx to free!\n", offset);
705 ("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n",
706 used_block, used_block->offset, used_block->size,
707 used_block->bus_addr, used_block->hw_addr);
/* release backing pages and the page table first */
709 xgi_pcie_block_stuff_free(used_block);
711 /* update xgi_pcie_heap */
712 xgi_pcie_heap->max_freesize += used_block->size;
/* find free neighbors adjacent to [lower, upper) */
715 upper = used_block->offset + used_block->size;
716 lower = used_block->offset;
718 list_for_each_entry(block, &xgi_pcie_heap->free_list, list) {
719 if (block->offset == upper) {
721 } else if ((block->offset + block->size) == lower) {
726 XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
727 list_del(&used_block->list);
/* both sides free: fold used_block and next into prev */
730 prev->size += (used_block->size + next->size);
731 list_del(&next->list);
732 XGI_INFO("free node 0x%p\n", next);
733 kmem_cache_free(xgi_pcie_cache_block, next);
734 kmem_cache_free(xgi_pcie_cache_block, used_block);
/* only prev free: absorb used_block into it */
741 prev->size += used_block->size;
742 XGI_INFO("free node 0x%p\n", used_block);
743 kmem_cache_free(xgi_pcie_cache_block, used_block);
/* only next free: extend it downward over used_block */
749 next->size += used_block->size;
750 next->offset = used_block->offset;
751 XGI_INFO("free node 0x%p\n", used_block);
752 kmem_cache_free(xgi_pcie_cache_block, used_block);
/* no free neighbor: reset and recycle used_block onto the free list */
757 used_block->bus_addr = 0;
758 used_block->hw_addr = 0;
759 used_block->page_count = 0;
760 used_block->page_order = 0;
761 list_add(&used_block->list, &xgi_pcie_heap->free_list);
762 XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
763 used_block, used_block->offset, used_block->size);
/*
 * xgi_pcie_alloc - public entry point: allocate PCIE memory under the
 * pcie_sem lock, fill the caller's xgi_mem_alloc result, and record a
 * per-process (pid) bookkeeping node for 3D owners so the memory can be
 * reclaimed when the process dies.
 * NOTE(review): the kmalloc assignment target, the NULL-check body, and
 * the XGI_INFO argument list near the end are partially elided in this
 * truncated listing.
 */
767 void xgi_pcie_alloc(struct xgi_info * info, unsigned long size,
768 enum PcieOwner owner, struct xgi_mem_alloc * alloc)
770 struct xgi_pcie_block *block;
771 struct xgi_mem_pid *mempid_block;
/* serialize all heap mutation on the pcie semaphore */
773 xgi_down(info->pcie_sem);
774 block = xgi_pcie_mem_alloc(info, size, owner);
775 xgi_up(info->pcie_sem);
778 alloc->location = XGI_MEMLOC_INVALID;
782 XGI_ERROR("PCIE RAM allocation failed\n");
785 ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n",
786 block->offset, block->bus_addr);
787 alloc->location = XGI_MEMLOC_NON_LOCAL;
788 alloc->size = block->size;
789 alloc->bus_addr = block->bus_addr;
790 alloc->hw_addr = block->hw_addr;
793 manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE.
794 PCIE_3D request means a opengl process created.
795 PCIE_3D_TEXTURE request means texture cannot alloc from fb.
797 if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) {
799 kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL);
801 XGI_ERROR("mempid_block alloc failed\n");
802 mempid_block->location = XGI_MEMLOC_NON_LOCAL;
803 if (owner == PCIE_3D)
/* sentinel: the shared vertex block's real address lives in
   xgi_pcie_vertex_block, not per process */
804 mempid_block->bus_addr = 0xFFFFFFFF;	/*xgi_pcie_vertex_block has the address */
806 mempid_block->bus_addr = alloc->bus_addr;
807 mempid_block->pid = alloc->pid;
810 ("Memory ProcessID add one pcie block pid:%ld successfully! \n",
812 list_add(&mempid_block->list, &xgi_mempid_list);
/*
 * xgi_pcie_free - public entry point: free the PCIE block whose bus
 * address is 'bus_addr' and drop the matching per-process bookkeeping
 * node. The shared vertex block is reference-counted by pid entries: it
 * is only really freed when no other process still uses it.
 * NOTE(review): declarations of 'isvertex'/'processcnt', their
 * initialization, and several branch bodies (including the early return
 * when processcnt > 1) are elided in this truncated listing.
 */
817 void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr)
819 struct xgi_pcie_block *block;
820 unsigned long offset = bus_addr - info->pcie.base;
821 struct xgi_mem_pid *mempid_block;
822 struct xgi_mem_pid *mempid_freeblock = NULL;
/* is this the shared vertex buffer? */
826 if (xgi_pcie_vertex_block
827 && xgi_pcie_vertex_block->bus_addr == bus_addr)
831 /* check whether any other process is still using the vertex buffer */
834 list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
835 if (mempid_block->location == XGI_MEMLOC_NON_LOCAL
836 && mempid_block->bus_addr == 0xFFFFFFFF) {
/* more than one pid entry -> another process still holds it */
840 if (processcnt > 1) {
845 xgi_down(info->pcie_sem);
846 block = xgi_pcie_mem_free(info, offset);
847 xgi_up(info->pcie_sem);
850 XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset);
854 xgi_pcie_vertex_block = NULL;
/* remove this process's bookkeeping node for the freed block */
857 list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
858 if (mempid_block->location == XGI_MEMLOC_NON_LOCAL
859 && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF)
860 || (!isvertex && mempid_block->bus_addr == bus_addr))) {
861 mempid_freeblock = mempid_block;
865 if (mempid_freeblock) {
866 list_del(&mempid_freeblock->list);
868 ("Memory ProcessID delete one pcie block pid:%ld successfully! \n",
869 mempid_freeblock->pid);
870 kfree(mempid_freeblock);
875 * given a bus address, find the pcie mem block
876 * uses the bus address as the key.
/*
 * xgi_find_pcie_block - look up the used block containing 'address'
 * (a CPU bus address): first try an exact match on the block's base
 * bus_addr, then scan the block's per-page table.
 * Returns the block, or logs an error on a miss (the return statements
 * and the per-iteration advance of 'offset' are in lines elided from this
 * truncated listing).
 * NOTE(review): as visible, 'offset' is set to block->bus_addr on every
 * iteration and never incremented by i*PAGE_SIZE — presumably the elided
 * lines advance it (or index via page_table); verify against upstream.
 */
878 struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
879 unsigned long address)
881 struct xgi_pcie_block *block;
885 list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
886 if (block->bus_addr == address) {
890 if (block->page_table) {
891 for (i = 0; i < block->page_count; i++) {
892 unsigned long offset = block->bus_addr;
893 if ((address >= offset)
894 && (address < (offset + PAGE_SIZE))) {
901 XGI_ERROR("could not find map for vm 0x%lx\n", address);
908 * @address: GE HW address
910 * Returns CPU virtual address. Assumes the CPU virtual address is contiguous within the block.
/*
 * xgi_find_pcie_virt - translate a GE hardware address into the CPU
 * kernel virtual address of the same byte.
 *
 * Walks the used list for the block whose [hw_addr, hw_addr + size)
 * range contains 'address', then indexes the block's page_table by the
 * page offset and adds the in-page offset back on.
 * Returns the virtual address, or NULL when no block matches (the
 * "return ret;" / "return NULL;" lines are elided in this truncated
 * listing, as is the assignment of 'ret').
 */
913 void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address)
915 struct xgi_pcie_block *block;
/* byte offset within the containing page */
916 const unsigned long offset_in_page = address & (PAGE_SIZE - 1);
918 XGI_INFO("begin (address = 0x%lx, offset_in_page = %lu)\n",
919 address, offset_in_page);
921 list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
922 XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n",
923 block, block->hw_addr, block->size);
925 if ((address >= block->hw_addr)
926 && (address < (block->hw_addr + block->size))) {
/* page index of 'address' within this block */
927 const unsigned long loc_in_pagetable =
928 (address - block->hw_addr) >> PAGE_SHIFT;
930 (void *)(block->page_table[loc_in_pagetable].
931 virt_addr + offset_in_page);
933 XGI_INFO("PAGE_SHIFT = %d\n", PAGE_SHIFT);
934 XGI_INFO("block->page_table[0x%lx].virt_addr = 0x%lx\n",
936 block->page_table[loc_in_pagetable].virt_addr);
937 XGI_INFO("return 0x%p\n", ret);
943 XGI_ERROR("could not find map for vm 0x%lx\n", address);
/* xgi_read_pcie_mem - read from PCIE-mapped memory per 'req'.
 * NOTE(review): the body is not visible in this truncated listing;
 * it appears to be an empty stub — confirm before relying on it. */
947 void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req)
/* xgi_write_pcie_mem - write to PCIE-mapped memory per 'req'.
 * NOTE(review): the body is not visible in this truncated listing;
 * it appears to be an empty stub — confirm before relying on it. */
952 void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req)
957 address -- GE hw address
959 void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address)
961 unsigned long *virtaddr = 0;
963 XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n");
967 virtaddr = (unsigned long *)xgi_find_pcie_virt(info, address);
969 XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address);
970 XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr);
971 XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr);
972 if (virtaddr != NULL) {
973 *virtaddr = 0x00f00fff;
976 XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr);