/****************************************************************************
 * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation on the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 ***************************************************************************/
/* Lowest offset handed out by the framebuffer heap (bytes from the start
 * of video RAM); everything below is reserved (presumably for the front
 * buffer — TODO confirm against xgi_fb_heap_init's caller). */
#define XGI_FB_HEAP_START 0x1000000

/* Slab cache for struct xgi_mem_block nodes, shared by all heaps. */
struct kmem_cache *xgi_mem_block_cache = NULL;

/* NOTE(review): declared static here but the definition below lacks the
 * 'static' keyword — confirm intended linkage. */
static struct xgi_mem_block *xgi_mem_new_node(void);
/*
 * Initialize a heap managing the address range [start, end): reset all
 * three block lists and seed the free list with one block spanning the
 * whole range.  Returns 0 on success (error paths not visible in this
 * view — NOTE(review): confirm the NULL check on kmem_cache_alloc).
 */
int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start,
	struct xgi_mem_block *block;

	INIT_LIST_HEAD(&heap->free_list);
	INIT_LIST_HEAD(&heap->used_list);
	INIT_LIST_HEAD(&heap->sort_list);
	heap->initialized = TRUE;

	/* One block initially covers the entire [start, end) range. */
	block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL);

	block->offset = start;
	block->size = end - start;

	list_add(&block->list, &heap->free_list);

	/* Track total free bytes for the fast "too big" check in alloc. */
	heap->max_freesize = end - start;
/*
 * Tear down a heap: release every block node on all of its lists back
 * to the slab cache and mark the heap uninitialized.
 */
void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap)
	struct list_head *free_list;
	struct xgi_mem_block *block;
	struct xgi_mem_block *next;

	free_list = &heap->free_list;
	/* free_list, used_list and sort_list appear to be laid out
	 * consecutively in struct xgi_mem_heap; this walks all three by
	 * pointer increment — NOTE(review): fragile, depends on struct
	 * layout; confirm against the header. */
	for (i = 0; i < 3; i++, free_list++) {
		list_for_each_entry_safe(block, next, free_list, list) {
			/* continuation of an elided DRM_INFO() call */
			("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
			 i, block->offset, block->size);
			kmem_cache_free(xgi_mem_block_cache, block);

	heap->initialized = 0;
/*
 * Allocate a fresh block node from the slab cache, with its ownership
 * fields set to "unassigned" sentinels.  Returns NULL on allocation
 * failure (the check itself is elided from this view).
 */
struct xgi_mem_block *xgi_mem_new_node(void)
	struct xgi_mem_block *block =
	    kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL);

		DRM_ERROR("kmem_cache_alloc failed\n");

	block->owner = PCIE_INVALID;
	/* -1 means the block is not yet tied to any file handle. */
	block->filp = (DRMFILE) -1;
/*
 * Allocate at least originalSize bytes (rounded up to a whole number of
 * pages) from the heap, first-fit.  On success the returned node is on
 * the used list and tagged with 'owner'; returns NULL on failure
 * (zero size, request exceeding total free space, no fitting free
 * block, or node allocation failure).
 */
struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap,
				    unsigned long originalSize,
				    enum PcieOwner owner)
	struct xgi_mem_block *block, *free_block, *used_block;
	/* Round the request up to a page multiple. */
	unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;

	DRM_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",

		DRM_ERROR("size == 0\n");

	DRM_INFO("max_freesize: 0x%lx \n", heap->max_freesize);
	/* Cheap early reject before scanning the free list. */
	if (size > heap->max_freesize) {
		    ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n",
		     size, heap->max_freesize);

	/* First-fit scan: take the first free block large enough. */
	list_for_each_entry(block, &heap->free_list, list) {
		DRM_INFO("block: 0x%px \n", block);
		if (size <= block->size) {

	/* list_for_each_entry leaves the cursor at the list head when no
	 * block was large enough. */
	if (&block->list == &heap->free_list) {
		    ("Can't allocate %ldk size from frame buffer memory !\n",

	DRM_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
		 size, free_block->offset, free_block->size);

	if (size == free_block->size) {
		/* Exact fit: move the free node to the used list as-is. */
		used_block = free_block;
		DRM_INFO("size == free_block->size: free_block = 0x%p\n",
		list_del(&free_block->list);
		/* Split: a new node tracks the used front part, the free
		 * node keeps the shrunken remainder. */
		used_block = xgi_mem_new_node();

		if (used_block == NULL)

		/* Sanity check that the slab did not hand back the node
		 * still sitting on the free list. */
		if (used_block == free_block) {
			DRM_ERROR("used_block == free_block = 0x%p\n",

		used_block->offset = free_block->offset;
		used_block->size = size;

		free_block->offset += size;
		free_block->size -= size;

	heap->max_freesize -= size;

	list_add(&used_block->list, &heap->used_list);
	used_block->owner = owner;
/*
 * Return the used block starting at 'offset' to the free list,
 * coalescing with any free neighbors that touch it.  Fails when no used
 * block starts at 'offset' or when the block belongs to a different
 * file handle than 'filp'.
 */
int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset,
	struct xgi_mem_block *used_block = NULL, *block;
	struct xgi_mem_block *prev, *next;

	/* Locate the used block that starts exactly at 'offset'. */
	list_for_each_entry(block, &heap->used_list, list) {
		if (block->offset == offset) {

	/* Cursor at the list head means no match was found. */
	if (&block->list == &heap->used_list) {
		DRM_ERROR("can't find block: 0x%lx to free!\n", offset);

	/* Only the owning file handle may free its allocation. */
	if (block->filp != filp) {

	DRM_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n",
		 used_block, used_block->offset, used_block->size);

	heap->max_freesize += used_block->size;

	/* Boundaries of the block being freed, used to find free
	 * neighbors that touch it on either side. */
	upper = used_block->offset + used_block->size;
	lower = used_block->offset;

	list_for_each_entry(block, &heap->free_list, list) {
		if (block->offset == upper) {
		} else if ((block->offset + block->size) == lower) {

	DRM_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
	list_del(&used_block->list);

		/* Free on both sides: merge all three into 'prev' and
		 * discard the two now-redundant nodes. */
		prev->size += (used_block->size + next->size);
		list_del(&next->list);
		DRM_INFO("free node 0x%p\n", next);
		kmem_cache_free(xgi_mem_block_cache, next);
		kmem_cache_free(xgi_mem_block_cache, used_block);

		/* Free block immediately below: grow it upward. */
		prev->size += used_block->size;
		DRM_INFO("free node 0x%p\n", used_block);
		kmem_cache_free(xgi_mem_block_cache, used_block);

		/* Free block immediately above: grow it downward. */
		next->size += used_block->size;
		next->offset = used_block->offset;
		DRM_INFO("free node 0x%p\n", used_block);
		kmem_cache_free(xgi_mem_block_cache, used_block);

		/* No adjacent free block: recycle the node itself onto
		 * the free list. */
		list_add(&used_block->list, &heap->free_list);
		DRM_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
			 used_block, used_block->offset, used_block->size);
/*
 * Allocate video RAM on behalf of userspace.  A front-buffer request is
 * satisfied without touching the heap; everything else is carved from
 * info->fb_heap and the result is written back into *alloc.
 */
int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
	struct xgi_mem_block *block;

	if (alloc->is_front) {
		alloc->location = XGI_MEMLOC_LOCAL;
		    ("Video RAM allocation on front buffer successfully! \n");
		/* 2D engine is the owner for plain framebuffer allocs. */
		block = xgi_mem_alloc(&info->fb_heap, alloc->size, PCIE_2D);

		alloc->location = XGI_MEMLOC_LOCAL;
			DRM_ERROR("Video RAM allocation failed\n");

			DRM_INFO("Video RAM allocation succeeded: 0x%p\n",
				 (char *)block->offset);
			alloc->location = XGI_MEMLOC_LOCAL;
			alloc->size = block->size;
			alloc->offset = block->offset;
			/* NOTE(review): hw_addr mirrors the heap offset
			 * here — confirm the engine addresses video RAM
			 * from offset 0. */
			alloc->hw_addr = block->offset;
/*
 * DRM ioctl wrapper: copy the request from userspace, allocate, then
 * copy the filled-in result back out.
 */
int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS)
	struct xgi_mem_alloc alloc;
	struct xgi_info *info = dev->dev_private;

	DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data,

	err = xgi_fb_alloc(info, & alloc, filp);

	/* Report offset/size/hw_addr back to the caller. */
	DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data,
			       alloc, sizeof(alloc));
307 int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp)
312 DRM_INFO("free onscreen frame buffer successfully !\n");
315 err = xgi_mem_free(&info->fb_heap, offset, filp);
/* DRM ioctl wrapper: copy the offset from userspace and free it. */
int xgi_fb_free_ioctl(DRM_IOCTL_ARGS)
	struct xgi_info *info = dev->dev_private;

	DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data,

	return xgi_fb_free(info, offset, filp);
/* Initialize the device's framebuffer heap, skipping the reserved
 * region below XGI_FB_HEAP_START. */
int xgi_fb_heap_init(struct xgi_info * info)
	return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START,
/*
 * Free all blocks associated with a particular file handle.
 *
 * Called on file close; the used list is re-scanned from the head after
 * each free because xgi_mem_free unlinks nodes from it.
 */
void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp)
	/* Nothing to do if the heap was never set up. */
	if (!info->fb_heap.initialized) {

		struct xgi_mem_block *block;

		list_for_each_entry(block, &info->fb_heap.used_list, list) {
			if (block->filp == filp) {

		/* Cursor reached the head: no block owned by filp left. */
		if (&block->list == &info->fb_heap.used_list) {

		/* Best-effort: ownership was just verified above. */
		(void) xgi_mem_free(&info->fb_heap, block->offset, filp);