1 /* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*-
2 * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
35 #define malloctype DRM(M_DRM)
36 /* The macros confliced in the MALLOC_DEFINE */
38 MALLOC_DEFINE(malloctype, "drm", "DRM Data Structures");
/*
 * Bookkeeping record for one DRM_MEM_* allocation area.  One entry per
 * area lives in DRM(mem_stats)[]; all counters are protected by
 * DRM(mem_lock).
 */
typedef struct drm_mem_stats {
	const char	*name;			/* area label shown in /dev/drm/mem */
	int		 succeed_count;		/* successful allocations */
	int		 free_count;		/* frees performed */
	int		 fail_count;		/* failed allocation attempts */
	unsigned long	 bytes_allocated;	/* running total of bytes allocated */
	unsigned long	 bytes_freed;		/* running total of bytes freed */
} drm_mem_stats_t;
52 static DRM_SPINTYPE DRM(mem_lock);
53 static unsigned long DRM(ram_available) = 0; /* In pages */
54 static unsigned long DRM(ram_used) = 0;
55 static drm_mem_stats_t DRM(mem_stats)[] = {
56 [DRM_MEM_DMA] = { "dmabufs" },
57 [DRM_MEM_SAREA] = { "sareas" },
58 [DRM_MEM_DRIVER] = { "driver" },
59 [DRM_MEM_MAGIC] = { "magic" },
60 [DRM_MEM_IOCTLS] = { "ioctltab" },
61 [DRM_MEM_MAPS] = { "maplist" },
62 [DRM_MEM_VMAS] = { "vmalist" },
63 [DRM_MEM_BUFS] = { "buflist" },
64 [DRM_MEM_SEGS] = { "seglist" },
65 [DRM_MEM_PAGES] = { "pagelist" },
66 [DRM_MEM_FILES] = { "files" },
67 [DRM_MEM_QUEUES] = { "queues" },
68 [DRM_MEM_CMDS] = { "commands" },
69 [DRM_MEM_MAPPINGS] = { "mappings" },
70 [DRM_MEM_BUFLISTS] = { "buflists" },
71 [DRM_MEM_AGPLISTS] = { "agplist" },
72 [DRM_MEM_SGLISTS] = { "sglist" },
73 [DRM_MEM_TOTALAGP] = { "totalagp" },
74 [DRM_MEM_BOUNDAGP] = { "boundagp" },
75 [DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
76 [DRM_MEM_STUB] = { "stub" },
77 { NULL, 0, } /* Last entry must be null */
80 void DRM(mem_init)(void)
84 DRM_SPININIT(DRM(mem_lock), "drm memory");
86 for (mem = DRM(mem_stats); mem->name; ++mem) {
87 mem->succeed_count = 0;
90 mem->bytes_allocated = 0;
94 DRM(ram_available) = 0; /* si.totalram */
99 /* drm_mem_info is called whenever a process reads /dev/drm/mem. */
100 static int DRM(_mem_info) DRM_SYSCTL_HANDLER_ARGS
106 DRM_SYSCTL_PRINT(" total counts "
107 " | outstanding \n");
108 DRM_SYSCTL_PRINT("type alloc freed fail bytes freed"
109 " | allocs bytes\n\n");
110 DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
111 "system", 0, 0, 0, DRM(ram_available));
112 DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
113 "locked", 0, 0, 0, DRM(ram_used));
114 DRM_SYSCTL_PRINT("\n");
115 for (pt = DRM(mem_stats); pt->name; pt++) {
116 DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
123 pt->succeed_count - pt->free_count,
124 (long)pt->bytes_allocated
125 - (long)pt->bytes_freed);
127 SYSCTL_OUT(req, "", 1);
132 int DRM(mem_info) DRM_SYSCTL_HANDLER_ARGS
136 DRM_SPINLOCK(&DRM(mem_lock));
137 ret = DRM(_mem_info)(oidp, arg1, arg2, req);
138 DRM_SPINUNLOCK(&DRM(mem_lock));
143 void *DRM(alloc)(size_t size, int area)
148 DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
153 if (!(pt = malloc(size, DRM(M_DRM), M_NOWAIT))) {
154 #elif defined(__NetBSD__)
155 if (!(pt = malloc(size, M_DEVBUF, M_NOWAIT))) {
157 DRM_SPINLOCK(&DRM(mem_lock));
158 ++DRM(mem_stats)[area].fail_count;
159 DRM_SPINUNLOCK(&DRM(mem_lock));
162 DRM_SPINLOCK(&DRM(mem_lock));
163 ++DRM(mem_stats)[area].succeed_count;
164 DRM(mem_stats)[area].bytes_allocated += size;
165 DRM_SPINUNLOCK(&DRM(mem_lock));
169 void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
173 if (!(pt = DRM(alloc)(size, area))) return NULL;
174 if (oldpt && oldsize) {
175 memcpy(pt, oldpt, oldsize);
176 DRM(free)(oldpt, oldsize, area);
181 char *DRM(strdup)(const char *s, int area)
184 int length = s ? strlen(s) : 0;
186 if (!(pt = DRM(alloc)(length+1, area))) return NULL;
191 void DRM(strfree)(char *s, int area)
197 size = 1 + strlen(s);
198 DRM(free)((void *)s, size, area);
201 void DRM(free)(void *pt, size_t size, int area)
206 if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
209 free(pt, DRM(M_DRM));
210 #elif defined(__NetBSD__)
213 DRM_SPINLOCK(&DRM(mem_lock));
214 DRM(mem_stats)[area].bytes_freed += size;
215 free_count = ++DRM(mem_stats)[area].free_count;
216 alloc_count = DRM(mem_stats)[area].succeed_count;
217 DRM_SPINUNLOCK(&DRM(mem_lock));
218 if (free_count > alloc_count) {
219 DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
220 free_count, alloc_count);
224 void *DRM(ioremap)(unsigned long offset, unsigned long size)
229 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
230 "Mapping 0 bytes at 0x%08lx\n", offset);
234 if (!(pt = pmap_mapdev(offset, size))) {
235 DRM_SPINLOCK(&DRM(mem_lock));
236 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
237 DRM_SPINUNLOCK(&DRM(mem_lock));
240 DRM_SPINLOCK(&DRM(mem_lock));
241 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
242 DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
243 DRM_SPINUNLOCK(&DRM(mem_lock));
249 void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
254 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
255 "Mapping 0 bytes at 0x%08lx\n", offset);
260 if (!(pt = ioremap_nocache(offset, size))) {
261 DRM_SPINLOCK(&DRM(mem_lock));
262 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
263 DRM_SPINUNLOCK(&DRM(mem_lock));
266 DRM_SPINLOCK(&DRM(mem_lock));
267 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
268 DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
269 DRM_SPINUNLOCK(&DRM(mem_lock));
274 void DRM(ioremapfree)(void *pt, unsigned long size)
280 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
281 "Attempt to free NULL pointer\n");
283 pmap_unmapdev((vm_offset_t) pt, size);
285 DRM_SPINLOCK(&DRM(mem_lock));
286 DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
287 free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
288 alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
289 DRM_SPINUNLOCK(&DRM(mem_lock));
290 if (free_count > alloc_count) {
291 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
292 "Excess frees: %d frees, %d allocs\n",
293 free_count, alloc_count);
297 #if __REALLY_HAVE_AGP
298 agp_memory *DRM(alloc_agp)(int pages, u32 type)
303 DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
307 if ((handle = DRM(agp_allocate_memory)(pages, type))) {
308 DRM_SPINLOCK(&DRM(mem_lock));
309 ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
310 DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
311 += pages << PAGE_SHIFT;
312 DRM_SPINUNLOCK(&DRM(mem_lock));
315 DRM_SPINLOCK(&DRM(mem_lock));
316 ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
317 DRM_SPINUNLOCK(&DRM(mem_lock));
321 int DRM(free_agp)(agp_memory *handle, int pages)
327 DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
328 "Attempt to free NULL AGP handle\n");
329 return DRM_ERR(EINVAL);
332 if (DRM(agp_free_memory)(handle)) {
333 DRM_SPINLOCK(&DRM(mem_lock));
334 free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
335 alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
336 DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
337 += pages << PAGE_SHIFT;
338 DRM_SPINUNLOCK(&DRM(mem_lock));
339 if (free_count > alloc_count) {
340 DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
341 "Excess frees: %d frees, %d allocs\n",
342 free_count, alloc_count);
346 return DRM_ERR(EINVAL);
349 int DRM(bind_agp)(agp_memory *handle, unsigned int start)
352 device_t dev = DRM_AGP_FIND_DEVICE();
353 struct agp_memory_info info;
359 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
360 "Attempt to bind NULL AGP handle\n");
361 return DRM_ERR(EINVAL);
364 if (!(retcode = DRM(agp_bind_memory)(handle, start))) {
365 DRM_SPINLOCK(&DRM(mem_lock));
366 ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
367 agp_memory_info(dev, handle, &info);
368 DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
370 DRM_SPINUNLOCK(&DRM(mem_lock));
373 DRM_SPINLOCK(&DRM(mem_lock));
374 ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
375 DRM_SPINUNLOCK(&DRM(mem_lock));
376 return DRM_ERR(retcode);
379 int DRM(unbind_agp)(agp_memory *handle)
383 int retcode = EINVAL;
384 device_t dev = DRM_AGP_FIND_DEVICE();
385 struct agp_memory_info info;
391 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
392 "Attempt to unbind NULL AGP handle\n");
393 return DRM_ERR(retcode);
396 agp_memory_info(dev, handle, &info);
398 if ((retcode = DRM(agp_unbind_memory)(handle)))
399 return DRM_ERR(retcode);
401 DRM_SPINLOCK(&DRM(mem_lock));
402 free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
403 alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
404 DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
406 DRM_SPINUNLOCK(&DRM(mem_lock));
407 if (free_count > alloc_count) {
408 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
409 "Excess frees: %d frees, %d allocs\n",
410 free_count, alloc_count);
412 return DRM_ERR(retcode);