/* drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 */
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include "drmP.h"

/*
 * Compute order.  Can be made faster.
 */
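/*
 * drm_order(size) returns the smallest order for which (1 << order) >= size,
 * i.e. log2(size) rounded up: drm_order(1) == 0, drm_order(2) == 1,
 * drm_order(3) == 2, drm_order(4096) == 12.  The buffer-allocation code below
 * uses it to bucket requests by power-of-two size.
 */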
int drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

        if ( size & ~(1 << order) )
                ++order;

        return order;
}

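/*
 * drm_get_resource_start()/drm_get_resource_len() look up the bus address and
 * size of one of the device's PCI BARs.  The caller passes a zero-based BAR
 * index; "resource * 4 + 0x10" converts it to the corresponding PCI
 * configuration-space register (BAR0 lives at offset 0x10 and each BAR is
 * four bytes wide), which is the rid newbus expects for SYS_RES_MEMORY
 * resources.
 */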
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
        struct resource *bsr;
        unsigned long offset;

        resource = resource * 4 + 0x10;

        bsr = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &resource,
            RF_ACTIVE | RF_SHAREABLE);
        if (bsr == NULL) {
                DRM_ERROR("Couldn't find resource 0x%x\n", resource);
                return 0;
        }

        offset = rman_get_start(bsr);

        bus_release_resource(dev->device, SYS_RES_MEMORY, resource, bsr);

        return offset;
}

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
        struct resource *bsr;
        unsigned long len;

        resource = resource * 4 + 0x10;

        bsr = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &resource,
            RF_ACTIVE | RF_SHAREABLE);
        if (bsr == NULL) {
                DRM_ERROR("Couldn't find resource 0x%x\n", resource);
                return 0;
        }

        len = rman_get_size(bsr);

        bus_release_resource(dev->device, SYS_RES_MEMORY, resource, bsr);

        return len;
}

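/*
 * drm_initmap() is called by drivers at attach time to register a
 * kernel-owned map for a register (MMIO) or framebuffer BAR.  The bus
 * resource is held for the lifetime of the map, and drm_addmap() below hands
 * the same map back to userspace instead of creating a duplicate.
 */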
int drm_initmap(drm_device_t *dev, unsigned long start, unsigned long len,
                unsigned int resource, int type, int flags)
{
        drm_local_map_t *map;
        struct resource *bsr;

        if (type != _DRM_REGISTERS && type != _DRM_FRAME_BUFFER)
                return EINVAL;
        if (len == 0)
                return EINVAL;

        map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
        if (map == NULL)
                return ENOMEM;

        map->rid = resource * 4 + 0x10;
        bsr = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &map->rid,
            RF_ACTIVE | RF_SHAREABLE);
        if (bsr == NULL) {
                DRM_ERROR("Couldn't allocate %s resource\n",
                    ((type == _DRM_REGISTERS) ? "mmio" : "framebuffer"));
                free(map, M_DRM);
                return ENOMEM;
        }

        map->kernel_owned = 1;
        map->type = type;
        map->flags = flags;
        map->bsr = bsr;
        map->bst = rman_get_bustag(bsr);
        map->bsh = rman_get_bushandle(bsr);
        map->offset = start;
        map->size = len;

        if (type == _DRM_REGISTERS)
                map->handle = rman_get_virtual(bsr);

        DRM_DEBUG("initmap %d,0x%x@0x%lx/0x%lx\n", map->type, map->flags,
            map->offset, map->size);

        if (map->flags & _DRM_WRITE_COMBINING) {
                int err;

                err = drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC);
                if (err == 0)
                        map->mtrr = 1;
        }

        DRM_LOCK();
        TAILQ_INSERT_TAIL(&dev->maplist, map, link);
        DRM_UNLOCK();

        return 0;
}
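
/*
 * Typical use from a driver's attach/preinit hook (hypothetical example; the
 * BAR indices and flags are device-specific and shown only for illustration):
 *
 *     drm_initmap(dev, drm_get_resource_start(dev, 2),
 *         drm_get_resource_len(dev, 2), 2, _DRM_REGISTERS, 0);
 *     drm_initmap(dev, drm_get_resource_start(dev, 0),
 *         drm_get_resource_len(dev, 0), 0, _DRM_FRAME_BUFFER,
 *         _DRM_WRITE_COMBINING);
 */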

int drm_addmap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_map_t request;
        drm_local_map_t *map;

        if (!(dev->flags & (FREAD|FWRITE)))
                return DRM_ERR(EACCES); /* Require read/write */

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );

        /* Only allow shared memory to be removable since we only keep enough
         * bookkeeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ((request.flags & _DRM_REMOVABLE) && request.type != _DRM_SHM)
                return EINVAL;
        if ((request.offset & PAGE_MASK) || (request.size & PAGE_MASK))
                return EINVAL;
        if (request.offset + request.size < request.offset)
                return EINVAL;

        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
            request.offset, request.size, request.type);

        /* Check if this is just another version of a kernel-allocated map, and
         * just hand that back if so.
         */
        if (request.type == _DRM_REGISTERS || request.type == _DRM_FRAME_BUFFER) {
                DRM_LOCK();
                TAILQ_FOREACH(map, &dev->maplist, link) {
                        if (map->kernel_owned && map->type == request.type &&
                            map->offset == request.offset) {
                                /* XXX: this size setting is questionable. */
                                map->size = request.size;
                                DRM_DEBUG("Found kernel map %d\n", request.type);
                                goto done;
                        }
                }
                DRM_UNLOCK();
        }

        /* Allocate a new map structure, fill it in, and do any type-specific
         * initialization necessary.
         */
        map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
        if ( !map )
                return DRM_ERR(ENOMEM);

        map->offset = request.offset;
        map->size = request.size;
        map->type = request.type;
        map->flags = request.flags;

        switch ( map->type ) {
        case _DRM_REGISTERS:
                map->handle = drm_ioremap(dev, map);
                if (!(map->flags & _DRM_WRITE_COMBINING))
                        break;
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
                        map->mtrr = 1;
                break;
        case _DRM_SHM:
                map->handle = malloc(map->size, M_DRM, M_NOWAIT);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order(map->size), map->handle );
                if ( !map->handle ) {
                        free(map, M_DRM);
                        return DRM_ERR(ENOMEM);
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        DRM_LOCK();
                        if (dev->lock.hw_lock != NULL) {
                                DRM_UNLOCK();
                                free(map->handle, M_DRM);
                                free(map, M_DRM);
                                return DRM_ERR(EBUSY);
                        }
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                        DRM_UNLOCK();
                }
                break;
        case _DRM_AGP:
                map->offset += dev->agp->base;
                map->mtrr   = dev->agp->mtrr; /* for getmap */
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        free(map, M_DRM);
                        return DRM_ERR(EINVAL);
                }
                map->offset = map->offset + dev->sg->handle;
                break;
        case _DRM_CONSISTENT:
                map->dmah = drm_pci_alloc(dev, map->size, map->size,
                    0xfffffffful);
                if (map->dmah == NULL) {
                        free(map, M_DRM);
                        return ENOMEM;
                }
                map->handle = map->dmah->vaddr;
                map->offset = map->dmah->busaddr;
                break;
        default:
                free(map, M_DRM);
                return DRM_ERR(EINVAL);
        }

        DRM_LOCK();
        TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
        /* Jumped to, with lock held, when a kernel map is found. */
        request.offset = map->offset;
        request.size = map->size;
        request.type = map->type;
        request.flags = map->flags;
        request.mtrr   = map->mtrr;
        request.handle = map->handle;
        DRM_UNLOCK();

        DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", request.type, request.offset, request.size);

        if ( request.type != _DRM_SHM ) {
                request.handle = (void *)request.offset;
        }

        DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );

        return 0;
}
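
/*
 * From userspace this ioctl is normally reached through libdrm's drmAddMap()
 * (an illustrative sketch; check xf86drm.h for the exact prototype shipped
 * with your libdrm):
 *
 *     drm_handle_t handle;
 *     if (drmAddMap(fd, offset, size, DRM_REGISTERS, 0, &handle) == 0)
 *             ... pass "handle" as the offset to mmap(2) on the DRM fd ...
 *
 * For _DRM_SHM maps the returned handle is the kernel virtual address of the
 * allocation; for every other type it is the bus address (request.offset).
 */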

void drm_remove_map(drm_device_t *dev, drm_local_map_t *map)
{
        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        TAILQ_REMOVE(&dev->maplist, map, link);

        switch (map->type) {
        case _DRM_REGISTERS:
                if (map->bsr == NULL)
                        drm_ioremapfree(map);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (map->mtrr) {
                        int __unused retcode;

                        retcode = drm_mtrr_del(map->offset, map->size,
                            DRM_MTRR_WC);
                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                }
                break;
        case _DRM_SHM:
                free(map->handle, M_DRM);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                drm_pci_free(dev, map->dmah);
                break;
        }

        if (map->bsr != NULL) {
                bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
                    map->bsr);
        }

        free(map, M_DRM);
}

/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */

int drm_rmmap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_local_map_t *map;
        drm_map_t request;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

        DRM_LOCK();
        TAILQ_FOREACH(map, &dev->maplist, link) {
                if (map->handle == request.handle &&
                    map->flags & _DRM_REMOVABLE)
                        break;
        }

        /* No match found. */
        if (map == NULL) {
                DRM_UNLOCK();
                return DRM_ERR(EINVAL);
        }

        drm_remove_map(dev, map);

        DRM_UNLOCK();

        return 0;
}

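/*
 * drm_cleanup_buf_error() tears down a partially constructed buffer entry
 * after an allocation failure: it releases any PCI segments already in
 * seglist and the per-buffer dev_private blocks already in buflist, then
 * resets the counts so nothing is freed twice.
 */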
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        drm_pci_free(dev, entry->seglist[i]);
                }
                free(entry->seglist, M_DRM);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        free(entry->buflist[i].dev_private, M_DRM);
                }
                free(entry->buflist, M_DRM);

                entry->buf_count = 0;
        }
}

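/*
 * drm_addbufs_agp() creates DMA buffers inside an AGP region that the driver
 * has already bound, starting at request->agp_start.  No backing memory is
 * allocated here; the buffers are simply carved out of that aperture in
 * "alignment"-sized slots, so only the bookkeeping allocations (buflist and
 * dev_private) can fail.
 */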
static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
            M_NOWAIT | M_ZERO);
        if ( !entry->buflist ) {
                return DRM_ERR(ENOMEM);
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->pending = 0;
                buf->filp    = NULL;

                buf->dev_priv_size = dev->dev_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return DRM_ERR(ENOMEM);
                }

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
            M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        return 0;
}

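/*
 * drm_addbufs_pci() allocates the buffers from system memory with
 * drm_pci_alloc(), one physically contiguous chunk of PAGE_SIZE << page_order
 * bytes per segment, and slices each chunk into "alignment"-sized buffers.
 * dma->pagelist records the kernel virtual address of every page so the core
 * can later map the pool into userspace; a copy is built first and only
 * swapped in once every allocation has succeeded.
 */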
static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
                   request->count, request->size, size, order );

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
            M_NOWAIT | M_ZERO);
        entry->seglist = malloc(count * sizeof(*entry->seglist), M_DRM,
            M_NOWAIT | M_ZERO);

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = malloc((dma->page_count + (count << page_order)) *
            sizeof(*dma->pagelist), M_DRM, M_NOWAIT);

        if (entry->buflist == NULL || entry->seglist == NULL ||
            temp_pagelist == NULL) {
                free(entry->buflist, M_DRM);
                free(entry->seglist, M_DRM);
                return DRM_ERR(ENOMEM);
        }

        memcpy(temp_pagelist, dma->pagelist, dma->page_count *
            sizeof(*dma->pagelist));

        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
                    0xfffffffful);
                if (dmah == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        free(temp_pagelist, M_DRM);
                        return DRM_ERR(ENOMEM);
                }

                entry->seglist[entry->seg_count++] = dmah;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ %p\n",
                                   dma->page_count + page_count,
                                   (char *)dmah->vaddr + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++] =
                            (long)dmah->vaddr + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = ((char *)dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next    = NULL;
                        buf->pending = 0;
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->dev_priv_size;
                        buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                            M_NOWAIT | M_ZERO);
                        if (buf->dev_private == NULL) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                free(temp_pagelist, M_DRM);
                                return DRM_ERR(ENOMEM);
                        }

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
            M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                free(temp_pagelist, M_DRM);
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        free(dma->pagelist, M_DRM);
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        request->count = entry->buf_count;
        request->size = size;

        return 0;
}

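/*
 * drm_addbufs_sg() is the scatter/gather analogue of the AGP case: the
 * buffers are carved out of the SG area previously set up by the driver,
 * starting at request->agp_start within that area, and buf->address is offset
 * by dev->sg->handle so it refers into the SG mapping.  Again, only the
 * bookkeeping allocations can fail here.
 */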
static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
            M_NOWAIT | M_ZERO);
        if (entry->buflist == NULL)
                return DRM_ERR(ENOMEM);

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->pending = 0;
                buf->filp    = NULL;

                buf->dev_priv_size = dev->dev_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return DRM_ERR(ENOMEM);
                }

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
            M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        return 0;
}

int drm_addbufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_buf_desc_t request;
        int err;
        int order;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        if (request.count < 0 || request.count > 4096)
                return DRM_ERR(EINVAL);

        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return DRM_ERR(EINVAL);

        DRM_SPINLOCK(&dev->dma_lock);
        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EBUSY);
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(ENOMEM);
        }

        if ( request.flags & _DRM_AGP_BUFFER )
                err = drm_addbufs_agp(dev, &request);
        else if ( request.flags & _DRM_SG_BUFFER )
                err = drm_addbufs_sg(dev, &request);
        else
                err = drm_addbufs_pci(dev, &request);
        DRM_SPINUNLOCK(&dev->dma_lock);

        DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, sizeof(request));

        return err;
}
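
/*
 * Userspace reaches this through libdrm's drmAddBufs(fd, count, size, flags,
 * agp_start) (illustrative; see xf86drm.h for the exact prototype), which
 * fills in a drm_buf_desc_t and issues the ioctl.  On return, request.count
 * holds the number of buffers actually created and request.size the rounded
 * power-of-two buffer size.
 */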

int drm_infobufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        int i;
        int count;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );

        DRM_SPINLOCK(&dev->dma_lock);
        ++dev->buf_use;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t from;

                                from.count = dma->bufs[i].buf_count;
                                from.size = dma->bufs[i].buf_size;
                                from.low_mark = dma->bufs[i].freelist.low_mark;
                                from.high_mark = dma->bufs[i].freelist.high_mark;

                                if (DRM_COPY_TO_USER(&request.list[count], &from,
                                    sizeof(drm_buf_desc_t)) != 0) {
                                        retcode = DRM_ERR(EFAULT);
                                        break;
                                }

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );

        return retcode;
}

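/*
 * drm_markbufs() sets the low and high watermarks on the freelist of one
 * power-of-two size bucket; the values may not exceed the number of buffers
 * in that bucket.
 */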
int drm_markbufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );

        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
            request.low_mark < 0 || request.high_mark < 0) {
                return DRM_ERR(EINVAL);
        }

        DRM_SPINLOCK(&dev->dma_lock);
        if (request.low_mark > dma->bufs[order].buf_count ||
            request.high_mark > dma->bufs[order].buf_count) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EINVAL);
        }

        dma->bufs[order].freelist.low_mark  = request.low_mark;
        dma->bufs[order].freelist.high_mark = request.high_mark;
        DRM_SPINUNLOCK(&dev->dma_lock);

        return 0;
}

int drm_freebufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );

        DRM_DEBUG( "%d\n", request.count );

        DRM_SPINLOCK(&dev->dma_lock);
        for ( i = 0 ; i < request.count ; i++ ) {
                if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
                        retcode = DRM_ERR(EFAULT);
                        break;
                }
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        retcode = DRM_ERR(EINVAL);
                        break;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                                   DRM_CURRENTPID);
                        retcode = DRM_ERR(EINVAL);
                        break;
                }
                drm_free_buffer(dev, buf);
        }
        DRM_SPINUNLOCK(&dev->dma_lock);

        return retcode;
}

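/*
 * drm_mapbufs() maps the entire buffer pool into the calling process with a
 * single mmap of the DRM device (the AGP/SG aperture map when those backends
 * are in use, the PCI DMA pool otherwise) and then copies the per-buffer
 * index, size and user address table back to userspace.  libdrm wraps this as
 * drmMapBufs() (illustrative reference).
 */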
int drm_mapbufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        vm_offset_t address;
        struct vmspace *vms;
#ifdef __FreeBSD__
        vm_ooffset_t foff;
        vm_size_t size;
        vm_offset_t vaddr;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
        struct vnode *vn;
        voff_t foff;
        vsize_t size;
        vaddr_t vaddr;
#endif /* __NetBSD__ || __OpenBSD__ */

        drm_buf_map_t request;
        int i;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );

#if defined(__NetBSD__) || defined(__OpenBSD__)
        if (!vfinddev(kdev, VCHR, &vn))
                return 0;       /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD */

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
        vms = p->td_proc->p_vmspace;
#else
        vms = p->p_vmspace;
#endif

        DRM_SPINLOCK(&dev->dma_lock);
        dev->buf_use++;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        if (request.count < dma->buf_count)
                goto done;

        if ((dev->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
            (dev->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
                drm_local_map_t *map = dev->agp_buffer_map;

                if (map == NULL) {
                        retcode = EINVAL;
                        goto done;
                }
                size = round_page(map->size);
                foff = map->offset;
        } else {
                size = round_page(dma->byte_count);
                foff = 0;
        }

#ifdef __FreeBSD__
        vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, kdev, foff );
#else
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
#endif
#elif defined(__NetBSD__) || defined(__OpenBSD__)
        vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
        retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
            UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
            &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ || __OpenBSD */
        if (retcode)
                goto done;

        request.virtual = (void *)vaddr;

        for ( i = 0 ; i < dma->buf_count ; i++ ) {
                if (DRM_COPY_TO_USER(&request.list[i].idx,
                    &dma->buflist[i]->idx, sizeof(request.list[0].idx))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request.list[i].total,
                    &dma->buflist[i]->total, sizeof(request.list[0].total))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
                    sizeof(zero))) {
                        retcode = EFAULT;
                        goto done;
                }
                address = vaddr + dma->buflist[i]->offset; /* *** */
                if (DRM_COPY_TO_USER(&request.list[i].address, &address,
                    sizeof(address))) {
                        retcode = EFAULT;
                        goto done;
                }
        }

 done:
        request.count = dma->buf_count;

        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));

        return DRM_ERR(retcode);
}