/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */
/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include "dev/pci/pcireg.h"

#include "dev/drm/drmP.h"
/*
 * Compute the order (base-2 logarithm of size, rounded up).  Can be made
 * faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order);

	if (size & ~(1 << order))
		++order;

	return order;
}
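/*
 * Illustrative note (added, not in the original source): drm_order()
 * returns the smallest n with (1 << n) >= size, e.g. drm_order(4096) == 12
 * and drm_order(4097) == 13.  The addbufs paths below use it to index
 * dma->bufs[] by allocation order.
 */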
/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no
 * virtual address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL) {
		return 0;
	}

	dev->pcirid[resource] = PCIR_BAR(resource);
	dev->pcir[resource] = bus_alloc_resource_any(dev->device,
	    SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);

	if (dev->pcir[resource] == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	return 0;
}
unsigned long drm_get_resource_start(struct drm_device *dev,
    unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
    unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}
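/*
 * Sketch (added for illustration, not from the original file): a driver's
 * attach path would typically use these helpers to locate a BAR before
 * registering it as a map with drm_addmap() below, e.g.:
 *
 *	unsigned long start = drm_get_resource_start(dev, 0);
 *	unsigned long len = drm_get_resource_len(dev, 0);
 *	drm_local_map_t *map;
 *	drm_addmap(dev, start, len, _DRM_REGISTERS, 0, &map);
 */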
int drm_addmap(struct drm_device *dev, unsigned long offset,
    unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);
	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
	if (map == NULL)
		return ENOMEM;

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, M_DRM, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
		if (map->handle == NULL) {
			free(map, M_DRM);
			return ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				free(map->handle, M_DRM);
				free(map, M_DRM);
				return EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (dev->sg == NULL) {
			free(map, M_DRM);
			return EINVAL;
		}
		map->offset = map->offset + dev->sg->handle;
		break;
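		/* Note (added, assumption from context): dev->sg->handle
		 * holds the base of the scatter/gather area, so the
		 * user-supplied offset above is rebased against it to form
		 * the final map offset. */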
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
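		/* Example (added for illustration): a 64KB map keeps
		 * align = 0x10000, since 0x10000 & 0xffff == 0 (a power of
		 * two); a 12KB map fails the test (0x3000 & 0x2fff != 0)
		 * and falls back to PAGE_SIZE alignment. */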
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, M_DRM);
			return ENOMEM;
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, M_DRM);
		return EINVAL;
	}
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}
int drm_addmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
	}

	return 0;
}
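/*
 * Sketch of the corresponding userspace call (added for illustration, not
 * from this file):
 *
 *	struct drm_map req = { .offset = bar_base, .size = bar_len,
 *	    .type = _DRM_REGISTERS, .flags = 0 };
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &req);
 *	// on success, req.handle is the token later passed to mmap(2)
 */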
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, M_DRM);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	free(map, M_DRM);
}
/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);
	DRM_UNLOCK();

	return 0;
}
static void drm_cleanup_buf_error(struct drm_device *dev,
    drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, M_DRM);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, M_DRM);
		}
		free(entry->buflist, M_DRM);

		entry->buf_count = 0;
	}
}
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid;*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;
	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);
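	/*
	 * Worked example (added for illustration, assuming 4KB pages):
	 * request->size = 65536 gives order = 16 and size = 65536; with
	 * _DRM_PAGE_ALIGN, alignment = round_page(65536) = 65536;
	 * page_order = 16 - 12 = 4, so total = PAGE_SIZE << 4 = 65536
	 * bytes per buffer slot in the aperture.
	 */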
	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/
	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);

			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
	    M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}
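/*
 * Note (added): drm_mapbufs() below keys off _DRM_DMA_USE_AGP to map the
 * AGP buffer region (dev->agp_buffer_map) into user space rather than the
 * individual PCI buffer pages.
 */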
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), M_DRM,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), M_DRM, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(entry->buflist, M_DRM);
		free(entry->seglist, M_DRM);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));
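	/* Example (added for illustration): count = 32 buffers at
	 * page_order = 2 contributes count << page_order = 128 new page
	 * addresses on top of the existing dma->page_count entries. */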
	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, M_DRM);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;

			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
			    M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, M_DRM);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}
	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
	    M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, M_DRM);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, M_DRM);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;
	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);
	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
	    M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;
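	/* Note (added, assumption): with the stock DRM_MIN_ORDER of 5 and
	 * DRM_MAX_ORDER of 22, request sizes from 32 bytes up to 4MB are
	 * accepted. */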
	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}
int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;
	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}
int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;
	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}
int drm_addbufs_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}
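/*
 * Sketch of the corresponding userspace call (added for illustration, not
 * from this file):
 *
 *	struct drm_buf_desc desc = { .count = 32, .size = 65536,
 *	    .flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *	    .agp_start = buffers_offset };
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *	// on return, desc.count/desc.size report what was actually allocated
 */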
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
#ifdef __FreeBSD__
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
	struct vnode *vn;
	voff_t foff;
	vsize_t size;
	vaddr_t vaddr;
#endif /* __NetBSD__ || __OpenBSD__ */

	struct drm_buf_map *request = data;
	int i;

#if defined(__NetBSD__) || defined(__OpenBSD__)
	if (!vfinddev(kdev, VCHR, &vn))
		return 0;	/* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD__ */

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	vms = DRM_CURPROC->td_proc->p_vmspace;
#else
	vms = DRM_CURPROC->p_vmspace;
#endif
	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}
#ifdef __FreeBSD__
	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, dev->devnode, foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&dev->devnode->si_hlist),
	    foff);
#endif
#elif defined(__NetBSD__) || defined(__OpenBSD__)
	vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
	retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
	    UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
	    &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ || __OpenBSD__ */
	if (retcode)
		goto done;
	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset;	/* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
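/*
 * Sketch of the corresponding userspace call (added for illustration, not
 * from this file):
 *
 *	struct drm_buf_map bm = { .count = total_bufs, .list = infos };
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm);
 *	// bm.virtual is the base of the single mapping; each infos[i].address
 *	// points at buffer i within it
 */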