/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "drmP.h"
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
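
/*
 * For illustration, a PCI DRM driver could use these helpers to locate a
 * register BAR before handing it to drm_addmap(); the BAR index used here
 * is a hypothetical example, not taken from any particular driver:
 *
 *	unsigned long reg_base = drm_get_resource_start(dev, 1);
 *	unsigned long reg_len  = drm_get_resource_len(dev, 1);
 */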
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
					     drm_local_map_t *map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    ((entry->map->offset == map->offset) ||
		     (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) {
			return entry;
		}
	}

	return NULL;
}
int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
		   unsigned long user_token, int hashed_handle)
{
	int use_hashed_handle;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (use_hashed_handle) {
		return drm_ht_just_insert_please(&dev->map_hash, hash,
						 user_token, 32 - PAGE_SHIFT - 3,
						 PAGE_SHIFT, DRM_MAP_HASH_OFFSET);
	} else {
		hash->key = user_token;
		return drm_ht_insert_item(&dev->map_hash, hash);
	}
}
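
/*
 * Worked example (values assumed purely for illustration): on a 64-bit
 * kernel the user token of a _DRM_SHM map is its kernel virtual address,
 * e.g.
 *
 *	user_token = 0xffff810012340000UL;
 *
 * Here (user_token & 0xFFFFFFFF00000000UL) is non-zero, so use_hashed_handle
 * is set and drm_ht_just_insert_please() hands user space a small hash key
 * that survives the round trip through the 32-bit handle field.  A token
 * such as 0x12340000UL already fits in 32 bits and is inserted verbatim via
 * drm_ht_insert_item().
 */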
/**
 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
			   unsigned int size, drm_map_type_t type,
			   drm_map_flags_t flags, drm_map_list_t ** maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap(map->offset, map->size, dev);
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		drm_agp_mem_t *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Note: dev->agp->base may actually be 0 when the DRM
		 * is not in control of AGP space.  But if user space is,
		 * it should already have added the AGP base itself.
		 */
		map->offset += dev->agp->base;
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver).  So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (dev->agp->memory && !valid) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);

	/* Assign a 32-bit handle */
	user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, FALSE);

	if (ret) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
		up(&dev->struct_sem);
		return ret;
	}

	list->user_token = list->hash.key;
	up(&dev->struct_sem);

	*maplist = list;
	return 0;
}
int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
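
/*
 * Sketch of a typical in-kernel caller (hypothetical driver code; the BAR
 * index and flags are assumptions, not from a real driver):
 *
 *	drm_local_map_t *regs;
 *	int err;
 *
 *	err = drm_addmap(dev, drm_get_resource_start(dev, 1),
 *			 drm_get_resource_len(dev, 1),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (err)
 *		return err;
 */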
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
		return -EPERM;

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);

	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;

	/* Avoid a warning on 64-bit: this cast isn't pretty, but the ioctl
	 * API was fixed long ago, so it's too late to change it now.
	 */
	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}
/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_ht_remove_key(&dev->map_hash, r_list->user_token);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map->handle, map->size, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	int ret;

	down(&dev->struct_sem);
	ret = drm_rmmap_locked(dev, map);
	up(&dev->struct_sem);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
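
/*
 * A map created with drm_addmap() can be torn down again with drm_rmmap(),
 * which merely wraps drm_rmmap_locked() in dev->struct_sem.  Continuing the
 * hypothetical sketch shown after drm_addmap() above:
 *
 *	if (regs)
 *		drm_rmmap(dev, regs);
 */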
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	if (!map) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		up(&dev->struct_sem);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	up(&dev->struct_sem);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_agp_mem_t *agp_entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (dev->agp->memory && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
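
/*
 * Sketch of a caller (hypothetical values; on success the kernel writes the
 * granted count and the rounded power-of-two size back into the request):
 *
 *	drm_buf_desc_t req;
 *	int ret;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 32;
 *	req.size = 65536;
 *	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = 0;
 *	ret = drm_addbufs_agp(dev, &req);
 */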
#endif				/* __OS_HAS_AGP */
int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_dma_handle_t *dmah;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				up(&dev->struct_sem);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
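
/*
 * Worked example of the sizing arithmetic above (PAGE_SHIFT == 12 assumed):
 * a request with size = 2048 yields order = drm_order(2048) = 11 and
 * size = 1 << 11 = 2048.  page_order is then 0, so each drm_pci_alloc()
 * segment is total = PAGE_SIZE << 0 = 4096 bytes.  Without _DRM_PAGE_ALIGN,
 * alignment = 2048 and the inner for-loop carves two 2 KiB buffers out of
 * every 4 KiB segment; with _DRM_PAGE_ALIGN, alignment = PAGE_ALIGN(2048) =
 * 4096 and only one buffer fits per segment.
 */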
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_fb);
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *) arg, &request,
				 sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}
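
/*
 * User space reaches this dispatcher through the DRM_IOCTL_ADD_BUFS ioctl
 * (libdrm's drmAddBufs() issues the same call).  Hedged sketch with
 * hypothetical values:
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 16;
 *	req.size = 4096;
 *	req.flags = 0;
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *
 * With no type flag set the request falls through to drm_addbufs_pci().
 */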
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}
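
/*
 * User space typically calls this ioctl twice: once with request.count == 0
 * to learn how many size pools exist, then again with a list large enough
 * to receive them.  Hedged user-space sketch (hypothetical code; error
 * handling omitted):
 *
 *	drm_buf_info_t info;
 *
 *	memset(&info, 0, sizeof(info));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *	info.list = malloc(info.count * sizeof(drm_buf_desc_t));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */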
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and seldom used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
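
/*
 * Hedged user-space sketch (libdrm's drmFreeBufs() wraps the same ioctl);
 * the indexes are the drm_buf_t::idx values handed out by drm_mapbufs():
 *
 *	int idxs[2] = { 3, 7 };
 *	drm_buf_free_t req = { .count = 2, .list = idxs };
 *
 *	ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */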
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or FB buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}
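
/*
 * Hedged user-space sketch (libdrm's drmMapBufs() does the equivalent):
 * request.count must be at least the kernel's buffer count or the kernel
 * only reports the required count back.  nbufs below is hypothetical,
 * e.g. obtained via DRM_IOCTL_INFO_BUFS:
 *
 *	drm_buf_map_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = nbufs;
 *	req.list = malloc(nbufs * sizeof(drm_buf_pub_t));
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &req);
 *
 * Afterwards req.virtual is the base of the mapping and req.list[i].address
 * points at buffer i inside it.
 */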
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
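
/*
 * A few sample values, easily checked by hand: drm_order(1) = 0,
 * drm_order(4096) = 12, drm_order(4097) = 13, and drm_order(9000) = 14,
 * since 2^13 = 8192 < 9000 <= 16384 = 2^14.  The addbufs functions above
 * rely on this to round every request up to a power-of-two pool size.
 */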