/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "drmP.h"
unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
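/*
 * Example (illustrative only, not from the original file): a driver that
 * wants to expose its register BAR can look it up with the helpers above.
 * The BAR index 0 is an assumption made for this sketch.
 *
 *	unsigned long reg_base = drm_get_resource_start(dev, 0);
 *	unsigned long reg_len = drm_get_resource_len(dev, 0);
 */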
struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && map->type == entry->map->type &&
		    ((entry->map->offset == map->offset) ||
		     (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) {
			return entry;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_find_matching_map);
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle)
{
	int use_hashed_handle;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}
	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
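/*
 * Worked example (added for illustration; the addresses are hypothetical):
 * with PAGE_SHIFT == 12, a token such as 0x00e01000 fits in 32 bits, so
 * the fast path above stores key 0xe01 and the mmap offset round-trips to
 * the original address. A 64-bit kernel pointer like 0xffff880012345000
 * trips the high-bits test, so drm_ht_just_insert_please() allocates a
 * free key near DRM_MAP_HASH_OFFSET >> PAGE_SHIFT instead.
 */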
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it. Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable. So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0);
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	*maplist = list;
	return 0;
}
int drm_addmap(struct drm_device *dev, unsigned int offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
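/*
 * Usage sketch (illustrative, not part of the original file): a driver's
 * load hook could combine the resource helpers with drm_addmap() to
 * publish its register BAR. BAR 0 and the _DRM_READ_ONLY flag are
 * assumptions made for this example.
 *
 *	drm_local_map_t *regs;
 *	int err = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *			     drm_get_resource_len(dev, 0),
 *			     _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (err)
 *		return err;
 */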
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);
	if (err)
		return err;

	/* avoid a warning on 64-bit; the cast isn't pretty, but the API is too old to change */
	map->handle = (void *)(unsigned long)maplist->user_token;
	return 0;
}
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees the associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
			found = 1;
			break;
		}
	}
	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}
	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);
		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);
		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif	/* __OS_HAS_AGP */
int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;
	if (!dma)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;
	if (!dma)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;
	if (!dma)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_fb);
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}
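/*
 * User-space view (illustrative sketch, not from this file): this ioctl
 * backs libdrm's drmAddBufs(). A client requesting 32 PCI buffers of
 * 64 KiB each might do the following; the counts and sizes are made-up
 * example values.
 *
 *	int granted = drmAddBufs(fd, 32, 65536, 0, 0);
 *	if (granted < 0)
 *		return granted;	// errno-style failure
 *	// granted is the number of buffers actually allocated
 */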
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing further buffer allocation after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;
	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;
	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;
	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;
	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
/**
 * Compute size order. Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
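/*
 * Worked example (added for illustration): drm_order(4096) == 12 and
 * drm_order(65536) == 16 (exact powers of two), while drm_order(70000)
 * rounds up to 17. In the addbufs paths above, size = 1 << order, so
 * with PAGE_SHIFT == 12 a 64 KiB request yields page_order = 16 - 12
 * = 4, i.e. a 16-page allocation unit.
 */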