2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
30 #include "drm_compat.h"
33 #include <linux/swap.h>
36 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
39 uint32_t read_domains,
40 uint32_t write_domain);
42 i915_gem_set_domain(struct drm_gem_object *obj,
43 struct drm_file *file_priv,
44 uint32_t read_domains,
45 uint32_t write_domain);
46 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
47 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
48 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
50 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
53 struct drm_i915_private *dev_priv = dev->dev_private;
56 (start & (PAGE_SIZE - 1)) != 0 ||
57 (end & (PAGE_SIZE - 1)) != 0) {
61 drm_mm_init(&dev_priv->mm.gtt_space, start,
64 dev->gtt_total = (uint32_t) (end - start);
70 i915_gem_init_ioctl(struct drm_device *dev, void *data,
71 struct drm_file *file_priv)
73 struct drm_i915_gem_init *args = data;
76 mutex_lock(&dev->struct_mutex);
77 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
78 mutex_unlock(&dev->struct_mutex);
85 * Creates a new mm object and returns a handle to it.
88 i915_gem_create_ioctl(struct drm_device *dev, void *data,
89 struct drm_file *file_priv)
91 struct drm_i915_gem_create *args = data;
92 struct drm_gem_object *obj;
95 args->size = roundup(args->size, PAGE_SIZE);
97 /* Allocate the new object */
98 obj = drm_gem_object_alloc(dev, args->size);
102 ret = drm_gem_handle_create(file_priv, obj, &handle);
103 mutex_lock(&dev->struct_mutex);
104 drm_gem_object_handle_unreference(obj);
105 mutex_unlock(&dev->struct_mutex);
110 args->handle = handle;
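/*
 * For reference, a userspace caller would typically drive this ioctl roughly
 * as follows (sketch only; struct/field names as in i915_drm.h, error
 * handling omitted):
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	create.handle then names the new object for pread/pwrite/execbuffer.
 */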
116 * Reads data from the object referenced by handle.
118 * On error, the contents of *data are undefined.
121 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
122 struct drm_file *file_priv)
124 struct drm_i915_gem_pread *args = data;
125 struct drm_gem_object *obj;
126 struct drm_i915_gem_object *obj_priv;
131 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
134 obj_priv = obj->driver_private;
136 /* Bounds check source.
138 * XXX: This could use review for overflow issues...
140 if (args->offset > obj->size || args->size > obj->size ||
141 args->offset + args->size > obj->size) {
142 drm_gem_object_unreference(obj);
146 mutex_lock(&dev->struct_mutex);
148 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
149 I915_GEM_DOMAIN_CPU, 0);
151 drm_gem_object_unreference(obj);
152 mutex_unlock(&dev->struct_mutex);
155 offset = args->offset;
157 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
158 args->size, &offset);
159 if (read != args->size) {
160 drm_gem_object_unreference(obj);
161 mutex_unlock(&dev->struct_mutex);
168 drm_gem_object_unreference(obj);
169 mutex_unlock(&dev->struct_mutex);
174 #include "drm_compat.h"
177 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
178 struct drm_i915_gem_pwrite *args,
179 struct drm_file *file_priv)
181 struct drm_i915_gem_object *obj_priv = obj->driver_private;
184 char __user *user_data;
189 unsigned long unwritten;
191 user_data = (char __user *) (uintptr_t) args->data_ptr;
193 if (!access_ok(VERIFY_READ, user_data, remain))
197 mutex_lock(&dev->struct_mutex);
198 ret = i915_gem_object_pin(obj, 0);
200 mutex_unlock(&dev->struct_mutex);
203 ret = i915_gem_set_domain(obj, file_priv,
204 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
208 obj_priv = obj->driver_private;
209 offset = obj_priv->gtt_offset + args->offset;
213 /* Operation in this page
216 * o = offset within page
219 i = offset >> PAGE_SHIFT;
220 o = offset & (PAGE_SIZE-1);
222 if ((o + l) > PAGE_SIZE)
225 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
227 #ifdef DRM_KMAP_ATOMIC_PROT_PFN
228 /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
230 vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
231 __pgprot(__PAGE_KERNEL));
233 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
234 i, o, l, pfn, vaddr);
236 unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
238 kunmap_atomic(vaddr, KM_USER0);
243 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
245 DRM_INFO("pwrite slow i %d o %d l %d "
246 "pfn %ld vaddr %p\n",
247 i, o, l, pfn, vaddr);
253 unwritten = __copy_from_user(vaddr + o, user_data, l);
255 DRM_INFO("unwritten %ld\n", unwritten);
268 #if WATCH_PWRITE && 1
269 i915_gem_clflush_object(obj);
270 i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
271 i915_gem_clflush_object(obj);
275 i915_gem_object_unpin(obj);
276 mutex_unlock(&dev->struct_mutex);
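/*
 * Fallback pwrite path: moves the object to the CPU domain and writes the
 * user data straight through the object's shmem backing file with
 * vfs_write(), letting the page cache handle the copy.
 */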
282 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
283 struct drm_i915_gem_pwrite *args,
284 struct drm_file *file_priv)
290 mutex_lock(&dev->struct_mutex);
292 ret = i915_gem_set_domain(obj, file_priv,
293 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
295 mutex_unlock(&dev->struct_mutex);
299 offset = args->offset;
301 written = vfs_write(obj->filp,
302 (char __user *)(uintptr_t) args->data_ptr,
303 args->size, &offset);
304 if (written != args->size) {
305 mutex_unlock(&dev->struct_mutex);
312 mutex_unlock(&dev->struct_mutex);
318 * Writes data to the object referenced by handle.
320 * On error, the contents of the buffer that were to be modified are undefined.
323 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
324 struct drm_file *file_priv)
326 struct drm_i915_gem_pwrite *args = data;
327 struct drm_gem_object *obj;
328 struct drm_i915_gem_object *obj_priv;
331 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
334 obj_priv = obj->driver_private;
336 /* Bounds check destination.
338 * XXX: This could use review for overflow issues...
340 if (args->offset > obj->size || args->size > obj->size ||
341 args->offset + args->size > obj->size) {
342 drm_gem_object_unreference(obj);
346 /* We can only do the GTT pwrite on untiled buffers, as otherwise
347 * it would end up going through the fenced access, and we'll get
348 * different detiling behavior between reading and writing.
349 * pread/pwrite currently are reading and writing from the CPU
350 * perspective, requiring manual detiling by the client.
352 if (obj_priv->tiling_mode == I915_TILING_NONE &&
354 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
356 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
360 DRM_INFO("pwrite failed %d\n", ret);
363 drm_gem_object_unreference(obj);
369 * Called when user space prepares to use an object
372 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
373 struct drm_file *file_priv)
375 struct drm_i915_gem_set_domain *args = data;
376 struct drm_gem_object *obj;
379 if (!(dev->driver->driver_features & DRIVER_GEM))
382 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
386 mutex_lock(&dev->struct_mutex);
388 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
389 obj, obj->size, args->read_domains, args->write_domain);
391 ret = i915_gem_set_domain(obj, file_priv,
392 args->read_domains, args->write_domain);
393 drm_gem_object_unreference(obj);
394 mutex_unlock(&dev->struct_mutex);
399 * Called when user space has done writes to this buffer
402 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
403 struct drm_file *file_priv)
405 struct drm_i915_gem_sw_finish *args = data;
406 struct drm_gem_object *obj;
407 struct drm_i915_gem_object *obj_priv;
410 if (!(dev->driver->driver_features & DRIVER_GEM))
413 mutex_lock(&dev->struct_mutex);
414 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
416 mutex_unlock(&dev->struct_mutex);
421 DRM_INFO("%s: sw_finish %d (%p %d)\n",
422 __func__, args->handle, obj, obj->size);
424 obj_priv = obj->driver_private;
426 /* Pinned buffers may be scanout, so flush the cache */
427 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
428 i915_gem_clflush_object(obj);
429 drm_agp_chipset_flush(dev);
431 drm_gem_object_unreference(obj);
432 mutex_unlock(&dev->struct_mutex);
437 * Maps the contents of an object, returning the address it is mapped into.
440 * While the mapping holds a reference on the contents of the object, it doesn't
441 * imply a ref on the object itself.
444 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
445 struct drm_file *file_priv)
447 struct drm_i915_gem_mmap *args = data;
448 struct drm_gem_object *obj;
452 if (!(dev->driver->driver_features & DRIVER_GEM))
455 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
459 offset = args->offset;
461 down_write(&current->mm->mmap_sem);
462 addr = do_mmap(obj->filp, 0, args->size,
463 PROT_READ | PROT_WRITE, MAP_SHARED,
465 up_write(&current->mm->mmap_sem);
466 mutex_lock(&dev->struct_mutex);
467 drm_gem_object_unreference(obj);
468 mutex_unlock(&dev->struct_mutex);
469 if (IS_ERR((void *)addr))
472 args->addr_ptr = (uint64_t) addr;
478 i915_gem_object_free_page_list(struct drm_gem_object *obj)
480 struct drm_i915_gem_object *obj_priv = obj->driver_private;
481 int page_count = obj->size / PAGE_SIZE;
484 if (obj_priv->page_list == NULL)
488 for (i = 0; i < page_count; i++)
489 if (obj_priv->page_list[i] != NULL) {
491 set_page_dirty(obj_priv->page_list[i]);
492 mark_page_accessed(obj_priv->page_list[i]);
493 page_cache_release(obj_priv->page_list[i]);
497 drm_free(obj_priv->page_list,
498 page_count * sizeof(struct page *),
500 obj_priv->page_list = NULL;
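/*
 * Moves the object to the tail of the active list; the first transition to
 * active takes an extra reference so the object can't be freed while the GPU
 * may still be using it. The list manipulation relies on the caller holding
 * dev->struct_mutex.
 */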
504 i915_gem_object_move_to_active(struct drm_gem_object *obj)
506 struct drm_device *dev = obj->dev;
507 struct drm_i915_private *dev_priv = dev->dev_private;
508 struct drm_i915_gem_object *obj_priv = obj->driver_private;
510 /* Add a reference if we're newly entering the active list. */
511 if (!obj_priv->active) {
512 drm_gem_object_reference(obj);
513 obj_priv->active = 1;
515 /* Move from whatever list we were on to the tail of execution. */
516 list_move_tail(&obj_priv->list,
517 &dev_priv->mm.active_list);
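/*
 * Moves the object to the inactive LRU (or off the lists entirely while it
 * is pinned) and drops the active-list reference taken above.
 */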
522 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
524 struct drm_device *dev = obj->dev;
525 struct drm_i915_private *dev_priv = dev->dev_private;
526 struct drm_i915_gem_object *obj_priv = obj->driver_private;
528 i915_verify_inactive(dev, __FILE__, __LINE__);
529 if (obj_priv->pin_count != 0)
530 list_del_init(&obj_priv->list);
532 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
534 if (obj_priv->active) {
535 obj_priv->active = 0;
536 drm_gem_object_unreference(obj);
538 i915_verify_inactive(dev, __FILE__, __LINE__);
542 * Creates a new sequence number, emitting a write of it to the status page
543 * plus an interrupt, which will trigger i915_user_interrupt_handler.
545 * Must be called with struct_lock held.
547 * Returned sequence numbers are nonzero on success.
550 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
552 struct drm_i915_private *dev_priv = dev->dev_private;
553 struct drm_i915_gem_request *request;
558 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
562 /* Grab the seqno we're going to make this request be, and bump the
563 * next (skipping 0 so it can be the reserved no-seqno value).
565 seqno = dev_priv->mm.next_gem_seqno;
566 dev_priv->mm.next_gem_seqno++;
567 if (dev_priv->mm.next_gem_seqno == 0)
568 dev_priv->mm.next_gem_seqno++;
571 OUT_RING(MI_STORE_DWORD_INDEX);
572 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
575 OUT_RING(MI_USER_INTERRUPT);
578 DRM_DEBUG("%d\n", seqno);
580 request->seqno = seqno;
581 request->emitted_jiffies = jiffies;
582 request->flush_domains = flush_domains;
583 was_empty = list_empty(&dev_priv->mm.request_list);
584 list_add_tail(&request->list, &dev_priv->mm.request_list);
587 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
592 * Command execution barrier
594 * Ensures that all commands in the ring are finished
595 * before signalling the CPU
598 i915_retire_commands(struct drm_device *dev)
600 struct drm_i915_private *dev_priv = dev->dev_private;
601 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
602 uint32_t flush_domains = 0;
605 /* The sampler always gets flushed on i965 (sigh) */
607 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
610 OUT_RING(0); /* noop */
612 return flush_domains;
616 * Moves buffers associated only with the given active seqno from the active
617 * to inactive list, potentially freeing them.
620 i915_gem_retire_request(struct drm_device *dev,
621 struct drm_i915_gem_request *request)
623 struct drm_i915_private *dev_priv = dev->dev_private;
625 if (request->flush_domains != 0) {
626 struct drm_i915_gem_object *obj_priv, *next;
628 /* First clear any buffers that were only waiting for a flush
629 * matching the one just retired.
632 list_for_each_entry_safe(obj_priv, next,
633 &dev_priv->mm.flushing_list, list) {
634 struct drm_gem_object *obj = obj_priv->obj;
636 if (obj->write_domain & request->flush_domains) {
637 obj->write_domain = 0;
638 i915_gem_object_move_to_inactive(obj);
644 /* Move any buffers on the active list that are no longer referenced
645 * by the ringbuffer to the flushing/inactive lists as appropriate.
647 while (!list_empty(&dev_priv->mm.active_list)) {
648 struct drm_gem_object *obj;
649 struct drm_i915_gem_object *obj_priv;
651 obj_priv = list_first_entry(&dev_priv->mm.active_list,
652 struct drm_i915_gem_object,
656 /* If the seqno being retired doesn't match the oldest in the
657 * list, then the oldest in the list must still be newer than
660 if (obj_priv->last_rendering_seqno != request->seqno)
663 DRM_INFO("%s: retire %d moves to inactive list %p\n",
664 __func__, request->seqno, obj);
667 /* If this request flushes the write domain,
668 * clear the write domain from the object now
670 if (request->flush_domains & obj->write_domain)
671 obj->write_domain = 0;
673 if (obj->write_domain != 0) {
674 list_move_tail(&obj_priv->list,
675 &dev_priv->mm.flushing_list);
677 i915_gem_object_move_to_inactive(obj);
683 * Returns true if seq1 is later than or equal to seq2.
686 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
688 return (int32_t)(seq1 - seq2) >= 0;
692 i915_get_gem_seqno(struct drm_device *dev)
694 struct drm_i915_private *dev_priv = dev->dev_private;
696 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
700 * This function clears the request list as sequence numbers are passed.
703 i915_gem_retire_requests(struct drm_device *dev)
705 struct drm_i915_private *dev_priv = dev->dev_private;
708 seqno = i915_get_gem_seqno(dev);
710 while (!list_empty(&dev_priv->mm.request_list)) {
711 struct drm_i915_gem_request *request;
712 uint32_t retiring_seqno;
714 request = list_first_entry(&dev_priv->mm.request_list,
715 struct drm_i915_gem_request,
717 retiring_seqno = request->seqno;
719 if (i915_seqno_passed(seqno, retiring_seqno) ||
720 dev_priv->mm.wedged) {
721 i915_gem_retire_request(dev, request);
723 list_del(&request->list);
724 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
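/*
 * Delayed-work handler: retires any completed requests and re-arms itself
 * for another pass while requests remain outstanding.
 */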
731 i915_gem_retire_work_handler(struct work_struct *work)
733 struct drm_i915_private *dev_priv;
734 struct drm_device *dev;
736 dev_priv = container_of(work, struct drm_i915_private,
737 mm.retire_work.work);
740 mutex_lock(&dev->struct_mutex);
741 i915_gem_retire_requests(dev);
742 if (!list_empty(&dev_priv->mm.request_list))
743 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
744 mutex_unlock(&dev->struct_mutex);
748 * Waits for a sequence number to be signaled, and cleans up the
749 * request and object lists appropriately for that event.
752 i915_wait_request(struct drm_device *dev, uint32_t seqno)
754 struct drm_i915_private *dev_priv = dev->dev_private;
759 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
760 dev_priv->mm.waiting_gem_seqno = seqno;
761 i915_user_irq_on(dev);
762 ret = wait_event_interruptible(dev_priv->irq_queue,
763 i915_seqno_passed(i915_get_gem_seqno(dev),
765 dev_priv->mm.wedged);
766 i915_user_irq_off(dev);
767 dev_priv->mm.waiting_gem_seqno = 0;
769 if (dev_priv->mm.wedged)
772 if (ret && ret != -ERESTARTSYS)
773 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
774 __func__, ret, seqno, i915_get_gem_seqno(dev));
776 /* Directly dispatch request retiring. While we have the work queue
777 * to handle this, the waiter on a request often wants an associated
778 * buffer to have made it to the inactive list, and we would need
779 * a separate wait queue to handle that.
782 i915_gem_retire_requests(dev);
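/*
 * Emits the flushes needed to move data out of the given write domains and
 * invalidate the given read domains: a chipset flush for the CPU domain and
 * an MI_FLUSH on the ring for the GPU domains.
 */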
788 i915_gem_flush(struct drm_device *dev,
789 uint32_t invalidate_domains,
790 uint32_t flush_domains)
792 struct drm_i915_private *dev_priv = dev->dev_private;
797 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
798 invalidate_domains, flush_domains);
801 if (flush_domains & I915_GEM_DOMAIN_CPU)
802 drm_agp_chipset_flush(dev);
804 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
805 I915_GEM_DOMAIN_GTT)) {
809 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
810 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
811 * also flushed at 2d versus 3d pipeline switches.
815 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
816 * MI_READ_FLUSH is set, and is always flushed on 965.
818 * I915_GEM_DOMAIN_COMMAND may not exist?
820 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
821 * invalidated when MI_EXE_FLUSH is set.
823 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
824 * invalidated with every MI_FLUSH.
828 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
829 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
830 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
831 * are flushed at any MI_FLUSH.
834 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
835 if ((invalidate_domains|flush_domains) &
836 I915_GEM_DOMAIN_RENDER)
837 cmd &= ~MI_NO_WRITE_FLUSH;
838 if (!IS_I965G(dev)) {
840 * On the 965, the sampler cache always gets flushed
841 * and this bit is reserved.
843 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
844 cmd |= MI_READ_FLUSH;
846 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
850 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
854 OUT_RING(0); /* noop */
860 * Ensures that all rendering to the object has completed and the object is
861 * safe to unbind from the GTT or access from the CPU.
864 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
866 struct drm_device *dev = obj->dev;
867 struct drm_i915_gem_object *obj_priv = obj->driver_private;
869 uint32_t write_domain;
871 /* If there are writes queued to the buffer, flush and
872 * create a new seqno to wait for.
874 write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT);
877 DRM_INFO("%s: flushing object %p from write domain %08x\n",
878 __func__, obj, write_domain);
880 i915_gem_flush(dev, 0, write_domain);
882 i915_gem_object_move_to_active(obj);
883 obj_priv->last_rendering_seqno = i915_add_request(dev,
885 BUG_ON(obj_priv->last_rendering_seqno == 0);
887 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
891 /* If there is rendering queued on the buffer being evicted, wait for
894 if (obj_priv->active) {
896 DRM_INFO("%s: object %p wait for seqno %08x\n",
897 __func__, obj, obj_priv->last_rendering_seqno);
899 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
908 * Unbinds an object from the GTT aperture.
911 i915_gem_object_unbind(struct drm_gem_object *obj)
913 struct drm_device *dev = obj->dev;
914 struct drm_i915_gem_object *obj_priv = obj->driver_private;
918 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
919 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
921 if (obj_priv->gtt_space == NULL)
924 if (obj_priv->pin_count != 0) {
925 DRM_ERROR("Attempting to unbind pinned buffer\n");
929 /* Wait for any rendering to complete
931 ret = i915_gem_object_wait_rendering(obj);
933 DRM_ERROR("wait_rendering failed: %d\n", ret);
937 /* Move the object to the CPU domain to ensure that
938 * any possible CPU writes while it's not in the GTT
939 * are flushed when we go to remap it. This will
940 * also ensure that all pending GPU writes are finished
943 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
944 I915_GEM_DOMAIN_CPU);
946 DRM_ERROR("set_domain failed: %d\n", ret);
950 if (obj_priv->agp_mem != NULL) {
951 drm_unbind_agp(obj_priv->agp_mem);
952 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
953 obj_priv->agp_mem = NULL;
956 BUG_ON(obj_priv->active);
958 i915_gem_object_free_page_list(obj);
960 if (obj_priv->gtt_space) {
961 atomic_dec(&dev->gtt_count);
962 atomic_sub(obj->size, &dev->gtt_memory);
964 drm_mm_put_block(obj_priv->gtt_space);
965 obj_priv->gtt_space = NULL;
968 /* Remove ourselves from the LRU list if present. */
969 if (!list_empty(&obj_priv->list))
970 list_del_init(&obj_priv->list);
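/*
 * Frees up GTT space by unbinding a buffer: prefer an already-inactive
 * buffer, otherwise wait on the oldest outstanding request, and as a last
 * resort emit a flush so that buffers on the flushing list can retire.
 */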
976 i915_gem_evict_something(struct drm_device *dev)
978 struct drm_i915_private *dev_priv = dev->dev_private;
979 struct drm_gem_object *obj;
980 struct drm_i915_gem_object *obj_priv;
984 /* If there's an inactive buffer available now, grab it
987 if (!list_empty(&dev_priv->mm.inactive_list)) {
988 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
989 struct drm_i915_gem_object,
992 BUG_ON(obj_priv->pin_count != 0);
994 DRM_INFO("%s: evicting %p\n", __func__, obj);
996 BUG_ON(obj_priv->active);
998 /* Wait on the rendering and unbind the buffer. */
999 ret = i915_gem_object_unbind(obj);
1003 /* If we didn't get anything, but the ring is still processing
1004 * things, wait for one of those things to finish and hopefully
1005 * leave us a buffer to evict.
1007 if (!list_empty(&dev_priv->mm.request_list)) {
1008 struct drm_i915_gem_request *request;
1010 request = list_first_entry(&dev_priv->mm.request_list,
1011 struct drm_i915_gem_request,
1014 ret = i915_wait_request(dev, request->seqno);
1018 /* if waiting caused an object to become inactive,
1019 * then loop around and wait for it. Otherwise, we
1020 * assume that waiting freed and unbound something,
1021 * so there should now be some space in the GTT
1023 if (!list_empty(&dev_priv->mm.inactive_list))
1028 /* If we didn't have anything on the request list but there
1029 * are buffers awaiting a flush, emit one and try again.
1030 * When we wait on it, those buffers waiting for that flush
1031 * will get moved to inactive.
1033 if (!list_empty(&dev_priv->mm.flushing_list)) {
1034 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1035 struct drm_i915_gem_object,
1037 obj = obj_priv->obj;
1042 i915_add_request(dev, obj->write_domain);
1048 DRM_ERROR("inactive empty %d request empty %d "
1049 "flushing empty %d\n",
1050 list_empty(&dev_priv->mm.inactive_list),
1051 list_empty(&dev_priv->mm.request_list),
1052 list_empty(&dev_priv->mm.flushing_list));
1053 /* If we didn't do any of the above, there's nothing to be done
1054 * and we just can't fit it in.
1062 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1064 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1066 struct address_space *mapping;
1067 struct inode *inode;
1071 if (obj_priv->page_list)
1074 /* Get the list of pages out of our struct file. They'll be pinned
1075 * at this point until we release them.
1077 page_count = obj->size / PAGE_SIZE;
1078 BUG_ON(obj_priv->page_list != NULL);
1079 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1081 if (obj_priv->page_list == NULL) {
1082 DRM_ERROR("Faled to allocate page list\n");
1086 inode = obj->filp->f_path.dentry->d_inode;
1087 mapping = inode->i_mapping;
1088 for (i = 0; i < page_count; i++) {
1089 page = read_mapping_page(mapping, i, NULL);
1091 ret = PTR_ERR(page);
1092 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1093 i915_gem_object_free_page_list(obj);
1096 obj_priv->page_list[i] = page;
1102 * Finds free space in the GTT aperture and binds the object there.
1105 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1107 struct drm_device *dev = obj->dev;
1108 struct drm_i915_private *dev_priv = dev->dev_private;
1109 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1110 struct drm_mm_node *free_space;
1111 int page_count, ret;
1114 alignment = PAGE_SIZE;
1115 if (alignment & (PAGE_SIZE - 1)) {
1116 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1121 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1122 obj->size, alignment, 0);
1123 if (free_space != NULL) {
1124 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1126 if (obj_priv->gtt_space != NULL) {
1127 obj_priv->gtt_space->private = obj;
1128 obj_priv->gtt_offset = obj_priv->gtt_space->start;
1131 if (obj_priv->gtt_space == NULL) {
1132 /* If the gtt is empty and we're still having trouble
1133 * fitting our object in, we're out of memory.
1136 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1138 if (list_empty(&dev_priv->mm.inactive_list) &&
1139 list_empty(&dev_priv->mm.flushing_list) &&
1140 list_empty(&dev_priv->mm.active_list)) {
1141 DRM_ERROR("GTT full, but LRU list empty\n");
1145 ret = i915_gem_evict_something(dev);
1147 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1154 DRM_INFO("Binding object of size %d at 0x%08x\n",
1155 obj->size, obj_priv->gtt_offset);
1157 ret = i915_gem_object_get_page_list(obj);
1159 drm_mm_put_block(obj_priv->gtt_space);
1160 obj_priv->gtt_space = NULL;
1164 page_count = obj->size / PAGE_SIZE;
1165 /* Create an AGP memory structure pointing at our pages, and bind it
1168 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1169 obj_priv->page_list,
1171 obj_priv->gtt_offset);
1172 if (obj_priv->agp_mem == NULL) {
1173 i915_gem_object_free_page_list(obj);
1174 drm_mm_put_block(obj_priv->gtt_space);
1175 obj_priv->gtt_space = NULL;
1178 atomic_inc(&dev->gtt_count);
1179 atomic_add(obj->size, &dev->gtt_memory);
1181 /* Assert that the object is not currently in any GPU domain. As it
1182 * wasn't in the GTT, there shouldn't be any way it could have been in
1185 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1186 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1192 i915_gem_clflush_object(struct drm_gem_object *obj)
1194 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1196 /* If we don't have a page list set up, then we're not pinned
1197 * to GPU, and we can ignore the cache flush because it'll happen
1198 * again at bind time.
1200 if (obj_priv->page_list == NULL)
1203 drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
1207 * Set the next domain for the specified object. This
1208 * may not actually perform the necessary flushing/invalidating though,
1209 * as that may want to be batched with other set_domain operations
1211 * This is (we hope) the only really tricky part of gem. The goal
1212 * is fairly simple -- track which caches hold bits of the object
1213 * and make sure they remain coherent. A few concrete examples may
1214 * help to explain how it works. For shorthand, we use the notation
1215 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1216 * a pair of read and write domain masks.
1218 * Case 1: the batch buffer
1224 * 5. Unmapped from GTT
1227 * Let's take these a step at a time
1230 * Pages allocated from the kernel may still have
1231 * cache contents, so we set them to (CPU, CPU) always.
1232 * 2. Written by CPU (using pwrite)
1233 * The pwrite function calls set_domain (CPU, CPU) and
1234 * this function does nothing (as nothing changes)
1236 * This function asserts that the object is not
1237 * currently in any GPU-based read or write domains
1239 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1240 * As write_domain is zero, this function adds in the
1241 * current read domains (CPU+COMMAND, 0).
1242 * flush_domains is set to CPU.
1243 * invalidate_domains is set to COMMAND
1244 * clflush is run to get data out of the CPU caches
1245 * then i915_dev_set_domain calls i915_gem_flush to
1246 * emit an MI_FLUSH and drm_agp_chipset_flush
1247 * 5. Unmapped from GTT
1248 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1249 * flush_domains and invalidate_domains end up both zero
1250 * so no flushing/invalidating happens
1254 * Case 2: The shared render buffer
1258 * 3. Read/written by GPU
1259 * 4. set_domain to (CPU,CPU)
1260 * 5. Read/written by CPU
1261 * 6. Read/written by GPU
1264 * Same as last example, (CPU, CPU)
1266 * Nothing changes (assertions find that it is not in the GPU)
1267 * 3. Read/written by GPU
1268 * execbuffer calls set_domain (RENDER, RENDER)
1269 * flush_domains gets CPU
1270 * invalidate_domains gets GPU
1272 * MI_FLUSH and drm_agp_chipset_flush
1273 * 4. set_domain (CPU, CPU)
1274 * flush_domains gets GPU
1275 * invalidate_domains gets CPU
1276 * wait_rendering (obj) to make sure all drawing is complete.
1277 * This will include an MI_FLUSH to get the data from GPU
1279 * clflush (obj) to invalidate the CPU cache
1280 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1281 * 5. Read/written by CPU
1282 * cache lines are loaded and dirtied
1283 * 6. Read/written by GPU
1284 * Same as last GPU access
1286 * Case 3: The constant buffer
1291 * 4. Updated (written) by CPU again
1300 * flush_domains = CPU
1301 * invalidate_domains = RENDER
1304 * drm_agp_chipset_flush
1305 * 4. Updated (written) by CPU again
1307 * flush_domains = 0 (no previous write domain)
1308 * invalidate_domains = 0 (no new read domains)
1311 * flush_domains = CPU
1312 * invalidate_domains = RENDER
1315 * drm_agp_chipset_flush
1318 i915_gem_object_set_domain(struct drm_gem_object *obj,
1319 uint32_t read_domains,
1320 uint32_t write_domain)
1322 struct drm_device *dev = obj->dev;
1323 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1324 uint32_t invalidate_domains = 0;
1325 uint32_t flush_domains = 0;
1329 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1331 obj->read_domains, read_domains,
1332 obj->write_domain, write_domain);
1335 * If the object isn't moving to a new write domain,
1336 * let the object stay in multiple read domains
1338 if (write_domain == 0)
1339 read_domains |= obj->read_domains;
1341 obj_priv->dirty = 1;
1344 * Flush the current write domain if
1345 * the new read domains don't match. Invalidate
1346 * any read domains which differ from the old
1349 if (obj->write_domain && obj->write_domain != read_domains) {
1350 flush_domains |= obj->write_domain;
1351 invalidate_domains |= read_domains & ~obj->write_domain;
1354 * Invalidate any read caches which may have
1355 * stale data. That is, any new read domains.
1357 invalidate_domains |= read_domains & ~obj->read_domains;
1358 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1360 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1361 __func__, flush_domains, invalidate_domains);
1364 * If we're invalidating the CPU cache and flushing a GPU cache,
1365 * then pause for rendering so that the GPU caches will be
1366 * flushed before the cpu cache is invalidated
1368 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1369 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1370 I915_GEM_DOMAIN_GTT))) {
1371 ret = i915_gem_object_wait_rendering(obj);
1375 i915_gem_clflush_object(obj);
1378 if ((write_domain | flush_domains) != 0)
1379 obj->write_domain = write_domain;
1381 /* If we're invalidating the CPU domain, clear the per-page CPU
1382 * domain list as well.
1384 if (obj_priv->page_cpu_valid != NULL &&
1385 (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
1386 ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
1387 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
1389 obj->read_domains = read_domains;
1391 dev->invalidate_domains |= invalidate_domains;
1392 dev->flush_domains |= flush_domains;
1394 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1396 obj->read_domains, obj->write_domain,
1397 dev->invalidate_domains, dev->flush_domains);
1403 * Set the read/write domain on a range of the object.
1405 * Currently only implemented for CPU reads, otherwise drops to normal
1406 * i915_gem_object_set_domain().
1409 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1412 uint32_t read_domains,
1413 uint32_t write_domain)
1415 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1418 if (obj->read_domains & I915_GEM_DOMAIN_CPU)
1421 if (read_domains != I915_GEM_DOMAIN_CPU ||
1423 return i915_gem_object_set_domain(obj,
1424 read_domains, write_domain);
1426 /* Wait on any GPU rendering to the object to be flushed. */
1427 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
1428 ret = i915_gem_object_wait_rendering(obj);
1433 if (obj_priv->page_cpu_valid == NULL) {
1434 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1438 /* Flush the cache on any pages that are still invalid from the CPU's
1441 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1442 if (obj_priv->page_cpu_valid[i])
1445 drm_ttm_cache_flush(obj_priv->page_list + i, 1);
1447 obj_priv->page_cpu_valid[i] = 1;
1454 * Once all of the objects have been set in the proper domain,
1455 * perform the necessary flush and invalidate operations.
1457 * Returns the write domains flushed, for use in flush tracking.
1460 i915_gem_dev_set_domain(struct drm_device *dev)
1462 uint32_t flush_domains = dev->flush_domains;
1465 * Now that all the buffers are synced to the proper domains,
1466 * flush and invalidate the collected domains
1468 if (dev->invalidate_domains | dev->flush_domains) {
1470 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1472 dev->invalidate_domains,
1473 dev->flush_domains);
1476 dev->invalidate_domains,
1477 dev->flush_domains);
1478 dev->invalidate_domains = 0;
1479 dev->flush_domains = 0;
1482 return flush_domains;
1486 * Pin an object to the GTT and evaluate the relocations landing in it.
1489 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1490 struct drm_file *file_priv,
1491 struct drm_i915_gem_exec_object *entry)
1493 struct drm_device *dev = obj->dev;
1494 struct drm_i915_gem_relocation_entry reloc;
1495 struct drm_i915_gem_relocation_entry __user *relocs;
1496 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1498 uint32_t last_reloc_offset = -1;
1499 void *reloc_page = NULL;
1501 /* Choose the GTT offset for our buffer and put it there. */
1502 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1506 entry->offset = obj_priv->gtt_offset;
1508 relocs = (struct drm_i915_gem_relocation_entry __user *)
1509 (uintptr_t) entry->relocs_ptr;
1510 /* Apply the relocations, using the GTT aperture to avoid cache
1511 * flushing requirements.
1513 for (i = 0; i < entry->relocation_count; i++) {
1514 struct drm_gem_object *target_obj;
1515 struct drm_i915_gem_object *target_obj_priv;
1516 uint32_t reloc_val, reloc_offset, *reloc_entry;
1519 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1521 i915_gem_object_unpin(obj);
1525 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1526 reloc.target_handle);
1527 if (target_obj == NULL) {
1528 i915_gem_object_unpin(obj);
1531 target_obj_priv = target_obj->driver_private;
1533 /* The target buffer should have appeared before us in the
1534 * exec_object list, so it should have a GTT space bound by now.
1536 if (target_obj_priv->gtt_space == NULL) {
1537 DRM_ERROR("No GTT space found for object %d\n",
1538 reloc.target_handle);
1539 drm_gem_object_unreference(target_obj);
1540 i915_gem_object_unpin(obj);
1544 if (reloc.offset > obj->size - 4) {
1545 DRM_ERROR("Relocation beyond object bounds: "
1546 "obj %p target %d offset %d size %d.\n",
1547 obj, reloc.target_handle,
1548 (int) reloc.offset, (int) obj->size);
1549 drm_gem_object_unreference(target_obj);
1550 i915_gem_object_unpin(obj);
1553 if (reloc.offset & 3) {
1554 DRM_ERROR("Relocation not 4-byte aligned: "
1555 "obj %p target %d offset %d.\n",
1556 obj, reloc.target_handle,
1557 (int) reloc.offset);
1558 drm_gem_object_unreference(target_obj);
1559 i915_gem_object_unpin(obj);
1563 if (reloc.write_domain && target_obj->pending_write_domain &&
1564 reloc.write_domain != target_obj->pending_write_domain) {
1565 DRM_ERROR("Write domain conflict: "
1566 "obj %p target %d offset %d "
1567 "new %08x old %08x\n",
1568 obj, reloc.target_handle,
1571 target_obj->pending_write_domain);
1572 drm_gem_object_unreference(target_obj);
1573 i915_gem_object_unpin(obj);
1578 DRM_INFO("%s: obj %p offset %08x target %d "
1579 "read %08x write %08x gtt %08x "
1580 "presumed %08x delta %08x\n",
1584 (int) reloc.target_handle,
1585 (int) reloc.read_domains,
1586 (int) reloc.write_domain,
1587 (int) target_obj_priv->gtt_offset,
1588 (int) reloc.presumed_offset,
1592 target_obj->pending_read_domains |= reloc.read_domains;
1593 target_obj->pending_write_domain |= reloc.write_domain;
1595 /* If the relocation already has the right value in it, no
1596 * more work needs to be done.
1598 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1599 drm_gem_object_unreference(target_obj);
1603 /* Now that we're going to actually write some data in,
1604 * make sure that any rendering using this buffer's contents
1607 i915_gem_object_wait_rendering(obj);
1609 /* As we're writing through the gtt, flush
1610 * any CPU writes before we write the relocations
1612 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1613 i915_gem_clflush_object(obj);
1614 drm_agp_chipset_flush(dev);
1615 obj->write_domain = 0;
1618 /* Map the page containing the relocation we're going to
1621 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1622 if (reloc_page == NULL ||
1623 (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
1624 (reloc_offset & ~(PAGE_SIZE - 1))) {
1625 if (reloc_page != NULL)
1626 iounmap(reloc_page);
1628 reloc_page = ioremap(dev->agp->base +
1629 (reloc_offset & ~(PAGE_SIZE - 1)),
1631 last_reloc_offset = reloc_offset;
1632 if (reloc_page == NULL) {
1633 drm_gem_object_unreference(target_obj);
1634 i915_gem_object_unpin(obj);
1639 reloc_entry = (uint32_t *)((char *)reloc_page +
1640 (reloc_offset & (PAGE_SIZE - 1)));
1641 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1644 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1645 obj, (unsigned int) reloc.offset,
1646 readl(reloc_entry), reloc_val);
1648 writel(reloc_val, reloc_entry);
1650 /* Write the updated presumed offset for this entry back out
1653 reloc.presumed_offset = target_obj_priv->gtt_offset;
1654 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1656 drm_gem_object_unreference(target_obj);
1657 i915_gem_object_unpin(obj);
1661 drm_gem_object_unreference(target_obj);
1664 if (reloc_page != NULL)
1665 iounmap(reloc_page);
1669 i915_gem_dump_object(obj, 128, __func__, ~0);
1674 /** Dispatch a batchbuffer to the ring
1677 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1678 struct drm_i915_gem_execbuffer *exec,
1679 uint64_t exec_offset)
1681 struct drm_i915_private *dev_priv = dev->dev_private;
1682 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1683 (uintptr_t) exec->cliprects_ptr;
1684 int nbox = exec->num_cliprects;
1686 uint32_t exec_start, exec_len;
1689 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1690 exec_len = (uint32_t) exec->batch_len;
1692 if ((exec_start | exec_len) & 0x7) {
1693 DRM_ERROR("alignment\n");
1700 count = nbox ? nbox : 1;
1702 for (i = 0; i < count; i++) {
1704 int ret = i915_emit_box(dev, boxes, i,
1705 exec->DR1, exec->DR4);
1710 if (IS_I830(dev) || IS_845G(dev)) {
1712 OUT_RING(MI_BATCH_BUFFER);
1713 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1714 OUT_RING(exec_start + exec_len - 4);
1719 if (IS_I965G(dev)) {
1720 OUT_RING(MI_BATCH_BUFFER_START |
1722 MI_BATCH_NON_SECURE_I965);
1723 OUT_RING(exec_start);
1725 OUT_RING(MI_BATCH_BUFFER_START |
1727 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1733 /* XXX breadcrumb */
1737 /* Throttle our rendering by waiting until the ring has completed our requests
1738 * emitted over 20 msec ago.
1740 * This should get us reasonable parallelism between CPU and GPU but also
1741 * relatively low latency when blocking on a particular request to finish.
1744 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1746 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1750 mutex_lock(&dev->struct_mutex);
1751 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1752 i915_file_priv->mm.last_gem_throttle_seqno =
1753 i915_file_priv->mm.last_gem_seqno;
1755 ret = i915_wait_request(dev, seqno);
1756 mutex_unlock(&dev->struct_mutex);
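/*
 * Execbuffer ioctl: copies in the object list, pins each object and applies
 * its relocations, resolves cache domains, dispatches the batchbuffer and
 * tags every object with the request seqno so it can be retired later.
 */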
1761 i915_gem_execbuffer(struct drm_device *dev, void *data,
1762 struct drm_file *file_priv)
1764 struct drm_i915_private *dev_priv = dev->dev_private;
1765 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1766 struct drm_i915_gem_execbuffer *args = data;
1767 struct drm_i915_gem_exec_object *exec_list = NULL;
1768 struct drm_gem_object **object_list = NULL;
1769 struct drm_gem_object *batch_obj;
1770 int ret, i, pinned = 0;
1771 uint64_t exec_offset;
1772 uint32_t seqno, flush_domains;
1775 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1776 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1779 /* Copy in the exec list from userland */
1780 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1782 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1784 if (exec_list == NULL || object_list == NULL) {
1785 DRM_ERROR("Failed to allocate exec or object list "
1787 args->buffer_count);
1791 ret = copy_from_user(exec_list,
1792 (struct drm_i915_relocation_entry __user *)
1793 (uintptr_t) args->buffers_ptr,
1794 sizeof(*exec_list) * args->buffer_count);
1796 DRM_ERROR("copy %d exec entries failed %d\n",
1797 args->buffer_count, ret);
1801 mutex_lock(&dev->struct_mutex);
1803 i915_verify_inactive(dev, __FILE__, __LINE__);
1805 if (dev_priv->mm.wedged) {
1806 DRM_ERROR("Execbuf while wedged\n");
1807 mutex_unlock(&dev->struct_mutex);
1811 if (dev_priv->mm.suspended) {
1812 DRM_ERROR("Execbuf while VT-switched.\n");
1813 mutex_unlock(&dev->struct_mutex);
1817 /* Zero the global flush/invalidate flags. These
1818 * will be modified as each object is bound to the
1821 dev->invalidate_domains = 0;
1822 dev->flush_domains = 0;
1824 /* Look up object handles and perform the relocations */
1825 for (i = 0; i < args->buffer_count; i++) {
1826 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1827 exec_list[i].handle);
1828 if (object_list[i] == NULL) {
1829 DRM_ERROR("Invalid object handle %d at index %d\n",
1830 exec_list[i].handle, i);
1835 object_list[i]->pending_read_domains = 0;
1836 object_list[i]->pending_write_domain = 0;
1837 ret = i915_gem_object_pin_and_relocate(object_list[i],
1841 DRM_ERROR("object bind and relocate failed %d\n", ret);
1847 /* Set the pending read domains for the batch buffer to COMMAND */
1848 batch_obj = object_list[args->buffer_count-1];
1849 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1850 batch_obj->pending_write_domain = 0;
1852 i915_verify_inactive(dev, __FILE__, __LINE__);
1854 for (i = 0; i < args->buffer_count; i++) {
1855 struct drm_gem_object *obj = object_list[i];
1856 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1858 if (obj_priv->gtt_space == NULL) {
1859 /* We evicted the buffer in the process of validating
1860 * our set of buffers in. We could try to recover by
1861 * kicking everything out and trying again from
1868 /* make sure all previous memory operations have passed */
1869 ret = i915_gem_object_set_domain(obj,
1870 obj->pending_read_domains,
1871 obj->pending_write_domain);
1876 i915_verify_inactive(dev, __FILE__, __LINE__);
1878 /* Flush/invalidate caches and chipset buffer */
1879 flush_domains = i915_gem_dev_set_domain(dev);
1881 i915_verify_inactive(dev, __FILE__, __LINE__);
1884 for (i = 0; i < args->buffer_count; i++) {
1885 i915_gem_object_check_coherency(object_list[i],
1886 exec_list[i].handle);
1890 exec_offset = exec_list[args->buffer_count - 1].offset;
1893 i915_gem_dump_object(object_list[args->buffer_count - 1],
1899 /* Exec the batchbuffer */
1900 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1902 DRM_ERROR("dispatch failed %d\n", ret);
1907 * Ensure that the commands in the batch buffer are
1908 * finished before the interrupt fires
1910 flush_domains |= i915_retire_commands(dev);
1912 i915_verify_inactive(dev, __FILE__, __LINE__);
1915 * Get a seqno representing the execution of the current buffer,
1916 * which we can wait on. We would like to mitigate these interrupts,
1917 * likely by only creating seqnos occasionally (so that we have
1918 * *some* interrupts representing completion of buffers that we can
1919 * wait on when trying to clear up gtt space).
1921 seqno = i915_add_request(dev, flush_domains);
1923 i915_file_priv->mm.last_gem_seqno = seqno;
1924 for (i = 0; i < args->buffer_count; i++) {
1925 struct drm_gem_object *obj = object_list[i];
1926 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1928 i915_gem_object_move_to_active(obj);
1929 obj_priv->last_rendering_seqno = seqno;
1931 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1935 i915_dump_lru(dev, __func__);
1938 i915_verify_inactive(dev, __FILE__, __LINE__);
1940 /* Copy the new buffer offsets back to the user's exec list. */
1941 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1942 (uintptr_t) args->buffers_ptr,
1944 sizeof(*exec_list) * args->buffer_count);
1946 DRM_ERROR("failed to copy %d exec entries "
1947 "back to user (%d)\n",
1948 args->buffer_count, ret);
1950 if (object_list != NULL) {
1951 for (i = 0; i < pinned; i++)
1952 i915_gem_object_unpin(object_list[i]);
1954 for (i = 0; i < args->buffer_count; i++)
1955 drm_gem_object_unreference(object_list[i]);
1957 mutex_unlock(&dev->struct_mutex);
1960 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1962 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
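/*
 * Binds the object into the GTT if it isn't already there and bumps its pin
 * count; while pinned, the object is kept off the inactive LRU so it can't
 * be evicted.
 */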
1969 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1971 struct drm_device *dev = obj->dev;
1972 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1975 i915_verify_inactive(dev, __FILE__, __LINE__);
1976 if (obj_priv->gtt_space == NULL) {
1977 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1979 DRM_ERROR("Failure to bind: %d", ret);
1983 obj_priv->pin_count++;
1985 /* If the object is not active and not pending a flush,
1986 * remove it from the inactive list
1988 if (obj_priv->pin_count == 1) {
1989 atomic_inc(&dev->pin_count);
1990 atomic_add(obj->size, &dev->pin_memory);
1991 if (!obj_priv->active &&
1992 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
1993 I915_GEM_DOMAIN_GTT)) == 0 &&
1994 !list_empty(&obj_priv->list))
1995 list_del_init(&obj_priv->list);
1997 i915_verify_inactive(dev, __FILE__, __LINE__);
2003 i915_gem_object_unpin(struct drm_gem_object *obj)
2005 struct drm_device *dev = obj->dev;
2006 struct drm_i915_private *dev_priv = dev->dev_private;
2007 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2009 i915_verify_inactive(dev, __FILE__, __LINE__);
2010 obj_priv->pin_count--;
2011 BUG_ON(obj_priv->pin_count < 0);
2012 BUG_ON(obj_priv->gtt_space == NULL);
2014 /* If the object is no longer pinned, and is
2015 * neither active nor being flushed, then stick it on
2018 if (obj_priv->pin_count == 0) {
2019 if (!obj_priv->active &&
2020 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2021 I915_GEM_DOMAIN_GTT)) == 0)
2022 list_move_tail(&obj_priv->list,
2023 &dev_priv->mm.inactive_list);
2024 atomic_dec(&dev->pin_count);
2025 atomic_sub(obj->size, &dev->pin_memory);
2027 i915_verify_inactive(dev, __FILE__, __LINE__);
2031 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2032 struct drm_file *file_priv)
2034 struct drm_i915_gem_pin *args = data;
2035 struct drm_gem_object *obj;
2036 struct drm_i915_gem_object *obj_priv;
2039 mutex_lock(&dev->struct_mutex);
2041 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2043 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2045 mutex_unlock(&dev->struct_mutex);
2048 obj_priv = obj->driver_private;
2050 ret = i915_gem_object_pin(obj, args->alignment);
2052 drm_gem_object_unreference(obj);
2053 mutex_unlock(&dev->struct_mutex);
2057 /* XXX - flush the CPU caches for pinned objects
2058 * as the X server doesn't manage domains yet
2060 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2061 i915_gem_clflush_object(obj);
2062 drm_agp_chipset_flush(dev);
2063 obj->write_domain = 0;
2065 args->offset = obj_priv->gtt_offset;
2066 drm_gem_object_unreference(obj);
2067 mutex_unlock(&dev->struct_mutex);
2073 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2074 struct drm_file *file_priv)
2076 struct drm_i915_gem_pin *args = data;
2077 struct drm_gem_object *obj;
2079 mutex_lock(&dev->struct_mutex);
2081 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2083 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2085 mutex_unlock(&dev->struct_mutex);
2089 i915_gem_object_unpin(obj);
2091 drm_gem_object_unreference(obj);
2092 mutex_unlock(&dev->struct_mutex);
2097 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2098 struct drm_file *file_priv)
2100 struct drm_i915_gem_busy *args = data;
2101 struct drm_gem_object *obj;
2102 struct drm_i915_gem_object *obj_priv;
2104 mutex_lock(&dev->struct_mutex);
2105 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2107 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2109 mutex_unlock(&dev->struct_mutex);
2113 obj_priv = obj->driver_private;
2114 args->busy = obj_priv->active;
2116 drm_gem_object_unreference(obj);
2117 mutex_unlock(&dev->struct_mutex);
2122 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2123 struct drm_file *file_priv)
2125 return i915_gem_ring_throttle(dev, file_priv);
2128 int i915_gem_init_object(struct drm_gem_object *obj)
2130 struct drm_i915_gem_object *obj_priv;
2132 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2133 if (obj_priv == NULL)
2137 * We've just allocated pages from the kernel,
2138 * so they've just been written by the CPU with
2139 * zeros. They'll need to be clflushed before we
2140 * use them with the GPU.
2142 obj->write_domain = I915_GEM_DOMAIN_CPU;
2143 obj->read_domains = I915_GEM_DOMAIN_CPU;
2145 obj->driver_private = obj_priv;
2146 obj_priv->obj = obj;
2147 INIT_LIST_HEAD(&obj_priv->list);
2151 void i915_gem_free_object(struct drm_gem_object *obj)
2153 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2155 while (obj_priv->pin_count > 0)
2156 i915_gem_object_unpin(obj);
2158 i915_gem_object_unbind(obj);
2160 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2161 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2165 i915_gem_set_domain(struct drm_gem_object *obj,
2166 struct drm_file *file_priv,
2167 uint32_t read_domains,
2168 uint32_t write_domain)
2170 struct drm_device *dev = obj->dev;
2172 uint32_t flush_domains;
2174 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2176 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2179 flush_domains = i915_gem_dev_set_domain(obj->dev);
2181 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2182 (void) i915_add_request(dev, flush_domains);
2187 /** Unbinds all objects that are on the given buffer list. */
2189 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2191 struct drm_gem_object *obj;
2192 struct drm_i915_gem_object *obj_priv;
2195 while (!list_empty(head)) {
2196 obj_priv = list_first_entry(head,
2197 struct drm_i915_gem_object,
2199 obj = obj_priv->obj;
2201 if (obj_priv->pin_count != 0) {
2202 DRM_ERROR("Pinned object in unbind list\n");
2203 mutex_unlock(&dev->struct_mutex);
2207 ret = i915_gem_object_unbind(obj);
2209 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2211 mutex_unlock(&dev->struct_mutex);
2221 i915_gem_idle(struct drm_device *dev)
2223 struct drm_i915_private *dev_priv = dev->dev_private;
2224 uint32_t seqno, cur_seqno, last_seqno;
2227 if (dev_priv->mm.suspended)
2230 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2231 * We need to replace this with a semaphore, or something.
2233 dev_priv->mm.suspended = 1;
2235 i915_kernel_lost_context(dev);
2237 /* Flush the GPU along with all non-CPU write domains
2239 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2240 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2241 seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2242 I915_GEM_DOMAIN_GTT));
2245 mutex_unlock(&dev->struct_mutex);
2249 dev_priv->mm.waiting_gem_seqno = seqno;
2253 cur_seqno = i915_get_gem_seqno(dev);
2254 if (i915_seqno_passed(cur_seqno, seqno))
2256 if (last_seqno == cur_seqno) {
2257 if (stuck++ > 100) {
2258 DRM_ERROR("hardware wedged\n");
2259 dev_priv->mm.wedged = 1;
2260 DRM_WAKEUP(&dev_priv->irq_queue);
2265 last_seqno = cur_seqno;
2267 dev_priv->mm.waiting_gem_seqno = 0;
2269 i915_gem_retire_requests(dev);
2271 /* Active and flushing should now be empty as we've
2272 * waited for a sequence higher than any pending execbuffer
2274 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2275 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2277 /* Request should now be empty as we've also waited
2278 * for the last request in the list
2280 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2282 /* Move all buffers out of the GTT. */
2283 i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2285 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2286 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2287 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2288 BUG_ON(!list_empty(&dev_priv->mm.request_list));
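/*
 * Allocates and pins a GEM object to back the hardware status page on chips
 * that need a GTT-based status page (otherwise the physical address set up
 * at driver load time is used), maps it, and points HWS_PGA at it.
 */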
2293 i915_gem_init_hws(struct drm_device *dev)
2295 struct drm_i915_private *dev_priv = dev->dev_private;
2296 struct drm_gem_object *obj;
2297 struct drm_i915_gem_object *obj_priv;
2300 /* If we need a physical address for the status page, it's already
2301 * initialized at driver load time.
2303 if (!I915_NEED_GFX_HWS(dev))
2306 obj = drm_gem_object_alloc(dev, 4096);
2308 DRM_ERROR("Failed to allocate status page\n");
2311 obj_priv = obj->driver_private;
2313 ret = i915_gem_object_pin(obj, 4096);
2315 drm_gem_object_unreference(obj);
2319 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2320 dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
2321 dev_priv->hws_map.size = 4096;
2322 dev_priv->hws_map.type = 0;
2323 dev_priv->hws_map.flags = 0;
2324 dev_priv->hws_map.mtrr = 0;
2326 drm_core_ioremap(&dev_priv->hws_map, dev);
2327 if (dev_priv->hws_map.handle == NULL) {
2328 DRM_ERROR("Failed to map status page.\n");
2329 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2330 drm_gem_object_unreference(obj);
2333 dev_priv->hws_obj = obj;
2334 dev_priv->hw_status_page = dev_priv->hws_map.handle;
2335 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2336 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2337 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
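/*
 * Sets up the ring buffer: allocates and pins a 128KB GEM object, maps it
 * for the CPU, and programs the PRB0 head/tail/start/control registers to
 * point at it.
 */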
2343 i915_gem_init_ringbuffer(struct drm_device *dev)
2345 struct drm_i915_private *dev_priv = dev->dev_private;
2346 struct drm_gem_object *obj;
2347 struct drm_i915_gem_object *obj_priv;
2350 ret = i915_gem_init_hws(dev);
2354 obj = drm_gem_object_alloc(dev, 128 * 1024);
2356 DRM_ERROR("Failed to allocate ringbuffer\n");
2359 obj_priv = obj->driver_private;
2361 ret = i915_gem_object_pin(obj, 4096);
2363 drm_gem_object_unreference(obj);
2367 /* Set up the kernel mapping for the ring. */
2368 dev_priv->ring.Size = obj->size;
2369 dev_priv->ring.tail_mask = obj->size - 1;
2371 dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2372 dev_priv->ring.map.size = obj->size;
2373 dev_priv->ring.map.type = 0;
2374 dev_priv->ring.map.flags = 0;
2375 dev_priv->ring.map.mtrr = 0;
2377 drm_core_ioremap(&dev_priv->ring.map, dev);
2378 if (dev_priv->ring.map.handle == NULL) {
2379 DRM_ERROR("Failed to map ringbuffer.\n");
2380 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2381 drm_gem_object_unreference(obj);
2384 dev_priv->ring.ring_obj = obj;
2385 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2387 /* Stop the ring if it's running. */
2388 I915_WRITE(PRB0_CTL, 0);
2389 I915_WRITE(PRB0_HEAD, 0);
2390 I915_WRITE(PRB0_TAIL, 0);
2391 I915_WRITE(PRB0_START, 0);
2393 /* Initialize the ring. */
2394 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
2395 I915_WRITE(PRB0_CTL,
2396 ((obj->size - 4096) & RING_NR_PAGES) |
2400 /* Update our cache of the ring state */
2401 i915_kernel_lost_context(dev);
2407 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2409 struct drm_i915_private *dev_priv = dev->dev_private;
2411 if (dev_priv->ring.ring_obj == NULL)
2414 drm_core_ioremapfree(&dev_priv->ring.map, dev);
2416 i915_gem_object_unpin(dev_priv->ring.ring_obj);
2417 drm_gem_object_unreference(dev_priv->ring.ring_obj);
2418 dev_priv->ring.ring_obj = NULL;
2419 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2421 if (dev_priv->hws_obj != NULL) {
2422 i915_gem_object_unpin(dev_priv->hws_obj);
2423 drm_gem_object_unreference(dev_priv->hws_obj);
2424 dev_priv->hws_obj = NULL;
2425 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2427 /* Write high address into HWS_PGA when disabling. */
2428 I915_WRITE(HWS_PGA, 0x1ffff000);
2433 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2434 struct drm_file *file_priv)
2436 struct drm_i915_private *dev_priv = dev->dev_private;
2439 if (drm_core_check_feature(dev, DRIVER_MODESET))
2442 if (dev_priv->mm.wedged) {
2443 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2444 dev_priv->mm.wedged = 0;
2447 ret = i915_gem_init_ringbuffer(dev);
2451 mutex_lock(&dev->struct_mutex);
2452 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2453 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2454 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2455 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2456 dev_priv->mm.suspended = 0;
2457 mutex_unlock(&dev->struct_mutex);
2462 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2463 struct drm_file *file_priv)
2467 if (drm_core_check_feature(dev, DRIVER_MODESET))
2470 mutex_lock(&dev->struct_mutex);
2471 ret = i915_gem_idle(dev);
2473 i915_gem_cleanup_ringbuffer(dev);
2474 mutex_unlock(&dev->struct_mutex);
2480 i915_gem_lastclose(struct drm_device *dev)
2483 struct drm_i915_private *dev_priv = dev->dev_private;
2485 mutex_lock(&dev->struct_mutex);
2487 if (dev_priv->ring.ring_obj != NULL) {
2488 ret = i915_gem_idle(dev);
2490 DRM_ERROR("failed to idle hardware: %d\n", ret);
2492 i915_gem_cleanup_ringbuffer(dev);
2495 mutex_unlock(&dev->struct_mutex);
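/*
 * One-time GEM setup at driver load: initializes the memory-manager lists
 * and the retire work handler, starts the seqno counter at 1, and detects
 * the bit-6 swizzling mode used for tiled buffers.
 */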
2498 void i915_gem_load(struct drm_device *dev)
2500 struct drm_i915_private *dev_priv = dev->dev_private;
2502 INIT_LIST_HEAD(&dev_priv->mm.active_list);
2503 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2504 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2505 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2506 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2507 i915_gem_retire_work_handler);
2508 dev_priv->mm.next_gem_seqno = 1;
2510 i915_gem_detect_bit_6_swizzle(dev);