2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
30 #include "drm_compat.h"
33 #include <linux/swap.h>
36 i915_gem_object_set_domain(struct drm_gem_object *obj,
37 uint32_t read_domains,
38 uint32_t write_domain);
40 i915_gem_set_domain(struct drm_gem_object *obj,
41 struct drm_file *file_priv,
42 uint32_t read_domains,
43 uint32_t write_domain);
44 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
45 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
46 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
49 i915_gem_init_ioctl(struct drm_device *dev, void *data,
50 struct drm_file *file_priv)
52 drm_i915_private_t *dev_priv = dev->dev_private;
53 struct drm_i915_gem_init *args = data;
55 mutex_lock(&dev->struct_mutex);
57 if (args->gtt_start >= args->gtt_end ||
58 (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
59 (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
60 mutex_unlock(&dev->struct_mutex);
64 drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
65 args->gtt_end - args->gtt_start);
67 dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
69 mutex_unlock(&dev->struct_mutex);
76 * Creates a new mm object and returns a handle to it.
79 i915_gem_create_ioctl(struct drm_device *dev, void *data,
80 struct drm_file *file_priv)
82 struct drm_i915_gem_create *args = data;
83 struct drm_gem_object *obj;
86 args->size = roundup(args->size, PAGE_SIZE);
88 /* Allocate the new object */
89 obj = drm_gem_object_alloc(dev, args->size);
93 ret = drm_gem_handle_create(file_priv, obj, &handle);
94 mutex_lock(&dev->struct_mutex);
95 drm_gem_object_handle_unreference(obj);
96 mutex_unlock(&dev->struct_mutex);
101 args->handle = handle;
107 * Reads data from the object referenced by handle.
109 * On error, the contents of *data are undefined.
112 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
113 struct drm_file *file_priv)
115 struct drm_i915_gem_pread *args = data;
116 struct drm_gem_object *obj;
117 struct drm_i915_gem_object *obj_priv;
122 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
125 obj_priv = obj->driver_private;
127 /* Bounds check source.
129 * XXX: This could use review for overflow issues...
131 if (args->offset > obj->size || args->size > obj->size ||
132 args->offset + args->size > obj->size) {
133 drm_gem_object_unreference(obj);
137 mutex_lock(&dev->struct_mutex);
139 /* Do a partial equivalent of i915_gem_set_domain(CPU, 0), as
140 * we don't want to clflush whole objects to read a portion of them.
142 * The side effect of doing this is that repeated preads of the same
143 * contents would take extra clflush overhead, since we don't track
144 * flushedness on a page basis.
146 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
147 ret = i915_gem_object_wait_rendering(obj);
149 drm_gem_object_unreference(obj);
150 mutex_unlock(&dev->struct_mutex);
154 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
155 int first_page = args->offset / PAGE_SIZE;
156 int last_page = (args->offset + args->size - 1) / PAGE_SIZE;
158 /* If we don't have the page list, the pages are unpinned
159 * and swappable, and thus should already be in the CPU domain.
161 BUG_ON(obj_priv->page_list == NULL);
163 drm_ttm_cache_flush(&obj_priv->page_list[first_page],
164 last_page - first_page + 1);
167 offset = args->offset;
169 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
170 args->size, &offset);
171 if (read != args->size) {
172 drm_gem_object_unreference(obj);
173 mutex_unlock(&dev->struct_mutex);
180 drm_gem_object_unreference(obj);
181 mutex_unlock(&dev->struct_mutex);
186 #include "drm_compat.h"
189 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
190 struct drm_i915_gem_pwrite *args,
191 struct drm_file *file_priv)
193 struct drm_i915_gem_object *obj_priv = obj->driver_private;
196 char __user *user_data;
201 unsigned long unwritten;
203 user_data = (char __user *) (uintptr_t) args->data_ptr;
205 if (!access_ok(VERIFY_READ, user_data, remain))
209 mutex_lock(&dev->struct_mutex);
210 ret = i915_gem_object_pin(obj, 0);
212 mutex_unlock(&dev->struct_mutex);
215 ret = i915_gem_set_domain(obj, file_priv,
216 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
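/* Writing through the GTT aperture goes straight to memory via the
 * chipset, so no clflush of the CPU caches is needed for the GPU to see
 * the data; that is why the object is moved to the GTT domain before the
 * copy below.
 */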
220 obj_priv = obj->driver_private;
221 offset = obj_priv->gtt_offset + args->offset;
226 /** Operation in this page:
229 * i = page number, o = offset within page, l = bytes to copy
232 i = offset >> PAGE_SHIFT;
233 o = offset & (PAGE_SIZE-1);
235 if ((o + l) > PAGE_SIZE)
238 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
240 #ifdef DRM_KMAP_ATOMIC_PROT_PFN
241 /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
243 vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
244 __pgprot(__PAGE_KERNEL));
246 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
247 i, o, l, pfn, vaddr);
249 unwritten = __copy_from_user_inatomic_nocache(vaddr + o, user_data, l);
250 kunmap_atomic(vaddr, KM_USER0);
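/* Fast path: with kmap_atomic_prot_pfn() available, the aperture page is
 * mapped without sleeping and filled with a non-caching copy.  The
 * fallback below maps the single page with ioremap() and uses a plain
 * __copy_from_user() instead.
 */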
255 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
257 DRM_INFO("pwrite slow i %d o %d l %d pfn %ld vaddr %p\n",
258 i, o, l, pfn, vaddr);
264 unwritten = __copy_from_user(vaddr + o, user_data, l);
266 DRM_INFO("unwritten %ld\n", unwritten);
279 #if WATCH_PWRITE && 1
280 i915_gem_clflush_object(obj);
281 i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
282 i915_gem_clflush_object(obj);
286 i915_gem_object_unpin (obj);
287 mutex_unlock(&dev->struct_mutex);
293 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
294 struct drm_i915_gem_pwrite *args,
295 struct drm_file *file_priv)
301 mutex_lock(&dev->struct_mutex);
303 ret = i915_gem_set_domain(obj, file_priv,
304 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
306 mutex_unlock(&dev->struct_mutex);
310 offset = args->offset;
312 written = vfs_write(obj->filp,
313 (char __user *)(uintptr_t) args->data_ptr,
314 args->size, &offset);
315 if (written != args->size) {
316 mutex_unlock(&dev->struct_mutex);
323 mutex_unlock(&dev->struct_mutex);
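/* Unlike the GTT path above, the shmem path first moves the object to the
 * CPU domain (flushing any pending GPU writes) and then writes through the
 * page cache with vfs_write(); it is used for tiled objects, where a GTT
 * write would be fenced/detiled differently from a CPU read.
 */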
329 * Writes data to the object referenced by handle.
331 * On error, the contents of the buffer that were to be modified are undefined.
334 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
335 struct drm_file *file_priv)
337 struct drm_i915_gem_pwrite *args = data;
338 struct drm_gem_object *obj;
339 struct drm_i915_gem_object *obj_priv;
342 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
345 obj_priv = obj->driver_private;
347 /** Bounds check destination.
349 * XXX: This could use review for overflow issues...
351 if (args->offset > obj->size || args->size > obj->size ||
352 args->offset + args->size > obj->size) {
353 drm_gem_object_unreference(obj);
357 /* We can only do the GTT pwrite on untiled buffers, as otherwise
358 * it would end up going through the fenced access, and we'll get
359 * different detiling behavior between reading and writing.
360 * pread/pwrite currently are reading and writing from the CPU
361 * perspective, requiring manual detiling by the client.
363 if (obj_priv->tiling_mode == I915_TILING_NONE)
364 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
366 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
370 DRM_INFO("pwrite failed %d\n", ret);
373 drm_gem_object_unreference(obj);
379 * Called when user space prepares to use an object
382 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
383 struct drm_file *file_priv)
385 struct drm_i915_gem_set_domain *args = data;
386 struct drm_gem_object *obj;
389 if (!(dev->driver->driver_features & DRIVER_GEM))
392 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
396 mutex_lock(&dev->struct_mutex);
397 ret = i915_gem_set_domain(obj, file_priv,
398 args->read_domains, args->write_domain);
399 drm_gem_object_unreference(obj);
400 mutex_unlock(&dev->struct_mutex);
405 * Called when user space has done writes to this buffer
408 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
409 struct drm_file *file_priv)
411 struct drm_i915_gem_sw_finish *args = data;
412 struct drm_gem_object *obj;
413 struct drm_i915_gem_object *obj_priv;
416 if (!(dev->driver->driver_features & DRIVER_GEM))
419 mutex_lock(&dev->struct_mutex);
420 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
422 mutex_unlock(&dev->struct_mutex);
427 DRM_INFO("%s: sw_finish %d (%p)\n",
428 __func__, args->handle, obj);
430 obj_priv = obj->driver_private;
432 /** Pinned buffers may be scanout, so flush the cache
434 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
435 i915_gem_clflush_object(obj);
436 drm_agp_chipset_flush(dev);
438 drm_gem_object_unreference(obj);
439 mutex_unlock(&dev->struct_mutex);
444 * Maps the contents of an object, returning the address it is mapped
447 * While the mapping holds a reference on the contents of the object, it doesn't
448 * imply a ref on the object itself.
451 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
452 struct drm_file *file_priv)
454 struct drm_i915_gem_mmap *args = data;
455 struct drm_gem_object *obj;
459 if (!(dev->driver->driver_features & DRIVER_GEM))
462 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
466 offset = args->offset;
468 down_write(&current->mm->mmap_sem);
469 addr = do_mmap(obj->filp, 0, args->size,
470 PROT_READ | PROT_WRITE, MAP_SHARED,
472 up_write(&current->mm->mmap_sem);
473 mutex_lock(&dev->struct_mutex);
474 drm_gem_object_unreference(obj);
475 mutex_unlock(&dev->struct_mutex);
476 if (IS_ERR((void *)addr))
479 args->addr_ptr = (uint64_t) addr;
485 i915_gem_object_free_page_list(struct drm_gem_object *obj)
487 struct drm_i915_gem_object *obj_priv = obj->driver_private;
488 int page_count = obj->size / PAGE_SIZE;
491 if (obj_priv->page_list == NULL)
495 for (i = 0; i < page_count; i++)
496 if (obj_priv->page_list[i] != NULL) {
498 set_page_dirty(obj_priv->page_list[i]);
499 mark_page_accessed(obj_priv->page_list[i]);
500 page_cache_release(obj_priv->page_list[i]);
504 drm_free(obj_priv->page_list,
505 page_count * sizeof(struct page *),
507 obj_priv->page_list = NULL;
511 i915_gem_object_move_to_active(struct drm_gem_object *obj)
513 struct drm_device *dev = obj->dev;
514 drm_i915_private_t *dev_priv = dev->dev_private;
515 struct drm_i915_gem_object *obj_priv = obj->driver_private;
517 /* Add a reference if we're newly entering the active list. */
518 if (!obj_priv->active) {
519 drm_gem_object_reference(obj);
520 obj_priv->active = 1;
522 /* Move from whatever list we were on to the tail of execution. */
523 list_move_tail(&obj_priv->list,
524 &dev_priv->mm.active_list);
529 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
531 struct drm_device *dev = obj->dev;
532 drm_i915_private_t *dev_priv = dev->dev_private;
533 struct drm_i915_gem_object *obj_priv = obj->driver_private;
535 i915_verify_inactive(dev, __FILE__, __LINE__);
536 if (obj_priv->pin_count != 0)
537 list_del_init(&obj_priv->list);
539 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
541 if (obj_priv->active) {
542 obj_priv->active = 0;
543 drm_gem_object_unreference(obj);
545 i915_verify_inactive(dev, __FILE__, __LINE__);
549 * Creates a new sequence number, emitting a write of it to the status page
550 * plus an interrupt, which will trigger i915_user_interrupt_handler.
552 * Must be called with struct_lock held.
554 * Returned sequence numbers are nonzero on success.
557 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
559 drm_i915_private_t *dev_priv = dev->dev_private;
560 struct drm_i915_gem_request *request;
565 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
569 /* Grab the seqno we're going to make this request be, and bump the
570 * next (skipping 0 so it can be the reserved no-seqno value).
572 seqno = dev_priv->mm.next_gem_seqno;
573 dev_priv->mm.next_gem_seqno++;
574 if (dev_priv->mm.next_gem_seqno == 0)
575 dev_priv->mm.next_gem_seqno++;
578 OUT_RING(CMD_STORE_DWORD_IDX);
579 OUT_RING(I915_GEM_HWS_INDEX << STORE_DWORD_INDEX_SHIFT);
582 OUT_RING(GFX_OP_USER_INTERRUPT);
585 DRM_DEBUG("%d\n", seqno);
587 request->seqno = seqno;
588 request->emitted_jiffies = jiffies;
589 request->flush_domains = flush_domains;
590 was_empty = list_empty(&dev_priv->mm.request_list);
591 list_add_tail(&request->list, &dev_priv->mm.request_list);
594 schedule_delayed_work (&dev_priv->mm.retire_work, HZ);
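/* The retire handler re-arms itself (see i915_gem_retire_work_handler), so
 * finished requests are retired roughly once a second even when nobody is
 * blocked in i915_wait_request().
 */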
599 * Command execution barrier
601 * Ensures that all commands in the ring are finished
602 * before signalling the CPU
606 i915_retire_commands(struct drm_device *dev)
608 drm_i915_private_t *dev_priv = dev->dev_private;
609 uint32_t cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
610 uint32_t flush_domains = 0;
613 /* The sampler always gets flushed on i965 (sigh) */
615 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
618 OUT_RING(0); /* noop */
620 return flush_domains;
624 * Moves buffers associated only with the given active seqno from the active
625 * to inactive list, potentially freeing them.
628 i915_gem_retire_request(struct drm_device *dev,
629 struct drm_i915_gem_request *request)
631 drm_i915_private_t *dev_priv = dev->dev_private;
633 if (request->flush_domains != 0) {
634 struct drm_i915_gem_object *obj_priv, *next;
636 /* First clear any buffers that were only waiting for a flush
637 * matching the one just retired.
640 list_for_each_entry_safe(obj_priv, next,
641 &dev_priv->mm.flushing_list, list) {
642 struct drm_gem_object *obj = obj_priv->obj;
644 if (obj->write_domain & request->flush_domains) {
645 obj->write_domain = 0;
646 i915_gem_object_move_to_inactive(obj);
652 /* Move any buffers on the active list that are no longer referenced
653 * by the ringbuffer to the flushing/inactive lists as appropriate.
655 while (!list_empty(&dev_priv->mm.active_list)) {
656 struct drm_gem_object *obj;
657 struct drm_i915_gem_object *obj_priv;
659 obj_priv = list_first_entry(&dev_priv->mm.active_list,
660 struct drm_i915_gem_object,
664 /* If the seqno being retired doesn't match the oldest in the
665 * list, then the oldest in the list must still be newer than
668 if (obj_priv->last_rendering_seqno != request->seqno)
671 DRM_INFO("%s: retire %d moves to inactive list %p\n",
672 __func__, request->seqno, obj);
675 if (obj->write_domain != 0) {
676 list_move_tail(&obj_priv->list,
677 &dev_priv->mm.flushing_list);
679 i915_gem_object_move_to_inactive(obj);
685 * Returns true if seq1 is later than seq2.
688 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
690 return (int32_t)(seq1 - seq2) >= 0;
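/* The signed subtraction makes this safe across 32-bit wraparound: e.g.
 * seq1 = 1, seq2 = 0xffffffff gives (int32_t)2 >= 0, so a seqno issued
 * just after the counter wraps still compares as "later".
 */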
694 i915_get_gem_seqno(struct drm_device *dev)
696 drm_i915_private_t *dev_priv = dev->dev_private;
698 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
702 * This function clears the request list as sequence numbers are passed.
705 i915_gem_retire_requests(struct drm_device *dev)
707 drm_i915_private_t *dev_priv = dev->dev_private;
710 seqno = i915_get_gem_seqno(dev);
712 while (!list_empty(&dev_priv->mm.request_list)) {
713 struct drm_i915_gem_request *request;
714 uint32_t retiring_seqno;
716 request = list_first_entry(&dev_priv->mm.request_list,
717 struct drm_i915_gem_request,
719 retiring_seqno = request->seqno;
721 if (i915_seqno_passed(seqno, retiring_seqno) || dev_priv->mm.wedged) {
722 i915_gem_retire_request(dev, request);
724 list_del(&request->list);
725 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
732 i915_gem_retire_work_handler(struct work_struct *work)
734 drm_i915_private_t *dev_priv;
735 struct drm_device *dev;
737 dev_priv = container_of(work, drm_i915_private_t,
738 mm.retire_work.work);
741 mutex_lock(&dev->struct_mutex);
742 i915_gem_retire_requests(dev);
743 if (!list_empty(&dev_priv->mm.request_list))
744 schedule_delayed_work (&dev_priv->mm.retire_work, HZ);
745 mutex_unlock(&dev->struct_mutex);
749 * Waits for a sequence number to be signaled, and cleans up the
750 * request and object lists appropriately for that event.
753 i915_wait_request(struct drm_device *dev, uint32_t seqno)
755 drm_i915_private_t *dev_priv = dev->dev_private;
760 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
761 dev_priv->mm.waiting_gem_seqno = seqno;
762 i915_user_irq_on(dev_priv);
763 ret = wait_event_interruptible(dev_priv->irq_queue,
764 i915_seqno_passed(i915_get_gem_seqno(dev),
765 seqno) || dev_priv->mm.wedged);
766 i915_user_irq_off(dev_priv);
767 dev_priv->mm.waiting_gem_seqno = 0;
769 if (dev_priv->mm.wedged)
773 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
774 __func__, ret, seqno, i915_get_gem_seqno(dev));
776 /* Directly dispatch request retiring. While we have the work queue
777 * to handle this, the waiter on a request often wants an associated
778 * buffer to have made it to the inactive list, and we would need
779 * a separate wait queue to handle that.
782 i915_gem_retire_requests(dev);
788 i915_gem_flush(struct drm_device *dev,
789 uint32_t invalidate_domains,
790 uint32_t flush_domains)
792 drm_i915_private_t *dev_priv = dev->dev_private;
797 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
798 invalidate_domains, flush_domains);
801 if (flush_domains & I915_GEM_DOMAIN_CPU)
802 drm_agp_chipset_flush(dev);
804 if ((invalidate_domains|flush_domains) & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
808 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
809 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
810 * also flushed at 2d versus 3d pipeline switches.
814 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
815 * MI_READ_FLUSH is set, and is always flushed on 965.
817 * I915_GEM_DOMAIN_COMMAND may not exist?
819 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
820 * invalidated when MI_EXE_FLUSH is set.
822 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
823 * invalidated with every MI_FLUSH.
827 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
828 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
829 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
830 * are flushed at any MI_FLUSH.
833 cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
834 if ((invalidate_domains|flush_domains) &
835 I915_GEM_DOMAIN_RENDER)
836 cmd &= ~MI_NO_WRITE_FLUSH;
837 if (!IS_I965G(dev)) {
839 * On the 965, the sampler cache always gets flushed
840 * and this bit is reserved.
842 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
843 cmd |= MI_READ_FLUSH;
845 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
849 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
853 OUT_RING(0); /* noop */
859 * Ensures that all rendering to the object has completed and the object is
860 * safe to unbind from the GTT or access from the CPU.
863 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
865 struct drm_device *dev = obj->dev;
866 struct drm_i915_gem_object *obj_priv = obj->driver_private;
869 /* If there are writes queued to the buffer, flush and
870 * create a new seqno to wait for.
872 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
873 uint32_t write_domain = obj->write_domain;
875 DRM_INFO("%s: flushing object %p from write domain %08x\n",
876 __func__, obj, write_domain);
878 i915_gem_flush(dev, 0, write_domain);
879 obj->write_domain = 0;
881 i915_gem_object_move_to_active(obj);
882 obj_priv->last_rendering_seqno = i915_add_request(dev,
884 BUG_ON(obj_priv->last_rendering_seqno == 0);
886 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
889 /* If there is rendering queued on the buffer being evicted, wait for
892 if (obj_priv->active) {
894 DRM_INFO("%s: object %p wait for seqno %08x\n",
895 __func__, obj, obj_priv->last_rendering_seqno);
897 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
906 * Unbinds an object from the GTT aperture.
909 i915_gem_object_unbind(struct drm_gem_object *obj)
911 struct drm_device *dev = obj->dev;
912 struct drm_i915_gem_object *obj_priv = obj->driver_private;
916 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
917 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
919 if (obj_priv->gtt_space == NULL)
922 if (obj_priv->pin_count != 0) {
923 DRM_ERROR("Attempting to unbind pinned buffer\n");
927 /* Wait for any rendering to complete
929 ret = i915_gem_object_wait_rendering(obj);
931 DRM_ERROR ("wait_rendering failed: %d\n", ret);
935 /* Move the object to the CPU domain to ensure that
936 * any possible CPU writes while it's not in the GTT
937 * are flushed when we go to remap it. This will
938 * also ensure that all pending GPU writes are finished
941 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
942 I915_GEM_DOMAIN_CPU);
944 DRM_ERROR("set_domain failed: %d\n", ret);
948 if (obj_priv->agp_mem != NULL) {
949 drm_unbind_agp(obj_priv->agp_mem);
950 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
951 obj_priv->agp_mem = NULL;
954 BUG_ON(obj_priv->active);
956 i915_gem_object_free_page_list(obj);
958 if (obj_priv->gtt_space) {
959 atomic_dec(&dev->gtt_count);
960 atomic_sub(obj->size, &dev->gtt_memory);
962 drm_memrange_put_block(obj_priv->gtt_space);
963 obj_priv->gtt_space = NULL;
966 /* Remove ourselves from the LRU list if present. */
967 if (!list_empty(&obj_priv->list))
968 list_del_init(&obj_priv->list);
974 i915_gem_evict_something(struct drm_device *dev)
976 drm_i915_private_t *dev_priv = dev->dev_private;
977 struct drm_gem_object *obj;
978 struct drm_i915_gem_object *obj_priv;
982 /* If there's an inactive buffer available now, grab it
985 if (!list_empty(&dev_priv->mm.inactive_list)) {
986 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
987 struct drm_i915_gem_object,
990 BUG_ON(obj_priv->pin_count != 0);
992 DRM_INFO("%s: evicting %p\n", __func__, obj);
994 BUG_ON(obj_priv->active);
996 /* Wait on the rendering and unbind the buffer. */
997 ret = i915_gem_object_unbind(obj);
1001 /* If we didn't get anything, but the ring is still processing
1002 * things, wait for one of those things to finish and hopefully
1003 * leave us a buffer to evict.
1005 if (!list_empty(&dev_priv->mm.request_list)) {
1006 struct drm_i915_gem_request *request;
1008 request = list_first_entry(&dev_priv->mm.request_list,
1009 struct drm_i915_gem_request,
1012 ret = i915_wait_request(dev, request->seqno);
1016 /* if waiting caused an object to become inactive,
1017 * then loop around and wait for it. Otherwise, we
1018 * assume that waiting freed and unbound something,
1019 * so there should now be some space in the GTT
1021 if (!list_empty(&dev_priv->mm.inactive_list))
1026 /* If we didn't have anything on the request list but there
1027 * are buffers awaiting a flush, emit one and try again.
1028 * When we wait on it, those buffers waiting for that flush
1029 * will get moved to inactive.
1031 if (!list_empty(&dev_priv->mm.flushing_list)) {
1032 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1033 struct drm_i915_gem_object,
1035 obj = obj_priv->obj;
1040 i915_add_request(dev, obj->write_domain);
1046 DRM_ERROR("inactive empty %d request empty %d flushing empty %d\n",
1047 list_empty(&dev_priv->mm.inactive_list),
1048 list_empty(&dev_priv->mm.request_list),
1049 list_empty(&dev_priv->mm.flushing_list));
1050 /* If we didn't do any of the above, there's nothing to be done
1051 * and we just can't fit it in.
1059 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1061 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1063 struct address_space *mapping;
1064 struct inode *inode;
1068 if (obj_priv->page_list)
1071 /* Get the list of pages out of our struct file. They'll be pinned
1072 * at this point until we release them.
1074 page_count = obj->size / PAGE_SIZE;
1075 BUG_ON(obj_priv->page_list != NULL);
1076 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1078 if (obj_priv->page_list == NULL) {
1079 DRM_ERROR("Faled to allocate page list\n");
1083 inode = obj->filp->f_path.dentry->d_inode;
1084 mapping = inode->i_mapping;
1085 for (i = 0; i < page_count; i++) {
1086 page = find_get_page(mapping, i);
1087 if (page == NULL || !PageUptodate(page)) {
1089 page_cache_release(page);
1092 ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL);
1095 DRM_ERROR("shmem_getpage failed: %d\n", ret);
1096 i915_gem_object_free_page_list(obj);
1101 obj_priv->page_list[i] = page;
1107 * Finds free space in the GTT aperture and binds the object there.
1110 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1112 struct drm_device *dev = obj->dev;
1113 drm_i915_private_t *dev_priv = dev->dev_private;
1114 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1115 struct drm_memrange_node *free_space;
1116 int page_count, ret;
1119 alignment = PAGE_SIZE;
1120 if (alignment & (PAGE_SIZE - 1)) {
1121 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1126 free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
1129 if (free_space != NULL) {
1130 obj_priv->gtt_space =
1131 drm_memrange_get_block(free_space, obj->size,
1133 if (obj_priv->gtt_space != NULL) {
1134 obj_priv->gtt_space->private = obj;
1135 obj_priv->gtt_offset = obj_priv->gtt_space->start;
1138 if (obj_priv->gtt_space == NULL) {
1139 /* If the gtt is empty and we're still having trouble
1140 * fitting our object in, we're out of memory.
1143 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1145 if (list_empty(&dev_priv->mm.inactive_list) &&
1146 list_empty(&dev_priv->mm.flushing_list) &&
1147 list_empty(&dev_priv->mm.active_list)) {
1148 DRM_ERROR("GTT full, but LRU list empty\n");
1152 ret = i915_gem_evict_something(dev);
1154 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1161 DRM_INFO("Binding object of size %d at 0x%08x\n",
1162 obj->size, obj_priv->gtt_offset);
1164 ret = i915_gem_object_get_page_list(obj);
1166 drm_memrange_put_block(obj_priv->gtt_space);
1167 obj_priv->gtt_space = NULL;
1171 page_count = obj->size / PAGE_SIZE;
1172 /* Create an AGP memory structure pointing at our pages, and bind it
1175 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1176 obj_priv->page_list,
1178 obj_priv->gtt_offset);
1179 if (obj_priv->agp_mem == NULL) {
1180 i915_gem_object_free_page_list(obj);
1181 drm_memrange_put_block(obj_priv->gtt_space);
1182 obj_priv->gtt_space = NULL;
1185 atomic_inc(&dev->gtt_count);
1186 atomic_add(obj->size, &dev->gtt_memory);
1188 /* Assert that the object is not currently in any GPU domain. As it
1189 * wasn't in the GTT, there shouldn't be any way it could have been in
1192 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1193 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1199 i915_gem_clflush_object(struct drm_gem_object *obj)
1201 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1203 /* If we don't have a page list set up, then we're not pinned
1204 * to GPU, and we can ignore the cache flush because it'll happen
1205 * again at bind time.
1207 if (obj_priv->page_list == NULL)
1210 drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
1214 * Set the next domain for the specified object. This
1215 * may not actually perform the necessary flushing/invalidating though,
1216 * as that may want to be batched with other set_domain operations
1218 * This is (we hope) the only really tricky part of gem. The goal
1219 * is fairly simple -- track which caches hold bits of the object
1220 * and make sure they remain coherent. A few concrete examples may
1221 * help to explain how it works. For shorthand, we use the notation
1222 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1223 * a pair of read and write domain masks.
1225 * Case 1: the batch buffer
1231 * 5. Unmapped from GTT
1234 * Let's take these a step at a time
1237 * Pages allocated from the kernel may still have
1238 * cache contents, so we set them to (CPU, CPU) always.
1239 * 2. Written by CPU (using pwrite)
1240 * The pwrite function calls set_domain (CPU, CPU) and
1241 * this function does nothing (as nothing changes)
1243 * This function asserts that the object is not
1244 * currently in any GPU-based read or write domains
1246 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1247 * As write_domain is zero, this function adds in the
1248 * current read domains (CPU+COMMAND, 0).
1249 * flush_domains is set to CPU.
1250 * invalidate_domains is set to COMMAND
1251 * clflush is run to get data out of the CPU caches
1252 * then i915_dev_set_domain calls i915_gem_flush to
1253 * emit an MI_FLUSH and drm_agp_chipset_flush
1254 * 5. Unmapped from GTT
1255 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1256 * flush_domains and invalidate_domains end up both zero
1257 * so no flushing/invalidating happens
1261 * Case 2: The shared render buffer
1265 * 3. Read/written by GPU
1266 * 4. set_domain to (CPU,CPU)
1267 * 5. Read/written by CPU
1268 * 6. Read/written by GPU
1271 * Same as last example, (CPU, CPU)
1273 * Nothing changes (assertions find that it is not in the GPU)
1274 * 3. Read/written by GPU
1275 * execbuffer calls set_domain (RENDER, RENDER)
1276 * flush_domains gets CPU
1277 * invalidate_domains gets GPU
1279 * MI_FLUSH and drm_agp_chipset_flush
1280 * 4. set_domain (CPU, CPU)
1281 * flush_domains gets GPU
1282 * invalidate_domains gets CPU
1283 * wait_rendering (obj) to make sure all drawing is complete.
1284 * This will include an MI_FLUSH to get the data from GPU
1286 * clflush (obj) to invalidate the CPU cache
1287 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1288 * 5. Read/written by CPU
1289 * cache lines are loaded and dirtied
1290 * 6. Read/written by GPU
1291 * Same as last GPU access
1293 * Case 3: The constant buffer
1298 * 4. Updated (written) by CPU again
1307 * flush_domains = CPU
1308 * invalidate_domains = RENDER
1311 * drm_agp_chipset_flush
1312 * 4. Updated (written) by CPU again
1314 * flush_domains = 0 (no previous write domain)
1315 * invalidate_domains = 0 (no new read domains)
1318 * flush_domains = CPU
1319 * invalidate_domains = RENDER
1322 * drm_agp_chipset_flush
1325 i915_gem_object_set_domain(struct drm_gem_object *obj,
1326 uint32_t read_domains,
1327 uint32_t write_domain)
1329 struct drm_device *dev = obj->dev;
1330 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1331 uint32_t invalidate_domains = 0;
1332 uint32_t flush_domains = 0;
1336 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1338 obj->read_domains, read_domains,
1339 obj->write_domain, write_domain);
1342 * If the object isn't moving to a new write domain,
1343 * let the object stay in multiple read domains
1345 if (write_domain == 0)
1346 read_domains |= obj->read_domains;
1348 obj_priv->dirty = 1;
1351 * Flush the current write domain if
1352 * the new read domains don't match. Invalidate
1353 * any read domains which differ from the old
1356 if (obj->write_domain && obj->write_domain != read_domains) {
1357 flush_domains |= obj->write_domain;
1358 invalidate_domains |= read_domains & ~obj->write_domain;
1361 * Invalidate any read caches which may have
1362 * stale data. That is, any new read domains.
1364 invalidate_domains |= read_domains & ~obj->read_domains;
1365 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1367 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1368 __func__, flush_domains, invalidate_domains);
1371 * If we're invalidating the CPU cache and flushing a GPU cache,
1372 * then pause for rendering so that the GPU caches will be
1373 * flushed before the CPU cache is invalidated
1375 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1376 (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))) {
1377 ret = i915_gem_object_wait_rendering(obj);
1381 i915_gem_clflush_object(obj);
1384 if ((write_domain | flush_domains) != 0)
1385 obj->write_domain = write_domain;
1386 obj->read_domains = read_domains;
1387 dev->invalidate_domains |= invalidate_domains;
1388 dev->flush_domains |= flush_domains;
1390 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1392 obj->read_domains, obj->write_domain,
1393 dev->invalidate_domains, dev->flush_domains);
1399 * Once all of the objects have been set in the proper domain,
1400 * perform the necessary flush and invalidate operations.
1402 * Returns the write domains flushed, for use in flush tracking.
1405 i915_gem_dev_set_domain(struct drm_device *dev)
1407 uint32_t flush_domains = dev->flush_domains;
1410 * Now that all the buffers are synced to the proper domains,
1411 * flush and invalidate the collected domains
1413 if (dev->invalidate_domains | dev->flush_domains) {
1415 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1417 dev->invalidate_domains,
1418 dev->flush_domains);
1421 dev->invalidate_domains,
1422 dev->flush_domains);
1423 dev->invalidate_domains = 0;
1424 dev->flush_domains = 0;
1427 return flush_domains;
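/* i915_gem_object_set_domain() above only accumulates work into
 * dev->invalidate_domains / dev->flush_domains; this function then emits a
 * single flush for the union, so one pass covers every object that was
 * prepared for an execbuffer.
 */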
1431 * Pin an object to the GTT and evaluate the relocations landing in it.
1434 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1435 struct drm_file *file_priv,
1436 struct drm_i915_gem_exec_object *entry)
1438 struct drm_device *dev = obj->dev;
1439 struct drm_i915_gem_relocation_entry reloc;
1440 struct drm_i915_gem_relocation_entry __user *relocs;
1441 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1443 uint32_t last_reloc_offset = -1;
1444 void *reloc_page = NULL;
1446 /* Choose the GTT offset for our buffer and put it there. */
1447 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1451 entry->offset = obj_priv->gtt_offset;
1453 relocs = (struct drm_i915_gem_relocation_entry __user *)
1454 (uintptr_t) entry->relocs_ptr;
1455 /* Apply the relocations, using the GTT aperture to avoid cache
1456 * flushing requirements.
1458 for (i = 0; i < entry->relocation_count; i++) {
1459 struct drm_gem_object *target_obj;
1460 struct drm_i915_gem_object *target_obj_priv;
1461 uint32_t reloc_val, reloc_offset, *reloc_entry;
1464 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1466 i915_gem_object_unpin(obj);
1470 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1471 reloc.target_handle);
1472 if (target_obj == NULL) {
1473 i915_gem_object_unpin(obj);
1476 target_obj_priv = target_obj->driver_private;
1478 /* The target buffer should have appeared before us in the
1479 * exec_object list, so it should have a GTT space bound by now.
1481 if (target_obj_priv->gtt_space == NULL) {
1482 DRM_ERROR("No GTT space found for object %d\n",
1483 reloc.target_handle);
1484 drm_gem_object_unreference(target_obj);
1485 i915_gem_object_unpin(obj);
1489 if (reloc.offset > obj->size - 4) {
1490 DRM_ERROR("Relocation beyond object bounds: "
1491 "obj %p target %d offset %d size %d.\n",
1492 obj, reloc.target_handle,
1493 (int) reloc.offset, (int) obj->size);
1494 drm_gem_object_unreference(target_obj);
1495 i915_gem_object_unpin(obj);
1498 if (reloc.offset & 3) {
1499 DRM_ERROR("Relocation not 4-byte aligned: "
1500 "obj %p target %d offset %d.\n",
1501 obj, reloc.target_handle,
1502 (int) reloc.offset);
1503 drm_gem_object_unreference(target_obj);
1504 i915_gem_object_unpin(obj);
1508 if (reloc.write_domain && target_obj->pending_write_domain &&
1509 reloc.write_domain != target_obj->pending_write_domain) {
1510 DRM_ERROR("Write domain conflict: "
1511 "obj %p target %d offset %d "
1512 "new %08x old %08x\n",
1513 obj, reloc.target_handle,
1516 target_obj->pending_write_domain);
1517 drm_gem_object_unreference(target_obj);
1518 i915_gem_object_unpin(obj);
1523 DRM_INFO("%s: obj %p offset %08x target %d "
1524 "read %08x write %08x gtt %08x "
1525 "presumed %08x delta %08x\n",
1529 (int) reloc.target_handle,
1530 (int) reloc.read_domains,
1531 (int) reloc.write_domain,
1532 (int) target_obj_priv->gtt_offset,
1533 (int) reloc.presumed_offset,
1537 target_obj->pending_read_domains |= reloc.read_domains;
1538 target_obj->pending_write_domain |= reloc.write_domain;
1540 /* If the relocation already has the right value in it, no
1541 * more work needs to be done.
1543 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1544 drm_gem_object_unreference(target_obj);
1548 /* Now that we're going to actually write some data in,
1549 * make sure that any rendering using this buffer's contents
1552 i915_gem_object_wait_rendering(obj);
1554 /* As we're writing through the gtt, flush
1555 * any CPU writes before we write the relocations
1557 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1558 i915_gem_clflush_object(obj);
1559 drm_agp_chipset_flush(dev);
1560 obj->write_domain = 0;
1563 /* Map the page containing the relocation we're going to
1566 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1567 if (reloc_page == NULL ||
1568 (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
1569 (reloc_offset & ~(PAGE_SIZE - 1))) {
1570 if (reloc_page != NULL)
1571 iounmap(reloc_page);
1573 reloc_page = ioremap(dev->agp->base +
1574 (reloc_offset & ~(PAGE_SIZE - 1)),
1576 last_reloc_offset = reloc_offset;
1577 if (reloc_page == NULL) {
1578 drm_gem_object_unreference(target_obj);
1579 i915_gem_object_unpin(obj);
1584 reloc_entry = (uint32_t *)((char *)reloc_page +
1585 (reloc_offset & (PAGE_SIZE - 1)));
1586 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1589 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1590 obj, (unsigned int) reloc.offset,
1591 readl(reloc_entry), reloc_val);
1593 writel(reloc_val, reloc_entry);
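/* The relocation itself is a 32-bit write of the target's GTT offset plus
 * reloc.delta into the pinned object, done through an ioremap() of the
 * aperture so the GPU sees it without any CPU cache flushing.
 */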
1595 /* Write the updated presumed offset for this entry back out
1598 reloc.presumed_offset = target_obj_priv->gtt_offset;
1599 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1601 drm_gem_object_unreference(target_obj);
1602 i915_gem_object_unpin(obj);
1606 drm_gem_object_unreference(target_obj);
1609 if (reloc_page != NULL)
1610 iounmap(reloc_page);
1614 i915_gem_dump_object(obj, 128, __func__, ~0);
1619 /** Dispatch a batchbuffer to the ring
1622 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1623 struct drm_i915_gem_execbuffer *exec,
1624 uint64_t exec_offset)
1626 drm_i915_private_t *dev_priv = dev->dev_private;
1627 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1628 (uintptr_t) exec->cliprects_ptr;
1629 int nbox = exec->num_cliprects;
1631 uint32_t exec_start, exec_len;
1634 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1635 exec_len = (uint32_t) exec->batch_len;
1637 if ((exec_start | exec_len) & 0x7) {
1638 DRM_ERROR("alignment\n");
1645 count = nbox ? nbox : 1;
1647 for (i = 0; i < count; i++) {
1649 int ret = i915_emit_box(dev, boxes, i,
1650 exec->DR1, exec->DR4);
1655 if (IS_I830(dev) || IS_845G(dev)) {
1657 OUT_RING(MI_BATCH_BUFFER);
1658 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1659 OUT_RING(exec_start + exec_len - 4);
1664 if (IS_I965G(dev)) {
1665 OUT_RING(MI_BATCH_BUFFER_START |
1667 MI_BATCH_NON_SECURE_I965);
1668 OUT_RING(exec_start);
1670 OUT_RING(MI_BATCH_BUFFER_START |
1672 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1678 /* XXX breadcrumb */
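/* Batchbuffer dispatch differs by generation: 830/845 use MI_BATCH_BUFFER
 * with explicit start and end addresses, 965 uses MI_BATCH_BUFFER_START
 * with the I965 non-secure bit, and the remaining chips use
 * MI_BATCH_BUFFER_START with the non-secure bit OR'd into the address.
 */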
1682 /* Throttle our rendering by waiting until the ring has completed our requests
1683 * emitted over 20 msec ago.
1685 * This should get us reasonable parallelism between CPU and GPU but also
1686 * relatively low latency when blocking on a particular request to finish.
1689 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1691 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1695 mutex_lock(&dev->struct_mutex);
1696 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1697 i915_file_priv->mm.last_gem_throttle_seqno = i915_file_priv->mm.last_gem_seqno;
1699 ret = i915_wait_request(dev, seqno);
1700 mutex_unlock(&dev->struct_mutex);
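/* Each call waits on the seqno recorded by the previous throttle call and
 * then records the most recent seqno, so a client keeps at most one
 * throttle interval of rendering queued ahead of the GPU.
 */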
1705 i915_gem_execbuffer(struct drm_device *dev, void *data,
1706 struct drm_file *file_priv)
1708 drm_i915_private_t *dev_priv = dev->dev_private;
1709 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1710 struct drm_i915_gem_execbuffer *args = data;
1711 struct drm_i915_gem_exec_object *exec_list = NULL;
1712 struct drm_gem_object **object_list = NULL;
1713 struct drm_gem_object *batch_obj;
1714 int ret, i, pinned = 0;
1715 uint64_t exec_offset;
1716 uint32_t seqno, flush_domains;
1719 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1720 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1723 /* Copy in the exec list from userland */
1724 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1726 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1728 if (exec_list == NULL || object_list == NULL) {
1729 DRM_ERROR("Failed to allocate exec or object list "
1731 args->buffer_count);
1735 ret = copy_from_user(exec_list,
1736 (struct drm_i915_relocation_entry __user *)
1737 (uintptr_t) args->buffers_ptr,
1738 sizeof(*exec_list) * args->buffer_count);
1740 DRM_ERROR("copy %d exec entries failed %d\n",
1741 args->buffer_count, ret);
1745 mutex_lock(&dev->struct_mutex);
1747 i915_verify_inactive(dev, __FILE__, __LINE__);
1749 if (dev_priv->mm.wedged) {
1750 DRM_ERROR("Execbuf while wedged\n");
1751 mutex_unlock(&dev->struct_mutex);
1755 if (dev_priv->mm.suspended) {
1756 DRM_ERROR("Execbuf while VT-switched.\n");
1757 mutex_unlock(&dev->struct_mutex);
1761 /* Zero the global flush/invalidate flags. These
1762 * will be modified as each object is bound to the
1765 dev->invalidate_domains = 0;
1766 dev->flush_domains = 0;
1768 /* Look up object handles and perform the relocations */
1769 for (i = 0; i < args->buffer_count; i++) {
1770 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1771 exec_list[i].handle);
1772 if (object_list[i] == NULL) {
1773 DRM_ERROR("Invalid object handle %d at index %d\n",
1774 exec_list[i].handle, i);
1779 object_list[i]->pending_read_domains = 0;
1780 object_list[i]->pending_write_domain = 0;
1781 ret = i915_gem_object_pin_and_relocate(object_list[i],
1785 DRM_ERROR("object bind and relocate failed %d\n", ret);
1791 /* Set the pending read domains for the batch buffer to COMMAND */
1792 batch_obj = object_list[args->buffer_count-1];
1793 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1794 batch_obj->pending_write_domain = 0;
1796 i915_verify_inactive(dev, __FILE__, __LINE__);
1798 for (i = 0; i < args->buffer_count; i++) {
1799 struct drm_gem_object *obj = object_list[i];
1800 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1802 if (obj_priv->gtt_space == NULL) {
1803 /* We evicted the buffer in the process of validating
1804 * our set of buffers in. We could try to recover by
1805 * kicking everything out and trying again from
1812 /* make sure all previous memory operations have passed */
1813 ret = i915_gem_object_set_domain(obj,
1814 obj->pending_read_domains,
1815 obj->pending_write_domain);
1820 i915_verify_inactive(dev, __FILE__, __LINE__);
1822 /* Flush/invalidate caches and chipset buffer */
1823 flush_domains = i915_gem_dev_set_domain(dev);
1825 i915_verify_inactive(dev, __FILE__, __LINE__);
1828 for (i = 0; i < args->buffer_count; i++) {
1829 i915_gem_object_check_coherency(object_list[i],
1830 exec_list[i].handle);
1834 exec_offset = exec_list[args->buffer_count - 1].offset;
1837 i915_gem_dump_object(object_list[args->buffer_count - 1],
1843 /* Exec the batchbuffer */
1844 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1846 DRM_ERROR("dispatch failed %d\n", ret);
1851 * Ensure that the commands in the batch buffer are
1852 * finished before the interrupt fires
1854 flush_domains |= i915_retire_commands(dev);
1856 i915_verify_inactive(dev, __FILE__, __LINE__);
1859 * Get a seqno representing the execution of the current buffer,
1860 * which we can wait on. We would like to mitigate these interrupts,
1861 * likely by only creating seqnos occasionally (so that we have
1862 * *some* interrupts representing completion of buffers that we can
1863 * wait on when trying to clear up gtt space).
1865 seqno = i915_add_request(dev, flush_domains);
1867 i915_file_priv->mm.last_gem_seqno = seqno;
1868 for (i = 0; i < args->buffer_count; i++) {
1869 struct drm_gem_object *obj = object_list[i];
1870 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1872 i915_gem_object_move_to_active(obj);
1873 obj_priv->last_rendering_seqno = seqno;
1875 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1879 i915_dump_lru(dev, __func__);
1882 i915_verify_inactive(dev, __FILE__, __LINE__);
1884 /* Copy the new buffer offsets back to the user's exec list. */
1885 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1886 (uintptr_t) args->buffers_ptr,
1888 sizeof(*exec_list) * args->buffer_count);
1890 DRM_ERROR("failed to copy %d exec entries "
1891 "back to user (%d)\n",
1892 args->buffer_count, ret);
1894 if (object_list != NULL) {
1895 for (i = 0; i < pinned; i++)
1896 i915_gem_object_unpin(object_list[i]);
1898 for (i = 0; i < args->buffer_count; i++)
1899 drm_gem_object_unreference(object_list[i]);
1901 mutex_unlock(&dev->struct_mutex);
1904 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1906 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
1913 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1915 struct drm_device *dev = obj->dev;
1916 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1919 i915_verify_inactive(dev, __FILE__, __LINE__);
1920 if (obj_priv->gtt_space == NULL) {
1921 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1923 DRM_ERROR("Failure to bind: %d", ret);
1927 obj_priv->pin_count++;
1929 /* If the object is not active and not pending a flush,
1930 * remove it from the inactive list
1932 if (obj_priv->pin_count == 1) {
1933 atomic_inc(&dev->pin_count);
1934 atomic_add(obj->size, &dev->pin_memory);
1935 if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0 &&
1936 !list_empty(&obj_priv->list))
1937 list_del_init(&obj_priv->list);
1939 i915_verify_inactive(dev, __FILE__, __LINE__);
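/* An object is taken off the inactive LRU when it is first pinned, so
 * i915_gem_evict_something() never considers pinned buffers for eviction.
 */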
1945 i915_gem_object_unpin(struct drm_gem_object *obj)
1947 struct drm_device *dev = obj->dev;
1948 drm_i915_private_t *dev_priv = dev->dev_private;
1949 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1951 i915_verify_inactive(dev, __FILE__, __LINE__);
1952 obj_priv->pin_count--;
1953 BUG_ON(obj_priv->pin_count < 0);
1954 BUG_ON(obj_priv->gtt_space == NULL);
1956 /* If the object is no longer pinned, and is
1957 * neither active nor being flushed, then stick it on
1960 if (obj_priv->pin_count == 0) {
1961 if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0)
1962 list_move_tail(&obj_priv->list,
1963 &dev_priv->mm.inactive_list);
1964 atomic_dec(&dev->pin_count);
1965 atomic_sub(obj->size, &dev->pin_memory);
1967 i915_verify_inactive(dev, __FILE__, __LINE__);
1971 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1972 struct drm_file *file_priv)
1974 struct drm_i915_gem_pin *args = data;
1975 struct drm_gem_object *obj;
1976 struct drm_i915_gem_object *obj_priv;
1979 mutex_lock(&dev->struct_mutex);
1981 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1983 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
1985 mutex_unlock(&dev->struct_mutex);
1988 obj_priv = obj->driver_private;
1990 ret = i915_gem_object_pin(obj, args->alignment);
1992 drm_gem_object_unreference(obj);
1993 mutex_unlock(&dev->struct_mutex);
1997 /** XXX - flush the CPU caches for pinned objects
1998 * as the X server doesn't manage domains yet
2000 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2001 i915_gem_clflush_object(obj);
2002 drm_agp_chipset_flush(dev);
2003 obj->write_domain = 0;
2005 args->offset = obj_priv->gtt_offset;
2006 drm_gem_object_unreference(obj);
2007 mutex_unlock(&dev->struct_mutex);
2013 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2014 struct drm_file *file_priv)
2016 struct drm_i915_gem_pin *args = data;
2017 struct drm_gem_object *obj;
2019 mutex_lock(&dev->struct_mutex);
2021 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2023 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2025 mutex_unlock(&dev->struct_mutex);
2029 i915_gem_object_unpin(obj);
2031 drm_gem_object_unreference(obj);
2032 mutex_unlock(&dev->struct_mutex);
2037 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2038 struct drm_file *file_priv)
2040 struct drm_i915_gem_busy *args = data;
2041 struct drm_gem_object *obj;
2042 struct drm_i915_gem_object *obj_priv;
2044 mutex_lock(&dev->struct_mutex);
2045 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2047 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2049 mutex_unlock(&dev->struct_mutex);
2053 obj_priv = obj->driver_private;
2054 args->busy = obj_priv->active;
2056 drm_gem_object_unreference(obj);
2057 mutex_unlock(&dev->struct_mutex);
2062 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2063 struct drm_file *file_priv)
2065 return i915_gem_ring_throttle(dev, file_priv);
2068 int i915_gem_init_object(struct drm_gem_object *obj)
2070 struct drm_i915_gem_object *obj_priv;
2072 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2073 if (obj_priv == NULL)
2077 * We've just allocated pages from the kernel,
2078 * so they've just been written by the CPU with
2079 * zeros. They'll need to be clflushed before we
2080 * use them with the GPU.
2082 obj->write_domain = I915_GEM_DOMAIN_CPU;
2083 obj->read_domains = I915_GEM_DOMAIN_CPU;
2085 obj->driver_private = obj_priv;
2086 obj_priv->obj = obj;
2087 INIT_LIST_HEAD(&obj_priv->list);
2091 void i915_gem_free_object(struct drm_gem_object *obj)
2093 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2095 while (obj_priv->pin_count > 0)
2096 i915_gem_object_unpin(obj);
2098 i915_gem_object_unbind(obj);
2100 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2104 i915_gem_set_domain(struct drm_gem_object *obj,
2105 struct drm_file *file_priv,
2106 uint32_t read_domains,
2107 uint32_t write_domain)
2109 struct drm_device *dev = obj->dev;
2111 uint32_t flush_domains;
2113 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2115 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2118 flush_domains = i915_gem_dev_set_domain(obj->dev);
2120 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2121 (void) i915_add_request(dev, flush_domains);
2126 /** Unbinds all objects that are on the given buffer list. */
2128 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2130 struct drm_gem_object *obj;
2131 struct drm_i915_gem_object *obj_priv;
2134 while (!list_empty(head)) {
2135 obj_priv = list_first_entry(head,
2136 struct drm_i915_gem_object,
2138 obj = obj_priv->obj;
2140 if (obj_priv->pin_count != 0) {
2141 DRM_ERROR("Pinned object in unbind list\n");
2142 mutex_unlock(&dev->struct_mutex);
2146 ret = i915_gem_object_unbind(obj);
2148 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2150 mutex_unlock(&dev->struct_mutex);
2160 i915_gem_idle(struct drm_device *dev)
2162 drm_i915_private_t *dev_priv = dev->dev_private;
2163 uint32_t seqno, cur_seqno, last_seqno;
2166 if (dev_priv->mm.suspended)
2169 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2170 * We need to replace this with a semaphore, or something.
2172 dev_priv->mm.suspended = 1;
2174 i915_kernel_lost_context(dev);
2176 /* Flush the GPU along with all non-CPU write domains
2178 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2179 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2180 seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2183 mutex_unlock(&dev->struct_mutex);
2187 dev_priv->mm.waiting_gem_seqno = seqno;
2191 cur_seqno = i915_get_gem_seqno(dev);
2192 if (i915_seqno_passed(cur_seqno, seqno))
2194 if (last_seqno == cur_seqno) {
2195 if (stuck++ > 100) {
2196 DRM_ERROR("hardware wedged\n");
2197 dev_priv->mm.wedged = 1;
2198 DRM_WAKEUP(&dev_priv->irq_queue);
2203 last_seqno = cur_seqno;
2205 dev_priv->mm.waiting_gem_seqno = 0;
2207 i915_gem_retire_requests(dev);
2209 /* Active and flushing should now be empty as we've
2210 * waited for a sequence higher than any pending execbuffer
2212 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2213 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2215 /* Request should now be empty as we've also waited
2216 * for the last request in the list
2218 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2220 /* Move all buffers out of the GTT. */
2221 i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2223 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2224 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2225 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2226 BUG_ON(!list_empty(&dev_priv->mm.request_list));
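/* In short: idle flushes all GPU write domains, emits a final request and
 * busy-waits for it (flagging the hardware as wedged if the seqno stops
 * advancing), then evicts every remaining inactive buffer so the GTT is
 * completely empty across a VT switch or the final close of the device.
 */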
2231 i915_gem_init_hws(struct drm_device *dev)
2233 drm_i915_private_t *dev_priv = dev->dev_private;
2234 struct drm_gem_object *obj;
2235 struct drm_i915_gem_object *obj_priv;
2238 /* If we need a physical address for the status page, it's already
2239 * initialized at driver load time.
2241 if (!I915_NEED_GFX_HWS(dev))
2244 obj = drm_gem_object_alloc(dev, 4096);
2246 DRM_ERROR("Failed to allocate status page\n");
2249 obj_priv = obj->driver_private;
2251 ret = i915_gem_object_pin(obj, 4096);
2253 drm_gem_object_unreference(obj);
2257 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2258 dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
2259 dev_priv->hws_map.size = 4096;
2260 dev_priv->hws_map.type = 0;
2261 dev_priv->hws_map.flags = 0;
2262 dev_priv->hws_map.mtrr = 0;
2264 drm_core_ioremap(&dev_priv->hws_map, dev);
2265 if (dev_priv->hws_map.handle == NULL) {
2266 DRM_ERROR("Failed to map status page.\n");
2267 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2268 drm_gem_object_unreference(obj);
2271 dev_priv->hws_obj = obj;
2272 dev_priv->hw_status_page = dev_priv->hws_map.handle;
2273 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2274 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2275 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2281 i915_gem_init_ringbuffer(struct drm_device *dev)
2283 drm_i915_private_t *dev_priv = dev->dev_private;
2284 struct drm_gem_object *obj;
2285 struct drm_i915_gem_object *obj_priv;
2288 ret = i915_gem_init_hws(dev);
2292 obj = drm_gem_object_alloc(dev, 128 * 1024);
2294 DRM_ERROR("Failed to allocate ringbuffer\n");
2297 obj_priv = obj->driver_private;
2299 ret = i915_gem_object_pin(obj, 4096);
2301 drm_gem_object_unreference(obj);
2305 /* Set up the kernel mapping for the ring. */
2306 dev_priv->ring.Size = obj->size;
2307 dev_priv->ring.tail_mask = obj->size - 1;
2309 dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2310 dev_priv->ring.map.size = obj->size;
2311 dev_priv->ring.map.type = 0;
2312 dev_priv->ring.map.flags = 0;
2313 dev_priv->ring.map.mtrr = 0;
2315 drm_core_ioremap(&dev_priv->ring.map, dev);
2316 if (dev_priv->ring.map.handle == NULL) {
2317 DRM_ERROR("Failed to map ringbuffer.\n");
2318 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2319 drm_gem_object_unreference(obj);
2322 dev_priv->ring.ring_obj = obj;
2323 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2325 /* Stop the ring if it's running. */
2326 I915_WRITE(LP_RING + RING_LEN, 0);
2327 I915_WRITE(LP_RING + RING_HEAD, 0);
2328 I915_WRITE(LP_RING + RING_TAIL, 0);
2329 I915_WRITE(LP_RING + RING_START, 0);
2331 /* Initialize the ring. */
2332 I915_WRITE(LP_RING + RING_START, obj_priv->gtt_offset);
2333 I915_WRITE(LP_RING + RING_LEN,
2334 ((obj->size - 4096) & RING_NR_PAGES) |
2338 /* Update our cache of the ring state */
2339 i915_kernel_lost_context(dev);
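/* The length field written to RING_LEN counts 4KB pages minus one, which
 * is why obj->size - 4096 (masked by RING_NR_PAGES) is programmed above
 * after the ring has been stopped and its start address set to the
 * object's GTT offset.
 */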
2345 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2347 drm_i915_private_t *dev_priv = dev->dev_private;
2349 if (dev_priv->ring.ring_obj == NULL)
2352 drm_core_ioremapfree(&dev_priv->ring.map, dev);
2354 i915_gem_object_unpin(dev_priv->ring.ring_obj);
2355 drm_gem_object_unreference(dev_priv->ring.ring_obj);
2356 dev_priv->ring.ring_obj = NULL;
2357 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2359 if (dev_priv->hws_obj != NULL) {
2360 i915_gem_object_unpin(dev_priv->hws_obj);
2361 drm_gem_object_unreference(dev_priv->hws_obj);
2362 dev_priv->hws_obj = NULL;
2363 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2365 /* Write high address into HWS_PGA when disabling. */
2366 I915_WRITE(HWS_PGA, 0x1ffff000);
2371 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2372 struct drm_file *file_priv)
2374 drm_i915_private_t *dev_priv = dev->dev_private;
2377 if (dev_priv->mm.wedged) {
2378 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2379 dev_priv->mm.wedged = 0;
2382 ret = i915_gem_init_ringbuffer(dev);
2386 mutex_lock(&dev->struct_mutex);
2387 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2388 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2389 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2390 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2391 dev_priv->mm.suspended = 0;
2392 mutex_unlock(&dev->struct_mutex);
2397 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2398 struct drm_file *file_priv)
2402 mutex_lock(&dev->struct_mutex);
2403 ret = i915_gem_idle(dev);
2405 i915_gem_cleanup_ringbuffer(dev);
2406 mutex_unlock(&dev->struct_mutex);
2412 i915_gem_lastclose(struct drm_device *dev)
2415 drm_i915_private_t *dev_priv = dev->dev_private;
2417 mutex_lock(&dev->struct_mutex);
2419 if (dev_priv->ring.ring_obj != NULL) {
2420 ret = i915_gem_idle(dev);
2422 DRM_ERROR("failed to idle hardware: %d\n", ret);
2424 i915_gem_cleanup_ringbuffer(dev);
2427 mutex_unlock(&dev->struct_mutex);
2430 void i915_gem_load(struct drm_device *dev)
2432 drm_i915_private_t *dev_priv = dev->dev_private;
2434 INIT_LIST_HEAD(&dev_priv->mm.active_list);
2435 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2436 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2437 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2438 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2439 i915_gem_retire_work_handler);
2440 dev_priv->mm.next_gem_seqno = 1;
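/* Seqnos start at 1; 0 is reserved as the "no seqno" value (see
 * i915_add_request()).
 */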
2442 i915_gem_detect_bit_6_swizzle(dev);