1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Typically called by the IRQ handler.
/*
 * drm_fence_handler - process a completed hardware sequence number for one
 * fence class.  Typically called from the IRQ handler with fm->lock held by
 * the caller.  Marks matching fences signaled, prunes fully-signaled fences
 * from fc->ring, re-arms lost flush flags, and wakes waiters.
 *
 * NOTE(review): this extraction has dropped several original lines (opening
 * brace, locals such as `diff`, `relevant`, `found`, `ge_last_exe`, loop
 * break/found bookkeeping, closing braces).  The leading numbers on each
 * line are original file line numbers, not code.
 */
37 void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
38 uint32_t sequence, uint32_t type, uint32_t error)
43 struct drm_fence_manager *fm = &dev->fm;
44 struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
45 struct drm_fence_driver *driver = dev->driver->fence_driver;
46 struct list_head *head;
47 struct drm_fence_object *fence, *next;
49 int is_exe = (type & DRM_FENCE_TYPE_EXE);
/* Wrap-safe distance check: a pending EXE flush is satisfied once the
 * hardware sequence has reached exe_flush_sequence. */
53 diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
55 if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
56 fc->pending_exe_flush = 0;
58 diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
/* ge_last_exe: @sequence is at or after the last recorded EXE flush. */
59 ge_last_exe = diff < driver->wrap_diff;
61 if (is_exe && ge_last_exe) {
62 fc->last_exe_flush = sequence;
65 if (list_empty(&fc->ring))
/* Locate the newest ring entry whose sequence @sequence has passed. */
68 list_for_each_entry(fence, &fc->ring, ring) {
69 diff = (sequence - fence->sequence) & driver->sequence_mask;
70 if (diff > driver->wrap_diff) {
76 fc->pending_flush &= ~type;
/* Walk backwards from the found entry (or the whole ring): everything at
 * or before it is considered fully signaled and is unlinked. */
77 head = (found) ? &fence->ring : &fc->ring;
79 list_for_each_entry_safe_reverse(fence, next, head, ring) {
80 if (&fence->ring == &fc->ring)
85 fence->signaled = fence->type;
86 fence->submitted_flush = fence->type;
87 fence->flush_mask = fence->type;
88 list_del_init(&fence->ring);
/* Remaining (newer) fences may still pick up driver-native type bits. */
93 type |= fence->native_type;
94 relevant = type & fence->type;
96 if ((fence->signaled | relevant) != fence->signaled) {
97 fence->signaled |= relevant;
98 fence->flush_mask |= relevant;
99 fence->submitted_flush |= relevant;
100 DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
101 fence->base.hash.key, fence->signaled);
/* Flush bits requested but neither submitted nor signaled yet become
 * pending driver flushes. */
105 relevant = fence->flush_mask &
106 ~(fence->submitted_flush | fence->signaled);
108 fc->pending_flush |= relevant;
109 fence->submitted_flush |= relevant;
/* All type bits signaled: the fence is complete; drop it from the ring. */
111 if (!(fence->type & ~fence->signaled)) {
112 DRM_DEBUG("Fence completely signaled 0x%08lx\n",
113 fence->base.hash.key);
114 list_del_init(&fence->ring);
120 * Reinstate lost flush flags.
/* If this event cleared pending_flush bits that older unsignaled fences
 * still need, put them back. */
123 if ((fc->pending_flush & type) != type) {
125 list_for_each_entry(fence, head, ring) {
126 if (&fence->ring == &fc->ring)
128 diff = (fc->last_exe_flush - fence->sequence) &
129 driver->sequence_mask;
130 if (diff > driver->wrap_diff)
133 relevant = fence->submitted_flush & ~fence->signaled;
134 fc->pending_flush |= relevant;
/* Wake anyone sleeping in drm_fence_object_wait()/drm_fence_lazy_wait(). */
139 DRM_WAKEUP(&fc->fence_queue);
143 EXPORT_SYMBOL(drm_fence_handler);
/*
 * drm_fence_unring - detach a fence's ring entry under fm->lock.
 * NOTE(review): the lines between lock and unlock (presumably a
 * `list_del_init(ring);` plus the `unsigned long flags;` local) were
 * dropped by this extraction - confirm against the original file.
 */
145 static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)
147 struct drm_fence_manager *fm = &dev->fm;
150 write_lock_irqsave(&fm->lock, flags);
152 write_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_usage_deref_locked - drop one reference to a fence object.
 * Caller must hold dev->struct_mutex (asserted).  On the last reference
 * the fence is unhooked from its class ring and freed.
 */
155 void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
157 struct drm_fence_object *tmp_fence = *fence;
158 struct drm_device *dev = tmp_fence->dev;
159 struct drm_fence_manager *fm = &dev->fm;
161 DRM_ASSERT_LOCKED(&dev->struct_mutex);
163 if (atomic_dec_and_test(&tmp_fence->usage)) {
164 drm_fence_unring(dev, &tmp_fence->ring);
165 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
166 tmp_fence->base.hash.key);
/* Keep the manager's global fence count in step with allocations. */
167 atomic_dec(&fm->count);
/* The fence must already be off the user-object list at teardown. */
168 BUG_ON(!list_empty(&tmp_fence->base.list));
169 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
172 EXPORT_SYMBOL(drm_fence_usage_deref_locked);
/*
 * drm_fence_usage_deref_unlocked - drop one reference without the caller
 * holding dev->struct_mutex.  Takes the mutex only on the teardown path
 * and re-checks the usage count under the lock before freeing.
 */
174 void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
176 struct drm_fence_object *tmp_fence = *fence;
177 struct drm_device *dev = tmp_fence->dev;
178 struct drm_fence_manager *fm = &dev->fm;
181 if (atomic_dec_and_test(&tmp_fence->usage)) {
182 mutex_lock(&dev->struct_mutex);
/* Re-check: another thread may have re-referenced the fence before we
 * acquired the mutex; only free if usage is still zero. */
183 if (atomic_read(&tmp_fence->usage) == 0) {
184 drm_fence_unring(dev, &tmp_fence->ring);
185 atomic_dec(&fm->count);
186 BUG_ON(!list_empty(&tmp_fence->base.list));
187 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
189 mutex_unlock(&dev->struct_mutex);
192 EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
/*
 * drm_fence_reference_locked - take a reference on @src; caller holds
 * src->dev->struct_mutex.
 * NOTE(review): the closing lines (presumably `return src;` and `}`) were
 * dropped by this extraction - confirm against the original file.
 */
194 struct drm_fence_object
195 *drm_fence_reference_locked(struct drm_fence_object *src)
197 DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
199 atomic_inc(&src->usage);
/*
 * drm_fence_reference_unlocked - take a reference on @src under the device
 * struct_mutex.
 * NOTE(review): a line between lock and atomic_inc (presumably
 * `*dst = src;`) was dropped by this extraction - confirm against the
 * original file.
 */
203 void drm_fence_reference_unlocked(struct drm_fence_object **dst,
204 struct drm_fence_object *src)
206 mutex_lock(&src->dev->struct_mutex);
208 atomic_inc(&src->usage);
209 mutex_unlock(&src->dev->struct_mutex);
211 EXPORT_SYMBOL(drm_fence_reference_unlocked);
/*
 * drm_fence_object_destroy - user-object removal callback (installed as
 * fence->base.remove in drm_fence_add_user_object).  Drops the reference
 * held by the user-object system; struct_mutex is held by the caller.
 */
213 static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
215 struct drm_fence_object *fence =
216 drm_user_object_entry(base, struct drm_fence_object, base);
218 drm_fence_usage_deref_locked(&fence);
/*
 * drm_fence_object_signaled - test whether all type bits in @mask are
 * signaled on @fence.  If @poke_flush is nonzero, first asks the driver to
 * poke its flush mechanism so lazily-signaled types make progress.  The
 * comparison is read under fm->lock.
 */
221 int drm_fence_object_signaled(struct drm_fence_object * fence,
222 uint32_t mask, int poke_flush)
226 struct drm_device *dev = fence->dev;
227 struct drm_fence_manager *fm = &dev->fm;
228 struct drm_fence_driver *driver = dev->driver->fence_driver;
231 driver->poke_flush(dev, fence->fence_class);
232 read_lock_irqsave(&fm->lock, flags);
/* All requested-and-relevant bits must already be in fence->signaled. */
234 (fence->type & mask & fence->signaled) == (fence->type & mask);
235 read_unlock_irqrestore(&fm->lock, flags);
239 EXPORT_SYMBOL(drm_fence_object_signaled);
/*
 * drm_fence_flush_exe - record (or advance) the EXE-flush target sequence
 * for a fence class.  If no EXE flush is pending, start one at @sequence;
 * otherwise move the target forward only when @sequence is older than the
 * current target (wrap-safe comparison).
 * NOTE(review): the `uint32_t diff;` local, the `else` keyword and braces
 * appear dropped by this extraction.
 */
241 static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
242 struct drm_fence_driver * driver, uint32_t sequence)
246 if (!fc->pending_exe_flush) {
247 fc->exe_flush_sequence = sequence;
248 fc->pending_exe_flush = 1;
251 (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
252 if (diff < driver->wrap_diff) {
253 fc->exe_flush_sequence = sequence;
/*
 * drm_fence_object_flush - request that the type bits in @type be flushed
 * to completion for @fence.  Rejects attempts to flush bits the fence was
 * not created with.  Updates flush bookkeeping under fm->lock, schedules an
 * EXE flush if needed, then pokes the driver's flush mechanism.
 * NOTE(review): the second parameter line (`uint32_t type)`) and locals
 * were dropped by this extraction.
 */
258 int drm_fence_object_flush(struct drm_fence_object * fence,
261 struct drm_device *dev = fence->dev;
262 struct drm_fence_manager *fm = &dev->fm;
263 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
264 struct drm_fence_driver *driver = dev->driver->fence_driver;
/* A flush may not add type bits beyond those the fence carries. */
267 if (type & ~fence->type) {
268 DRM_ERROR("Flush trying to extend fence type, "
269 "0x%x, 0x%x\n", type, fence->type);
273 write_lock_irqsave(&fm->lock, flags);
274 fence->flush_mask |= type;
/* Only submit new flushes when everything submitted so far is signaled. */
275 if ((fence->submitted_flush & fence->signaled)
276 == fence->submitted_flush) {
277 if ((fence->type & DRM_FENCE_TYPE_EXE) &&
278 !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
279 drm_fence_flush_exe(fc, driver, fence->sequence);
280 fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
282 fc->pending_flush |= (fence->flush_mask &
283 ~fence->submitted_flush);
284 fence->submitted_flush = fence->flush_mask;
287 write_unlock_irqrestore(&fm->lock, flags);
/* Let the driver act on the newly-pending flush bits. */
288 driver->poke_flush(dev, fence->fence_class);
293 * Make sure old fence objects are signaled before their fence sequences are
294 * wrapped around and reused.
/*
 * drm_fence_flush_old - arm an EXE flush for sequences that are about to be
 * reused after wrap-around, then flush the oldest fence still on the ring
 * if it predates (sequence - flush_diff).
 * NOTE(review): locals (`flags`, `diff`), braces and an early-return on the
 * empty-ring path were dropped by this extraction.
 */
297 void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence)
299 struct drm_fence_manager *fm = &dev->fm;
300 struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
301 struct drm_fence_driver *driver = dev->driver->fence_driver;
302 uint32_t old_sequence;
304 struct drm_fence_object *fence;
307 write_lock_irqsave(&fm->lock, flags);
/* old_sequence: the oldest sequence we still consider live. */
308 old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
309 diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
311 if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
312 fc->pending_exe_flush = 1;
/* Flush target placed half a flush_diff back so it fires early. */
313 fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
315 write_unlock_irqrestore(&fm->lock, flags);
317 mutex_lock(&dev->struct_mutex);
318 read_lock_irqsave(&fm->lock, flags);
320 if (list_empty(&fc->ring)) {
321 read_unlock_irqrestore(&fm->lock, flags);
322 mutex_unlock(&dev->struct_mutex);
/* Oldest fence sits at the head of the ring; hold a reference while we
 * operate on it outside the locks. */
325 fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring));
326 mutex_unlock(&dev->struct_mutex);
327 diff = (old_sequence - fence->sequence) & driver->sequence_mask;
328 read_unlock_irqrestore(&fm->lock, flags);
329 if (diff < driver->wrap_diff) {
330 drm_fence_object_flush(fence, fence->type);
332 drm_fence_usage_deref_unlocked(&fence);
335 EXPORT_SYMBOL(drm_fence_flush_old);
/*
 * drm_fence_lazy_wait - sleep on fc->fence_queue until the @mask bits of
 * @fence are signaled, the 3*DRM_HZ deadline passes, or a signal arrives
 * (retried while @ignore_signals).  Returns 0 on success, -EBUSY on
 * timeout, and maps -EINTR to -EAGAIN for the caller.
 * NOTE(review): the parameter lines after the first, locals (`ret`,
 * `signaled`), the `do {` opener, return statements and the timeout error
 * arguments were dropped by this extraction.
 */
337 static int drm_fence_lazy_wait(struct drm_fence_object *fence,
341 struct drm_device *dev = fence->dev;
342 struct drm_fence_manager *fm = &dev->fm;
343 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
345 unsigned long _end = jiffies + 3*DRM_HZ;
349 DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
350 (signaled = drm_fence_object_signaled(fence, mask, 1)));
353 if (time_after_eq(jiffies, _end))
/* Keep retrying interrupted waits when the caller ignores signals. */
355 } while (ret == -EINTR && ignore_signals);
356 if (drm_fence_object_signaled(fence, mask, 0))
358 if (time_after_eq(jiffies, _end))
/* Deadline exceeded without signaling: likely GPU lockup or driver
 * teardown; dump diagnostic state. */
362 DRM_ERROR("Fence timeout. "
363 "GPU lockup or fence driver was "
364 "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n",
370 DRM_ERROR("Pending exe flush %d 0x%08x\n",
371 fc->pending_exe_flush,
372 fc->exe_flush_sequence);
374 return ((ret == -EINTR) ? -EAGAIN : ret);
/*
 * drm_fence_object_wait - wait for the @mask bits of @fence to signal.
 * Prefers an interrupt-driven lazy wait when @lazy and the driver supports
 * it (first EXE, then the remaining bits), falling back to a bounded
 * polling loop to avoid kernel-space busy-waiting.  Rejects masks that
 * extend the fence's type.
 * NOTE(review): locals (`ret`, `signaled`, `_end`), early returns, error
 * paths and several braces were dropped by this extraction.
 */
379 int drm_fence_object_wait(struct drm_fence_object * fence,
380 int lazy, int ignore_signals, uint32_t mask)
382 struct drm_device *dev = fence->dev;
383 struct drm_fence_driver *driver = dev->driver->fence_driver;
388 if (mask & ~fence->type) {
389 DRM_ERROR("Wait trying to extend fence type"
390 " 0x%08x 0x%08x\n", mask, fence->type);
/* Fast path: already signaled, nothing to wait for. */
395 if (drm_fence_object_signaled(fence, mask, 0))
398 _end = jiffies + 3 * DRM_HZ;
400 drm_fence_object_flush(fence, mask);
402 if (lazy && driver->lazy_capable) {
404 ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
/* Lazy waits are split: EXE first (if IRQ-capable) ... */
410 if (driver->has_irq(dev, fence->fence_class,
411 DRM_FENCE_TYPE_EXE)) {
412 ret = drm_fence_lazy_wait(fence, ignore_signals,
/* ... then the non-EXE bits, each only when an IRQ exists for them. */
418 if (driver->has_irq(dev, fence->fence_class,
419 mask & ~DRM_FENCE_TYPE_EXE)) {
420 ret = drm_fence_lazy_wait(fence, ignore_signals,
426 if (drm_fence_object_signaled(fence, mask, 0))
430 * Avoid kernel-space busy-waits.
/* Polling fallback, bounded by the same 3*DRM_HZ deadline. */
438 signaled = drm_fence_object_signaled(fence, mask, 1);
439 } while (!signaled && !time_after_eq(jiffies, _end));
446 EXPORT_SYMBOL(drm_fence_object_wait);
/*
 * drm_fence_object_emit - (re)emit @fence into the command stream via the
 * driver's emit hook, reset its flush/signal bookkeeping, and append it to
 * the class ring under fm->lock.  If the ring was empty, last_exe_flush is
 * seeded to sequence - 1 so the new fence is "after" the flush point.
 * NOTE(review): locals (`ret`, `sequence`, `flags`), the error check after
 * driver->emit, and `fence->type = type;` appear dropped by this
 * extraction.
 */
449 int drm_fence_object_emit(struct drm_fence_object * fence,
450 uint32_t fence_flags, uint32_t fence_class, uint32_t type)
452 struct drm_device *dev = fence->dev;
453 struct drm_fence_manager *fm = &dev->fm;
454 struct drm_fence_driver *driver = dev->driver->fence_driver;
455 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
458 uint32_t native_type;
/* Make sure the fence is not on a ring from a previous emit. */
461 drm_fence_unring(dev, &fence->ring);
462 ret = driver->emit(dev, fence_class, fence_flags, &sequence, &native_type);
466 write_lock_irqsave(&fm->lock, flags);
467 fence->fence_class = fence_class;
469 fence->flush_mask = 0x00;
470 fence->submitted_flush = 0x00;
471 fence->signaled = 0x00;
472 fence->sequence = sequence;
473 fence->native_type = native_type;
474 if (list_empty(&fc->ring))
475 fc->last_exe_flush = sequence - 1;
476 list_add_tail(&fence->ring, &fc->ring);
477 write_unlock_irqrestore(&fm->lock, flags);
480 EXPORT_SYMBOL(drm_fence_object_emit);
/*
 * drm_fence_object_init - initialize a freshly-allocated fence object:
 * usage count 1, empty ring and user-object list heads, zeroed flush state,
 * and an immediate emit when DRM_FENCE_FLAG_EMIT is requested.
 * NOTE(review): the `uint32_t type` parameter line, locals (`ret`,
 * `flags`), field assignments around `fence->type`/`signaled`, and the
 * return were dropped by this extraction.
 */
482 static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
484 uint32_t fence_flags,
485 struct drm_fence_object * fence)
489 struct drm_fence_manager *fm = &dev->fm;
491 mutex_lock(&dev->struct_mutex);
492 atomic_set(&fence->usage, 1);
493 mutex_unlock(&dev->struct_mutex);
495 write_lock_irqsave(&fm->lock, flags);
496 INIT_LIST_HEAD(&fence->ring);
499 * Avoid hitting BUG() for kernel-only fence objects.
502 INIT_LIST_HEAD(&fence->base.list);
503 fence->fence_class = fence_class;
505 fence->flush_mask = 0;
506 fence->submitted_flush = 0;
510 write_unlock_irqrestore(&fm->lock, flags);
511 if (fence_flags & DRM_FENCE_FLAG_EMIT) {
512 ret = drm_fence_object_emit(fence, fence_flags,
513 fence->fence_class, type);
/*
 * drm_fence_add_user_object - expose @fence to user space: register it as a
 * user object (sharable per @shareable), take the user-object reference,
 * and install the destroy callback.  Runs under dev->struct_mutex.
 * NOTE(review): the `int shareable` parameter line, error check on `ret`,
 * and the final return were dropped by this extraction.
 */
518 int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence,
521 struct drm_device *dev = priv->head->dev;
524 mutex_lock(&dev->struct_mutex);
525 ret = drm_add_user_object(priv, &fence->base, shareable);
/* Reference held on behalf of the user-object system; released by
 * drm_fence_object_destroy when the user object is removed. */
528 atomic_inc(&fence->usage);
529 fence->base.type = drm_fence_type;
530 fence->base.remove = &drm_fence_object_destroy;
531 DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
533 mutex_unlock(&dev->struct_mutex);
536 EXPORT_SYMBOL(drm_fence_add_user_object);
/*
 * drm_fence_object_create - allocate and initialize a fence object,
 * returning it via *c_fence; bumps the manager's fence count on success.
 * On init failure the allocation is released through the unlocked deref
 * path.
 * NOTE(review): the allocation NULL-check, `*c_fence = fence;`,
 * error/return lines and locals were dropped by this extraction.
 */
538 int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type,
539 unsigned flags, struct drm_fence_object ** c_fence)
541 struct drm_fence_object *fence;
543 struct drm_fence_manager *fm = &dev->fm;
545 fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
548 ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
550 drm_fence_usage_deref_unlocked(&fence);
554 atomic_inc(&fm->count);
559 EXPORT_SYMBOL(drm_fence_object_create);
/*
 * drm_fence_manager_init - one-time setup of the per-device fence manager:
 * initialize the rwlock, then under it set up each driver-declared fence
 * class (ring, pending flush state, wait queue) and zero the fence count.
 * NOTE(review): locals (`flags`, `i`) and per-class field initializers were
 * dropped by this extraction.
 */
561 void drm_fence_manager_init(struct drm_device * dev)
563 struct drm_fence_manager *fm = &dev->fm;
564 struct drm_fence_class_manager *fence_class;
565 struct drm_fence_driver *fed = dev->driver->fence_driver;
569 rwlock_init(&fm->lock);
570 write_lock_irqsave(&fm->lock, flags);
576 fm->num_classes = fed->num_classes;
/* The driver may not declare more classes than the manager can hold. */
577 BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
579 for (i=0; i<fm->num_classes; ++i) {
580 fence_class = &fm->fence_class[i];
582 INIT_LIST_HEAD(&fence_class->ring);
583 fence_class->pending_flush = 0;
584 DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
587 atomic_set(&fm->count, 0);
589 write_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_fill_arg - snapshot a fence's user-visible state (handle,
 * class, type, signaled bits, error, sequence) into the ioctl argument
 * struct, consistently under fm->lock.
 */
592 void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg)
594 struct drm_device *dev = fence->dev;
595 struct drm_fence_manager *fm = &dev->fm;
596 unsigned long irq_flags;
598 read_lock_irqsave(&fm->lock, irq_flags);
599 arg->handle = fence->base.hash.key;
600 arg->fence_class = fence->fence_class;
601 arg->type = fence->type;
602 arg->signaled = fence->signaled;
603 arg->error = fence->error;
604 arg->sequence = fence->sequence;
605 read_unlock_irqrestore(&fm->lock, irq_flags);
607 EXPORT_SYMBOL(drm_fence_fill_arg);
/*
 * drm_fence_manager_takedown - fence-manager teardown hook.
 * NOTE(review): the body is not visible in this extraction (possibly an
 * empty body in this driver) - confirm against the original file.
 */
610 void drm_fence_manager_takedown(struct drm_device * dev)
/*
 * drm_lookup_fence_object - resolve a user handle to a referenced fence
 * object under dev->struct_mutex; rejects handles that are not fence-type
 * user objects.  The caller owns the returned reference.
 * NOTE(review): the NULL-return on the error path and final `return fence;`
 * were dropped by this extraction.
 */
614 struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle)
616 struct drm_device *dev = priv->head->dev;
617 struct drm_user_object *uo;
618 struct drm_fence_object *fence;
620 mutex_lock(&dev->struct_mutex);
621 uo = drm_lookup_user_object(priv, handle);
622 if (!uo || (uo->type != drm_fence_type)) {
623 mutex_unlock(&dev->struct_mutex);
/* Take a reference while still holding struct_mutex. */
626 fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
627 mutex_unlock(&dev->struct_mutex);
/*
 * drm_fence_create_ioctl - ioctl: create a fence object for user space.
 * Requires the fence manager to be initialized; requires the DRM lock when
 * DRM_FENCE_FLAG_EMIT is requested.  Creates the fence, registers it as a
 * user object (sharable per flags), fills the return arg, and drops the
 * local reference.
 * NOTE(review): error-return lines, the `int ret;` local and some braces
 * were dropped by this extraction.
 */
631 int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
634 struct drm_fence_manager *fm = &dev->fm;
635 struct drm_fence_arg *arg = data;
636 struct drm_fence_object *fence;
639 if (!fm->initialized) {
640 DRM_ERROR("The DRM driver does not support fencing.\n");
644 if (arg->flags & DRM_FENCE_FLAG_EMIT)
645 LOCK_TEST_WITH_RETURN(dev, file_priv);
646 ret = drm_fence_object_create(dev, arg->fence_class,
647 arg->type, arg->flags, &fence);
650 ret = drm_fence_add_user_object(file_priv, fence,
652 DRM_FENCE_FLAG_SHAREABLE);
/* On failure to register the user object, drop the only reference. */
654 drm_fence_usage_deref_unlocked(&fence);
659 * usage > 0. No need to lock dev->struct_mutex;
662 arg->handle = fence->base.hash.key;
665 drm_fence_fill_arg(fence, arg);
666 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_destroy_ioctl - ioctl: destroy a fence user object owned by the
 * calling file.  Validates the handle type and ownership under
 * dev->struct_mutex before removing the user object.
 * NOTE(review): the `int ret;` local, the error return in the invalid-
 * handle branch and the final return were dropped by this extraction.
 */
671 int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
674 struct drm_fence_manager *fm = &dev->fm;
675 struct drm_fence_arg *arg = data;
676 struct drm_user_object *uo;
679 if (!fm->initialized) {
680 DRM_ERROR("The DRM driver does not support fencing.\n");
684 mutex_lock(&dev->struct_mutex);
685 uo = drm_lookup_user_object(file_priv, arg->handle);
/* Only the owning file may destroy a fence user object. */
686 if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) {
687 mutex_unlock(&dev->struct_mutex);
690 ret = drm_remove_user_object(file_priv, uo);
691 mutex_unlock(&dev->struct_mutex);
/*
 * drm_fence_reference_ioctl - ioctl: take a user-object reference on a
 * (possibly shared) fence handle, then report its current state back to the
 * caller via drm_fence_fill_arg.
 * NOTE(review): the `int ret;` local, the error check after
 * drm_user_object_ref and the final return were dropped by this extraction.
 */
696 int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
699 struct drm_fence_manager *fm = &dev->fm;
700 struct drm_fence_arg *arg = data;
701 struct drm_fence_object *fence;
702 struct drm_user_object *uo;
705 if (!fm->initialized) {
706 DRM_ERROR("The DRM driver does not support fencing.\n");
710 ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
/* Temporary local reference, dropped after filling the arg. */
713 fence = drm_lookup_fence_object(file_priv, arg->handle);
714 drm_fence_fill_arg(fence, arg);
715 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_unreference_ioctl - ioctl: drop a user-object reference on a
 * fence handle; thin wrapper around drm_user_object_unref.
 */
721 int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
724 struct drm_fence_manager *fm = &dev->fm;
725 struct drm_fence_arg *arg = data;
728 if (!fm->initialized) {
729 DRM_ERROR("The DRM driver does not support fencing.\n");
733 return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
/*
 * drm_fence_signaled_ioctl - ioctl: report a fence's current state
 * (including its signaled bits) to user space.
 * NOTE(review): the handle-lookup NULL check and return lines were dropped
 * by this extraction.
 */
736 int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
739 struct drm_fence_manager *fm = &dev->fm;
740 struct drm_fence_arg *arg = data;
741 struct drm_fence_object *fence;
744 if (!fm->initialized) {
745 DRM_ERROR("The DRM driver does not support fencing.\n");
749 fence = drm_lookup_fence_object(file_priv, arg->handle);
753 drm_fence_fill_arg(fence, arg);
754 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_flush_ioctl - ioctl: request a flush of the given type bits on
 * a fence handle, then report the fence's state back to the caller.
 * NOTE(review): the lookup NULL check, `int ret;` local and return lines
 * were dropped by this extraction.
 */
759 int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
762 struct drm_fence_manager *fm = &dev->fm;
763 struct drm_fence_arg *arg = data;
764 struct drm_fence_object *fence;
767 if (!fm->initialized) {
768 DRM_ERROR("The DRM driver does not support fencing.\n");
772 fence = drm_lookup_fence_object(file_priv, arg->handle);
775 ret = drm_fence_object_flush(fence, arg->type);
777 drm_fence_fill_arg(fence, arg);
778 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_wait_ioctl - ioctl: wait for a fence handle to signal, lazily
 * when DRM_FENCE_FLAG_WAIT_LAZY is set, then report its state back.
 * NOTE(review): the lookup NULL check, `int ret;` local, remaining wait
 * arguments and return lines were dropped by this extraction.
 */
784 int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
787 struct drm_fence_manager *fm = &dev->fm;
788 struct drm_fence_arg *arg = data;
789 struct drm_fence_object *fence;
792 if (!fm->initialized) {
793 DRM_ERROR("The DRM driver does not support fencing.\n");
797 fence = drm_lookup_fence_object(file_priv, arg->handle);
800 ret = drm_fence_object_wait(fence,
801 arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
804 drm_fence_fill_arg(fence, arg);
805 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_emit_ioctl - ioctl: (re)emit a fence handle into the command
 * stream.  Requires the DRM lock; reports the fence's post-emit state back
 * to the caller.
 * NOTE(review): the lookup NULL check, `int ret;` local, the final emit
 * argument and return lines were dropped by this extraction.
 */
811 int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
814 struct drm_fence_manager *fm = &dev->fm;
815 struct drm_fence_arg *arg = data;
816 struct drm_fence_object *fence;
819 if (!fm->initialized) {
820 DRM_ERROR("The DRM driver does not support fencing.\n");
824 LOCK_TEST_WITH_RETURN(dev, file_priv);
825 fence = drm_lookup_fence_object(file_priv, arg->handle);
828 ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
831 drm_fence_fill_arg(fence, arg);
832 drm_fence_usage_deref_unlocked(&fence);
837 int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
840 struct drm_fence_manager *fm = &dev->fm;
841 struct drm_fence_arg *arg = data;
842 struct drm_fence_object *fence;
845 if (!fm->initialized) {
846 DRM_ERROR("The DRM driver does not support fencing.\n");
850 if (!dev->bm.initialized) {
851 DRM_ERROR("Buffer object manager is not initialized\n");
854 LOCK_TEST_WITH_RETURN(dev, file_priv);
855 ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
860 if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
861 ret = drm_fence_add_user_object(file_priv, fence,
863 DRM_FENCE_FLAG_SHAREABLE);
868 arg->handle = fence->base.hash.key;
870 drm_fence_fill_arg(fence, arg);
871 drm_fence_usage_deref_unlocked(&fence);