qxl_bo_ref(bo);
entry->tv.bo = &bo->tbo;
+ entry->tv.shared = false;
list_add_tail(&entry->tv.head, &release->bos);
return 0;
}
}
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+ p->relocs[i].tv.shared = false;
p->relocs[i].handle = r->handle;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
+ list[0].tv.shared = false;
list[0].tiling_flags = 0;
list[0].handle = 0;
list_add(&list[0].tv.head, head);
list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
+ list[idx].tv.shared = false;
list[idx].tiling_flags = 0;
list[idx].handle = 0;
list_add(&list[idx++].tv.head, head);
memset(&tv, 0, sizeof(tv));
tv.bo = &bo->tbo;
+ tv.shared = false;
INIT_LIST_HEAD(&head);
list_add(&tv.head, &head);
ret = -EBUSY;
}
- if (!ret)
-         continue;
+ if (!ret) {
+         if (!entry->shared)
+                 continue;
+
+         ret = reservation_object_reserve_shared(bo->resv);
+         if (!ret)
+                 continue;
+ }
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
ret = 0;
}
+ if (!ret && entry->shared)
+         ret = reservation_object_reserve_shared(bo->resv);
+
if (unlikely(ret != 0)) {
        if (ret == -EINTR)
                ret = -ERESTARTSYS;
list_for_each_entry(entry, list, head) {
        bo = entry->bo;
-         reservation_object_add_excl_fence(bo->resv, fence);
+         if (entry->shared)
+                 reservation_object_add_shared_fence(bo->resv, fence);
+         else
+                 reservation_object_add_excl_fence(bo->resv, fence);
        ttm_bo_add_to_lru(bo);
        __ttm_bo_unreserve(bo);
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
+ val_buf->shared = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
}
INIT_LIST_HEAD(&validate_list);
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+ pinned_val.shared = false;
list_add_tail(&pinned_val.head, &validate_list);
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+ query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
+ val_buf.shared = false;
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
+ val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
if (unlikely(ret != 0))
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
+ val_buf.shared = false;
ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
if (unlikely(ret != 0))
        return ret;
return 0;
val_buf.bo = NULL;
+ val_buf.shared = false;
if (res->backup)
        val_buf.bo = &res->backup->base;
do {
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
+ val_buf.shared = false;
list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
+ * @shared: whether the fence should be added as shared rather than exclusive.
*/
struct ttm_validate_buffer {
        struct list_head head;
        struct ttm_buffer_object *bo;
+         bool shared;
};
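
For reference, a minimal caller-side sketch of how the new field is meant to be consumed, assuming the three-argument ttm_eu_reserve_buffers() and the struct fence based ttm_eu_fence_buffer_objects() that this series targets; the example_submit() helper and its readonly flag are illustrative only and not part of the patch:

#include <linux/fence.h>
#include <linux/list.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_execbuf_util.h>

/*
 * Illustrative only: validate a single BO and attach @fence to it through
 * the execbuf_util helpers, using the new ->shared field.
 */
static int example_submit(struct ttm_buffer_object *bo,
                          struct fence *fence, bool readonly)
{
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        int ret;

        INIT_LIST_HEAD(&list);

        tv.bo = bo;
        /* Read-only users may now request a shared fence slot; writers keep
         * the exclusive fence as before. This patch itself always passes
         * false, so behaviour is unchanged until callers opt in. */
        tv.shared = readonly;
        list_add_tail(&tv.head, &list);

        /* Reserves every BO on the list; for entries with ->shared set this
         * also reserves a shared fence slot via
         * reservation_object_reserve_shared(). */
        ret = ttm_eu_reserve_buffers(&ticket, &list, true);
        if (ret)
                return ret;

        /* ... submit the actual work that produces @fence here ... */

        /* Adds @fence as shared or exclusive per entry, puts the BOs back on
         * the LRU and drops the reservations. */
        ttm_eu_fence_buffer_objects(&ticket, &list, fence);
        return 0;
}

Since every caller in this patch still sets shared = false, the change is purely preparatory; follow-up patches can mark buffers that are only read as shared so they no longer force an exclusive fence.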
/**