continue;
for (j = 0; j < exec[i]->bo_count; j++) {
- bo = to_vc4_bo(&exec[i]->bo[j]->base);
+ bo = to_vc4_bo(exec[i]->bo[j]);
/* Retain BOs just in case they were marked purgeable.
* This prevents the BO from being purged before
* someone had a chance to dump the hang state.
*/
WARN_ON(!refcount_read(&bo->usecnt));
refcount_inc(&bo->usecnt);
- drm_gem_object_get(&exec[i]->bo[j]->base);
- kernel_state->bo[k++] = &exec[i]->bo[j]->base;
+ drm_gem_object_get(exec[i]->bo[j]);
+ kernel_state->bo[k++] = exec[i]->bo[j];
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
unsigned i;
for (i = 0; i < exec->bo_count; i++) {
- bo = to_vc4_bo(&exec->bo[i]->base);
+ bo = to_vc4_bo(exec->bo[i]);
bo->seqno = seqno;
dma_resv_add_fence(bo->base.base.resv, exec->fence,
{
int i;
- for (i = 0; i < exec->bo_count; i++) {
- struct drm_gem_object *bo = &exec->bo[i]->base;
-
- dma_resv_unlock(bo->resv);
- }
+ for (i = 0; i < exec->bo_count; i++)
+ dma_resv_unlock(exec->bo[i]->resv);
ww_acquire_fini(acquire_ctx);
}
retry:
if (contended_lock != -1) {
- bo = &exec->bo[contended_lock]->base;
+ bo = exec->bo[contended_lock];
ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
if (i == contended_lock)
continue;
- bo = &exec->bo[i]->base;
+ bo = exec->bo[i];
ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++) {
- bo = &exec->bo[j]->base;
+ bo = exec->bo[j];
dma_resv_unlock(bo->resv);
}
if (contended_lock != -1 && contended_lock >= i) {
- bo = &exec->bo[contended_lock]->base;
+ bo = exec->bo[contended_lock];
dma_resv_unlock(bo->resv);
}
* before we commit the CL to the hardware.
*/
for (i = 0; i < exec->bo_count; i++) {
- bo = &exec->bo[i]->base;
+ bo = exec->bo[i];
ret = dma_resv_reserve_fences(bo->resv, 1);
if (ret) {
}
drm_gem_object_get(bo);
- exec->bo[i] = (struct drm_gem_dma_object *)bo;
+ exec->bo[i] = bo;
}
spin_unlock(&file_priv->table_lock);
goto fail_put_bo;
for (i = 0; i < exec->bo_count; i++) {
- ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
+ ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i]));
if (ret)
goto fail_dec_usecnt;
}
* step.
*/
for (i-- ; i >= 0; i--)
- vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
+ vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i]));
fail_put_bo:
/* Release any reference to acquired objects. */
for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
- drm_gem_object_put(&exec->bo[i]->base);
+ drm_gem_object_put(exec->bo[i]);
fail:
kvfree(handles);
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++) {
- struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
+ struct vc4_bo *bo = to_vc4_bo(exec->bo[i]);
vc4_bo_dec_usecnt(bo);
- drm_gem_object_put(&exec->bo[i]->base);
+ drm_gem_object_put(exec->bo[i]);
}
kvfree(exec->bo);
}