flushed = id->flushed_updates;
if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
- (atomic64_read(&id->owner) != vm->client_id) ||
+ (atomic64_read(&id->owner) != vm->entity.fence_context) ||
(job->vm_pd_addr != id->pd_gpu_addr) ||
(updates && (!flushed || updates->context != flushed->context ||
dma_fence_is_later(updates, flushed))) ||
@@ ... @@
id->flushed_updates = dma_fence_get(updates);
}
id->pd_gpu_addr = job->vm_pd_addr;
- atomic64_set(&id->owner, vm->client_id);
+ atomic64_set(&id->owner, vm->entity.fence_context);
job->vm_needs_flush = needs_flush;
if (needs_flush) {
dma_fence_put(id->last_flush);
@@ ... @@
if (amdgpu_vmid_had_gpu_reset(adev, id))
continue;
- if (atomic64_read(&id->owner) != vm->client_id)
+ if (atomic64_read(&id->owner) != vm->entity.fence_context)
continue;
if (job->vm_pd_addr != id->pd_gpu_addr)
@@ ... @@
id->pd_gpu_addr = job->vm_pd_addr;
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
- atomic64_set(&id->owner, vm->client_id);
+ atomic64_set(&id->owner, vm->entity.fence_context);
needs_flush:
job->vm_needs_flush = true;
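All four hunks above make the same substitution: the VMID owner tag is now the fence context of the VM's scheduler entity rather than the per-VM client_id, both where the owner is compared and where it is recorded after grabbing an ID. As a rough illustration of that check, here is a minimal userspace sketch, not kernel code: the structs are cut down to the fields involved, C11 atomics stand in for the kernel's atomic64 helpers, and the context values are picked by hand.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* cut-down stand-ins for drm_sched_entity, amdgpu_vm and amdgpu_vmid */
struct sched_entity { uint64_t fence_context; };
struct vm           { struct sched_entity entity; };
struct vmid         { atomic_uint_fast64_t owner; };

/* the reuse test from the hunks above: the VMID still belongs to this
 * VM only if its owner tag matches the VM entity's fence context */
static bool vmid_owned_by(struct vmid *id, struct vm *vm)
{
	return atomic_load(&id->owner) == vm->entity.fence_context;
}

/* grabbing the VMID records the entity's fence context as the owner */
static void vmid_grab(struct vmid *id, struct vm *vm)
{
	atomic_store(&id->owner, vm->entity.fence_context);
}

int main(void)
{
	/* arbitrary but distinct context values for the demo */
	struct vm a = { .entity = { .fence_context = 2 } };
	struct vm b = { .entity = { .fence_context = 4 } };
	struct vmid id;

	atomic_init(&id.owner, 0);
	vmid_grab(&id, &a);
	printf("a owns id: %d, b owns id: %d\n",
	       vmid_owned_by(&id, &a), vmid_owned_by(&id, &b));
	return 0;
}

The comparison sites behave exactly as before; only the source of the unique value changes.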
@@ ... @@
uint64_t init_pde_value = 0;
vm->va = RB_ROOT_CACHED;
- vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
@@ ... @@
adev->vm_manager.seqno[i] = 0;
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
- atomic64_set(&adev->vm_manager.client_counter, 0);
spin_lock_init(&adev->vm_manager.prt_lock);
atomic_set(&adev->vm_manager.num_prt_users, 0);
@@ ... @@
/* Scheduler entity for page table updates */
struct drm_sched_entity entity;
- /* client id and PASID (TODO: replace client_id with PASID) */
- u64 client_id;
unsigned int pasid;
/* dedicated to vm */
struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
@@ ... @@
struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rings;
atomic_t vm_pte_next_ring;
- /* client id counter */
- atomic64_t client_counter;
/* partial resident texture handling */
spinlock_t prt_lock;
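The client_counter dropped above existed only to hand each VM a unique 64-bit tag at init time. The replacement leans on the scheduler instead: every drm_sched_entity is given its own fence context range when it is initialised (allocated with dma_fence_context_alloc() on the kernel side), so vm->entity.fence_context is already unique per VM and the separate counter is redundant. Below is a small userspace sketch of that allocation idea; alloc_fence_context() is a hypothetical stand-in for the kernel helper, not its real implementation.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for dma_fence_context_alloc(): hand out 'num' consecutive
 * context ids from a global counter and never reuse them */
static atomic_uint_fast64_t context_counter;

static uint64_t alloc_fence_context(unsigned num)
{
	return atomic_fetch_add(&context_counter, num);
}

/* reduced drm_sched_entity: the context is assigned once at init,
 * so it can double as the per-VM owner tag used in the hunks above */
struct sched_entity { uint64_t fence_context; };

static void sched_entity_init(struct sched_entity *entity)
{
	/* two ids per entity, mirroring the scheduled/finished fence pair */
	entity->fence_context = alloc_fence_context(2);
}

int main(void)
{
	struct sched_entity vm_a_entity, vm_b_entity;

	sched_entity_init(&vm_a_entity);
	sched_entity_init(&vm_b_entity);
	printf("vm a: %llu, vm b: %llu\n",
	       (unsigned long long)vm_a_entity.fence_context,
	       (unsigned long long)vm_b_entity.fence_context);
	return 0;
}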