void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync);
+ struct amdgpu_sync *sync, struct fence *fence);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct fence *fence);
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
struct amdgpu_ring *ring = job->ibs->ring;
- struct amdgpu_device *adev = ring->adev;
int r;
- mutex_lock(&adev->vm_manager.lock);
- r = amdgpu_vm_grab_id(vm, ring, sync);
- if (r) {
+ r = amdgpu_vm_grab_id(vm, ring, sync,
+ &job->base.s_fence->base);
+ if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
- } else {
- fence = &job->base.s_fence->base;
- amdgpu_vm_fence(ring->adev, vm, fence);
+ else
job->ibs->grabbed_vmid = true;
- }
- mutex_unlock(&adev->vm_manager.lock);
fence = amdgpu_sync_get_fence(sync);
}
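For orientation, the dependency path reads roughly like this once the hunk above is applied. This is a sketch reassembled from the diff itself; the enclosing function and the job/scheduler field layout (job->ibs, job->base.s_fence) are taken from the hunk rather than quoted from the tree:

	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
		struct amdgpu_ring *ring = job->ibs->ring;
		int r;

		/* amdgpu_vm_grab_id() now receives the scheduler fence and
		 * takes adev->vm_manager.lock internally, so no locking is
		 * needed at this level anymore.
		 */
		r = amdgpu_vm_grab_id(vm, ring, sync,
				      &job->base.s_fence->base);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);
		else
			job->ibs->grabbed_vmid = true;

		fence = amdgpu_sync_get_fence(sync);
	}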
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
* @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
*
* Allocate an id for the vm, adding fences to the sync obj as necessary.
- *
- * Global mutex must be locked!
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync, struct fence *fence)
{
struct fence *best[AMDGPU_MAX_RINGS] = {};
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
struct amdgpu_device *adev = ring->adev;
unsigned choices[2] = {};
unsigned i;
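+ /* all of the VMID bookkeeping below is serialized by the manager lock */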
+ mutex_lock(&adev->vm_manager.lock);
+
/* check if the id is still valid */
if (vm_id->id) {
unsigned id = vm_id->id;
long owner;
owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
if (owner == (long)vm) {
trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
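+ /* the ID still belongs to this VM; just refresh the fence protecting it from reuse */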
+ fence_put(adev->vm_manager.ids[id].active);
+ adev->vm_manager.ids[id].active = fence_get(fence);
+ mutex_unlock(&adev->vm_manager.lock);
return 0;
}
}
/* found a free one */
vm_id->id = i;
trace_amdgpu_vm_grab_id(vm, i, ring->idx);
+ mutex_unlock(&adev->vm_manager.lock);
return 0;
}
}
for (i = 0; i < 2; ++i) {
- if (choices[i]) {
- struct fence *fence;
+ struct fence *active;
+ int r;
- fence = adev->vm_manager.ids[choices[i]].active;
- vm_id->id = choices[i];
+ if (!choices[i])
+ continue;
- trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
- return amdgpu_sync_fence(ring->adev, sync, fence);
- }
+ vm_id->id = choices[i];
+ active = adev->vm_manager.ids[vm_id->id].active;
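+ /* add the old user's fence to the sync object so the new job waits for it */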
+ r = amdgpu_sync_fence(ring->adev, sync, active);
+
+ trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
+ atomic_long_set(&adev->vm_manager.ids[vm_id->id].owner, (long)vm);
+
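+ /* drop the old protection fence and install the one for this submission */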
+ fence_put(adev->vm_manager.ids[vm_id->id].active);
+ adev->vm_manager.ids[vm_id->id].active = fence_get(fence);
+
+ mutex_unlock(&adev->vm_manager.lock);
+ return r;
}
/* should never happen */
BUG();
+ mutex_unlock(&adev->vm_manager.lock);
return -EINVAL;
}
}
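The bookkeeping updated above lives in the VM manager: ids[id].active is the fence that must signal before the VMID may be handed to someone else, and ids[id].owner records which amdgpu_vm currently holds it. A simplified sketch of that state, assuming the real definition in amdgpu.h (which carries more fields and the array sizing) rather than reproducing it:

	/* Simplified sketch only, not the actual amdgpu.h layout. */
	struct amdgpu_vm_manager {
		struct mutex	lock;			/* taken by amdgpu_vm_grab_id() above */
		struct {
			struct fence	*active;	/* fence protecting the VMID from reuse */
			atomic_long_t	owner;		/* (long)vm of the VM that owns the ID */
		} ids[AMDGPU_NUM_VM];
		/* ... further members elided ... */
	};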
/**
- * amdgpu_vm_fence - remember fence for vm
- *
- * @adev: amdgpu_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
- */
-void amdgpu_vm_fence(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct fence *fence)
-{
- struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
- unsigned vm_id = vm->ids[ring->idx].id;
-
- fence_put(adev->vm_manager.ids[vm_id].active);
- adev->vm_manager.ids[vm_id].active = fence_get(fence);
- atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
-}
-
-/**
* amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
*
* @vm: requested vm