static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
-	int r, resched, length;
+	int r, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	/* stop the scheduler */
	kthread_park(ring->sched.thread);
-	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-
	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
	up_read(&adev->reset_domain->sem);
-	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-
pro_end:
	kfree(fences);
}
	amdgpu_fence_driver_hw_fini(adev);
-	if (adev->mman.initialized) {
+	if (adev->mman.initialized)
		flush_delayed_work(&adev->mman.bdev.wq);
-		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-	}
	if (adev->pm_sysfs_en)
		amdgpu_pm_sysfs_fini(adev);
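One semantic detail in the amdgpu hunk above: the removed ttm_bo_lock_delayed_workqueue() was built on cancel_delayed_work_sync(), while the flush_delayed_work() call that remains forces a pending run to execute and waits for it instead of cancelling it. A minimal sketch contrasting the two workqueue calls (generic kernel code, not part of this patch; the function name quiesce_example is invented):

#include <linux/workqueue.h>

/* Sketch only: contrast the two delayed-work calls involved in this change. */
static void quiesce_example(struct delayed_work *dwork)
{
	/* Force a pending instance to run now and wait for it to finish. */
	flush_delayed_work(dwork);

	/* Cancel a pending instance instead, waiting if one is already running. */
	cancel_delayed_work_sync(dwork);
}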
	bool saved = false;
	int i, r;
-	int resched;
	down_write(&rdev->exclusive_lock);
	atomic_inc(&rdev->gpu_reset_counter);
	radeon_save_bios_scratch_regs(rdev);
-	/* block TTM */
-	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* reset hpd state */
	radeon_hpd_init(rdev);
-	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-
	rdev->in_reset = true;
	rdev->needs_reset = false;
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
-	int resched;
+
	rdev = container_of(work, struct radeon_device,
			    pm.dynpm_idle_work.work);
-	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
-	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
}
EXPORT_SYMBOL(ttm_bo_put);
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
-{
-	return cancel_delayed_work_sync(&bdev->wq);
-}
-EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
-
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
-{
-	if (resched)
-		schedule_delayed_work(&bdev->wq,
-				      ((HZ / 100) < 1) ? 1 : HZ / 100);
-}
-EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
-
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_resource **mem,
				     struct ttm_operation_ctx *ctx,
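For reference, the two exported helpers removed from ttm_bo.c above were thin wrappers around the delayed-work API: the "lock" cancelled the pending delayed work and reported whether it had been queued, and the "unlock" rescheduled it roughly 10 ms later (HZ / 100 jiffies, with a minimum of one jiffy). A caller that still wanted the old pattern could open-code it against the same bdev->wq delayed work; a rough sketch under that assumption, not part of this patch, with an invented helper name:

#include <linux/workqueue.h>
#include <drm/ttm/ttm_device.h>

/* Hypothetical helper, not in the tree: run @fn with TTM's delayed work
 * quiesced, mimicking the removed lock/unlock pair.
 */
static void ttm_with_delayed_work_quiesced(struct ttm_device *bdev,
					   void (*fn)(struct ttm_device *))
{
	/* What ttm_bo_lock_delayed_workqueue() used to do. */
	bool was_queued = cancel_delayed_work_sync(&bdev->wq);

	fn(bdev);

	/* What ttm_bo_unlock_delayed_workqueue() used to do. */
	if (was_queued)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}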
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk);
-/**
- * ttm_bo_lock_delayed_workqueue
- *
- * Prevent the delayed workqueue from running.
- * Returns
- * True if the workqueue was queued at the time
- */
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
-
-/**
- * ttm_bo_unlock_delayed_workqueue
- *
- * Allows the delayed workqueue to run.
- */
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
-
/**
 * ttm_bo_eviction_valuable
 *