	struct work_struct		reset_work;
	uint32_t			amdgpu_reset_level_mask;
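+	/* set for the duration of amdgpu_job_timedout() handling a hung job */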
+	bool				job_hang;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);
-	if (adev->gfx.kiq.ring.sched.ready)
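+	/*
+	 * Skip the KIQ ring test while a hung job is being handled;
+	 * the queue may not be able to complete it.
+	 */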
+	if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);
}
	memset(&ti, 0, sizeof(struct amdgpu_task_info));
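+	/*
+	 * Flag the hang so the GFX teardown helpers skip ring tests and
+	 * CP idle polling until this handler exits.
+	 */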
+	adev->job_hang = true;
	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
	}
exit:
+	adev->job_hang = false;
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}
		WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
	}
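+	/*
+	 * When halting the CP during hung-job handling, don't poll
+	 * CP_STAT for idle; it may never clear.
+	 */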
+	if (adev->job_hang && !enable)
+		return 0;
+
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
			break;
	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
					   PREEMPT_QUEUES, 0, 0);
-
-	return amdgpu_ring_test_helper(kiq_ring);
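+	/* only test the KIQ ring when no hung job is being handled */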
+	if (!adev->job_hang)
+		return amdgpu_ring_test_helper(kiq_ring);
+	else
+		return 0;
}
#endif
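For illustration only, a minimal standalone C sketch of the gating pattern these hunks introduce (hypothetical names, not the driver's API): a per-device flag is set for the lifetime of the timeout handler, and helpers called underneath check it to skip ring tests that cannot succeed while a job is hung.

#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	bool job_hang;			/* mirrors adev->job_hang */
};

/* stands in for amdgpu_ring_test_helper(): skipped while a hang is handled */
static int fake_ring_test(struct fake_dev *d)
{
	if (d->job_hang)
		return 0;
	printf("ring test submitted\n");
	return 0;
}

/* stands in for the job timeout handler: bracket recovery with the flag */
static void fake_job_timedout(struct fake_dev *d)
{
	d->job_hang = true;
	fake_ring_test(d);		/* skipped */
	d->job_hang = false;
}

int main(void)
{
	struct fake_dev d = { .job_hang = false };

	fake_ring_test(&d);		/* runs normally */
	fake_job_timedout(&d);		/* ring test is skipped inside */
	return 0;
}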