drm/amdgpu: don't use MM idle_work for SRIOV (v2)
author Monk Liu <Monk.Liu@amd.com>
Fri, 19 Jan 2018 12:29:17 +0000 (20:29 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Wed, 28 Feb 2018 19:18:06 +0000 (14:18 -0500)
SR-IOV does not expose the CG/PG (clock/power gating) feature to the VF,
so the MM (UVD/VCE) idle_work is skipped under SR-IOV

v2:
remove superfluous changes: since idle_work is never scheduled
under SR-IOV, the SR-IOV check inside the idle_work handler can be
dropped as well

v3:
drop the SR-IOV check in amdgpu_vce/uvd_suspend

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

index 9cd5517..7ad814d 100644
@@ -1116,9 +1116,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
                container_of(work, struct amdgpu_device, uvd.idle_work.work);
        unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-       if (amdgpu_sriov_vf(adev))
-               return;
-
        if (fences == 0) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, false);
@@ -1138,11 +1135,12 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
+       bool set_clocks;
 
        if (amdgpu_sriov_vf(adev))
                return;
 
+       set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, true);
@@ -1158,7 +1156,8 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
-       schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+       if (!amdgpu_sriov_vf(ring->adev))
+               schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
index d274ae5..9152478 100644
@@ -300,9 +300,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
                container_of(work, struct amdgpu_device, vce.idle_work.work);
        unsigned i, count = 0;
 
-       if (amdgpu_sriov_vf(adev))
-               return;
-
        for (i = 0; i < adev->vce.num_rings; i++)
                count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
 
@@ -362,7 +359,8 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
  */
 void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
 {
-       schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
+       if (!amdgpu_sriov_vf(ring->adev))
+               schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
 }
 
 /**
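The pattern the patch settles on can be condensed into a small user-space
sketch. This is not amdgpu code: fake_device, cancel_idle_work() and the
other names are illustrative stand-ins for the real delayed_work machinery,
and the sketch only models the control flow (the re-arm is skipped on a VF,
and the cancel now sits after the SR-IOV check so a VF never touches the
work item at all).

/*
 * Minimal user-space sketch of the idle_work pattern this patch changes.
 * All names (fake_device, cancel_idle_work, ...) are illustrative, not
 * the real amdgpu API.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_device {
	bool sriov_vf;          /* running as an SR-IOV virtual function? */
	bool idle_work_pending; /* models a queued delayed_work item      */
	bool clocks_on;         /* models UVD/VCE clock/power state       */
};

/* Models cancel_delayed_work_sync(): returns true if work was pending. */
static bool cancel_idle_work(struct fake_device *dev)
{
	bool was_pending = dev->idle_work_pending;

	dev->idle_work_pending = false;
	return was_pending;
}

static void ring_begin_use(struct fake_device *dev)
{
	/*
	 * Under SR-IOV the VF has no CG/PG control: skip everything.
	 * Note the cancel happens *after* this check, matching the
	 * reordering in amdgpu_uvd_ring_begin_use() above.
	 */
	if (dev->sriov_vf)
		return;

	/* If no idle work was pending, the block was idle: power up. */
	if (!cancel_idle_work(dev))
		dev->clocks_on = true;
}

static void ring_end_use(struct fake_device *dev)
{
	/*
	 * The patch guards the re-arm, so idle work is never queued on
	 * a VF and the handler needs no SR-IOV check of its own.
	 */
	if (!dev->sriov_vf)
		dev->idle_work_pending = true;
}

int main(void)
{
	struct fake_device vf = { .sriov_vf = true };

	ring_begin_use(&vf);
	ring_end_use(&vf);
	printf("idle work queued on VF: %s\n",
	       vf.idle_work_pending ? "yes" : "no"); /* prints "no" */
	return 0;
}

Because end_use never queues the work on a VF, the idle_work handler no
longer needs its own SR-IOV early-return, which is exactly why the hunks
above delete it from both amdgpu_uvd.c and amdgpu_vce.c.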