}
}
-bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev)
+bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
return amdgpu_sriov_is_debug(adev) ? true : false;
}
+bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_is_normal(adev) ? true : false;
+}
+
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev) ||
+ amdgpu_virt_access_debugfs_is_kiq(adev))
return 0;
- if (amdgpu_virt_can_access_debugfs(adev))
+ if (amdgpu_virt_access_debugfs_is_mmio(adev))
adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
else
return -EPERM;
#define amdgpu_sriov_is_pp_one_vf(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
#define amdgpu_sriov_is_debug(adev) \
((!adev->in_gpu_reset) && adev->virt.tdr_debug)
+#define amdgpu_sriov_is_normal(adev) \
+ ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
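
The policy this hunk introduces (KIQ-based debugfs access while an SRIOV VF runs normally, direct MMIO access only in TDR-debug mode, and a refusal otherwise) can be modelled outside the driver. Below is a minimal standalone C sketch of that decision logic; struct dev_model, SRIOV_CAPS_IS_VF, and SRIOV_CAPS_RUNTIME are illustrative stand-ins, not the driver's real types or flags, and only the branching mirrors the diff above.

/*
 * Standalone sketch (not kernel code): models the debugfs access policy
 * from the hunk above with simplified stand-in types so the three cases
 * are easy to see. Names here are placeholders for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define SRIOV_CAPS_IS_VF   (1u << 0)   /* stand-in for AMDGPU_SRIOV_CAPS_IS_VF */
#define SRIOV_CAPS_RUNTIME (1u << 1)   /* stand-in for AMDGPU_SRIOV_CAPS_RUNTIME */

struct dev_model {
	unsigned int caps;
	bool in_gpu_reset;
	bool tdr_debug;
};

static bool is_vf(const struct dev_model *d)     { return d->caps & SRIOV_CAPS_IS_VF; }
static bool is_debug(const struct dev_model *d)  { return !d->in_gpu_reset && d->tdr_debug; }
static bool is_normal(const struct dev_model *d) { return !d->in_gpu_reset && !d->tdr_debug; }

/* Mirrors the reworked enable path: 0 on success, -1 standing in for -EPERM. */
static int enable_access_debugfs(struct dev_model *d)
{
	/* Bare metal, or a VF in normal run (KIQ path): nothing to change. */
	if (!is_vf(d) || is_normal(d))
		return 0;

	/* VF in TDR-debug mode: allow direct MMIO by clearing the runtime cap. */
	if (is_debug(d)) {
		d->caps &= ~SRIOV_CAPS_RUNTIME;
		return 0;
	}

	/* VF that is neither normal nor in debug (e.g. mid GPU reset): refuse. */
	return -1;
}

int main(void)
{
	struct dev_model vf_normal = { .caps = SRIOV_CAPS_IS_VF | SRIOV_CAPS_RUNTIME };
	struct dev_model vf_debug  = { .caps = SRIOV_CAPS_IS_VF | SRIOV_CAPS_RUNTIME, .tdr_debug = true };
	struct dev_model vf_reset  = { .caps = SRIOV_CAPS_IS_VF | SRIOV_CAPS_RUNTIME, .in_gpu_reset = true };

	printf("normal VF : %d\n", enable_access_debugfs(&vf_normal)); /* 0, KIQ path */
	printf("debug VF  : %d\n", enable_access_debugfs(&vf_debug));  /* 0, MMIO path */
	printf("in reset  : %d\n", enable_access_debugfs(&vf_reset));  /* -1, refused */
	return 0;
}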