drm/amdgpu: GPU TLB flush API moved to amdgpu_amdkfd
author: Alex Sierra <alex.sierra@amd.com>
Fri, 20 Dec 2019 05:57:03 +0000 (23:57 -0600)
committer: Alex Deucher <alexander.deucher@amd.com>
Thu, 16 Jan 2020 18:34:33 +0000 (13:34 -0500)
[Why]
The TLB flush method that went through the kfd2kgd interface has been
deprecated. The implementation now lives in the amdgpu_amdkfd API.

[How]
TLB flush functions are now implemented in amdgpu_amdkfd.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c

index 88e10b95641392bfd632ae71cdcad5997ea622e3..8609287620ead9e747457c2dcb6f292c0a884fbd 100644 (file)
@@ -628,6 +628,38 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
        return false;
 }
 
+/**
+ * amdgpu_amdkfd_flush_gpu_tlb_vmid - invalidate GPU TLB entries for a VMID
+ * @kgd: opaque KFD device handle (really a struct amdgpu_device pointer)
+ * @vmid: hardware VMID whose TLB entries should be invalidated
+ *
+ * AMDGPU_FAMILY_AI parts have multiple VM hubs, so every hub is flushed;
+ * other families only flush the GFX hub. Flush type 0 is passed in both
+ * cases (presumably the lightest/legacy flush — confirm against the GMC
+ * implementation for the ASIC).
+ *
+ * Return: always 0.
+ */
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+       if (adev->family == AMDGPU_FAMILY_AI) {
+               int i;
+
+               /* AI family: flush each VM hub (e.g. GFX and MM hubs) */
+               for (i = 0; i < adev->num_vmhubs; i++)
+                       amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
+       } else {
+               amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
+       }
+
+       return 0;
+}
+
+/**
+ * amdgpu_amdkfd_flush_gpu_tlb_pasid - invalidate GPU TLB entries for a PASID
+ * @kgd: opaque KFD device handle (really a struct amdgpu_device pointer)
+ * @pasid: process address space ID whose TLB entries should be invalidated
+ *
+ * On AMDGPU_FAMILY_AI all VM hubs are flushed (all_hub = true); other
+ * families flush a single hub. XGMI-connected Vega20 uses flush type 2
+ * instead of 0 (NOTE(review): presumably a heavier flush needed for
+ * XGMI-linked nodes — confirm against the gmc_v9_0 flush implementation).
+ *
+ * Return: result of amdgpu_gmc_flush_gpu_tlb_pasid() (0 on success).
+ */
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+       uint32_t flush_type = 0;
+       bool all_hub = false;
+
+       if (adev->gmc.xgmi.num_physical_nodes &&
+               adev->asic_type == CHIP_VEGA20)
+               flush_type = 2;
+
+       if (adev->family == AMDGPU_FAMILY_AI)
+               all_hub = true;
+
+       return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
+}
+
 bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
index 069d5d230810c96bfd6a582dc81072dc47ce0de3..47b0f2957d1f6fe548bccf9fa188754d04e20add 100644 (file)
@@ -136,6 +136,8 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                                uint32_t *ib_cmd, uint32_t ib_len);
 void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
 bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid);
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
 
index 536a153ac9a41e70537da0e6e8fb6a0f2b2e177d..25b90f70aecd096b959cf87892686ff6ad95c70a 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include "amdgpu_amdkfd.h"
+#include "amdgpu.h"
 
 struct mm_struct;
 
@@ -1152,16 +1153,17 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
 void kfd_flush_tlb(struct kfd_process_device *pdd)
 {
        struct kfd_dev *dev = pdd->dev;
-       const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
 
        if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
                /* Nothing to flush until a VMID is assigned, which
                 * only happens when the first queue is created.
                 */
                if (pdd->qpd.vmid)
-                       f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
+                       /* No HWS: the process has its own VMID; flush it */
+                       amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
+                                                       pdd->qpd.vmid);
        } else {
+               /* HWS mode: no fixed VMID here (presumably the scheduler
+                * manages VMIDs) — flush by the process PASID instead.
+                */
+               amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
+                                               pdd->process->pasid);
        }
 }