int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
-int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
- bool *table_freed);
+int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
+ struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
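A minimal sketch of the resulting caller contract (not part of the patch): with the table_freed out-parameter gone, a caller maps and then flushes unconditionally. Here adev, mem, drm_priv and pdd are placeholders for whatever the real call site has in scope; both functions appear in the hunks below.

	int err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);

	/* No table_freed hint any more, so flush conservatively. */
	if (!err)
		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);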
static int update_gpuvm_pte(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
- struct amdgpu_sync *sync,
- bool *table_freed)
+ struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_device *adev = entry->adev;
	int ret;

	ret = kfd_mem_dmamap_attachment(mem, entry);
	if (ret)
		return ret;

/* Update the page tables */
- ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
+ ret = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
if (ret) {
pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}
static int map_bo_to_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync,
- bool no_update_pte,
- bool *table_freed)
+ bool no_update_pte)
{
int ret;
if (no_update_pte)
return 0;
- ret = update_gpuvm_pte(mem, entry, sync, table_freed);
+ ret = update_gpuvm_pte(mem, entry, sync);
if (ret) {
pr_err("update_gpuvm_pte() failed\n");
goto update_gpuvm_pte_failed;
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem,
- void *drm_priv, bool *table_freed)
+ void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
int ret;
		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
			 entry->va, entry->va + bo_size, entry);

ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
- is_invalid_userptr, table_freed);
+ is_invalid_userptr);
if (ret) {
pr_err("Failed to map bo to gpuvm\n");
goto out_unreserve;
		if (!attachment->is_mapped)
			continue;

kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync);
if (ret) {
pr_err("%s: update PTE failed\n", __func__);
/* make sure this gets validated again */
		if (!attachment->is_mapped)
			continue;

kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {
pr_debug("Memory eviction: update PTE failed. Try again\n");
goto validate_map_fail;
long err = 0;
int i;
uint32_t *devices_arr = NULL;
- bool table_freed = false;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
peer_pdd->dev->adev, (struct kgd_mem *)mem,
- peer_pdd->drm_priv, &table_freed);
+ peer_pdd->drm_priv);
if (err) {
struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
}
/* Flush TLBs after waiting for the page table updates to complete */
- if (table_freed || !kfd_flush_tlb_after_unmap(dev)) {
- for (i = 0; i < args->n_devices; i++) {
- peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
- if (WARN_ON_ONCE(!peer_pdd))
- continue;
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
- }
+ for (i = 0; i < args->n_devices; i++) {
+ peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+ if (WARN_ON_ONCE(!peer_pdd))
+ continue;
+ kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
}
kfree(devices_arr);
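Note that this hunk changes behavior, not just plumbing: the flush used to be gated on table_freed (or on the device not flushing TLBs after unmap), and now runs for every requested device on every successful map. Collapsing the interleaved -/+ lines above, the post-patch tail of the ioctl reads:

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
	}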
if (IS_ERR(peer_pdd))
return PTR_ERR(peer_pdd);
- ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem, peer_pdd->drm_priv,
- NULL);
+ ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
+ peer_pdd->drm_priv);
if (ret) {
pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
return ret;
{
struct amdgpu_device *adev = pdd->dev->adev;
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
- bool table_freed = false;
uint64_t pte_flags;
unsigned long last_start;
int last_domain;
last_start - prange->start,
NULL, dma_addr,
&vm->last_update,
- &table_freed);
+ NULL);
for (j = last_start - prange->start; j <= i; j++)
dma_addr[j] |= last_domain;
if (fence)
*fence = dma_fence_get(vm->last_update);
- if (table_freed)
- kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
out:
return r;
}
break;
}
}
+
+ kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
}
return r;
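The SVM hunks make the same trade in two steps: the per-range mapper no longer flushes based on table_freed, and its caller flushes once per device after each successful page-table update. A structural sketch of the resulting loop body, with map_one_range() as a hypothetical stand-in for the per-range helper whose name falls outside this excerpt:

	r = map_one_range(pdd, prange, &vm->last_update);	/* hypothetical name/signature */
	if (r)
		break;

	/* The caller now flushes unconditionally, once per device. */
	kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);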