*
* Return: 0 on success, -ENOMEM on failure.
*/
-static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
- (adev->kfd.vram_used + vram_needed >
+ (adev && adev->kfd.vram_used + vram_needed >
adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
reserved_for_pt)) {
/* Update memory accounting by decreasing available system
* memory, TTM memory and GPU memory as computed above
*/
- adev->kfd.vram_used += vram_needed;
+ WARN_ONCE(vram_needed && !adev,
+ "adev reference can't be null when vram is used");
+ if (adev)
+ adev->kfd.vram_used += vram_needed;
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
return ret;
}
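With the helper exported, other amdkfd code can pair a reservation with the matching release. A minimal sketch of the intended pattern, assuming a local `size` in bytes (the SVM code below uses exactly this shape, passing a NULL adev so only the resident system memory limit is charged):

	int r;

	r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
					    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
	if (r)
		return r;	/* would exceed the resident system memory limit */

	/* ... on any later failure, return the reservation ... */
	amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
					  KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);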
-static void unreserve_mem_limit(struct amdgpu_device *adev,
+void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		kfd_mem_limit.system_mem_used -= size;
		kfd_mem_limit.ttm_mem_used -= size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
- adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ WARN_ONCE(!adev,
+ "adev reference can't be null when alloc mem flags vram is set");
+ if (adev)
+ adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
	} else if (!(alloc_flag & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				   KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
goto release;
}
-
- WARN_ONCE(adev->kfd.vram_used < 0,
+ WARN_ONCE(adev && adev->kfd.vram_used < 0,
"KFD VRAM memory accounting unbalanced");
WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
"KFD TTM memory accounting unbalanced");
u32 alloc_flags = bo->kfd_bo->alloc_flags;
u64 size = amdgpu_bo_size(bo);
- unreserve_mem_limit(adev, size, alloc_flags);
+ amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags);
kfree(bo->kfd_bo);
}
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
- unreserve_mem_limit(adev, size, flags);
+ amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
	if (gobj)
		drm_gem_object_put(gobj);
}
}
-static void svm_range_free(struct svm_range *prange)
+static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
{
+ uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
+ struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
+
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
prange->start, prange->last);
svm_range_vram_node_free(prange);
svm_range_free_dma_mappings(prange);
+
+ if (update_mem_usage && !p->xnack_enabled) {
+ pr_debug("unreserve mem limit: %lld\n", size);
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ }
mutex_destroy(&prange->lock);
mutex_destroy(&prange->migrate_mutex);
	kfree(prange);
}
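The new update_mem_usage flag decides whether freeing a range also returns its reservation. The call sites below follow one rule, sketched here for orientation (illustrative, not part of the patch):

	/* The range is going away for good (unmap, process teardown):
	 * return the reservation taken in svm_range_new().
	 */
	svm_range_free(prange, true);

	/* Split, clone or replace bookkeeping: the pages stay accounted
	 * to a surviving range, so the limits must not be touched twice.
	 */
	svm_range_free(prange, false);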
static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
- uint64_t last)
+ uint64_t last, bool update_mem_usage)
{
uint64_t size = last - start + 1;
	struct svm_range *prange;
	struct kfd_process *p;

	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
if (!prange)
return NULL;
+
+ p = container_of(svms, struct kfd_process, svms);
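+	/* adev is NULL on purpose: an SVM range is not tied to one GPU, so
+	 * only the resident system memory limit is charged. With XNACK on,
+	 * no reservation is taken, as pages can be faulted in on demand.
+	 */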
+ if (!p->xnack_enabled && update_mem_usage &&
+ amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
+ pr_info("SVM mapping failed, exceeds resident system memory limit\n");
+ kfree(prange);
+ return NULL;
+ }
prange->npages = size;
prange->svms = svms;
prange->start = start;
mutex_init(&prange->migrate_mutex);
mutex_init(&prange->lock);
- p = container_of(svms, struct kfd_process, svms);
if (p->xnack_enabled)
bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
MAX_GPU_INSTANCE);
svms = prange->svms;
if (old_start == start)
- *new = svm_range_new(svms, last + 1, old_last);
+ *new = svm_range_new(svms, last + 1, old_last, false);
else
- *new = svm_range_new(svms, old_start, start - 1);
+ *new = svm_range_new(svms, old_start, start - 1, false);
if (!*new)
return -ENOMEM;
if (r) {
pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
r, old_start, old_last, start, last);
- svm_range_free(*new);
+ svm_range_free(*new, false);
*new = NULL;
}
{
struct svm_range *new;
- new = svm_range_new(old->svms, old->start, old->last);
+ new = svm_range_new(old->svms, old->start, old->last, false);
if (!new)
return NULL;
while (last >= start) {
l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
- prange = svm_range_new(svms, start, l);
+ prange = svm_range_new(svms, start, l, true);
if (!prange)
return -ENOMEM;
list_add(&prange->list, insert_list);
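		/* Each chunk created here charges the memory limit
		 * (update_mem_usage == true); if a later chunk fails, the
		 * caller's error path releases the ones already queued.
		 */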
struct interval_tree_node *node;
struct svm_range *prange;
struct svm_range *tmp;
+ struct list_head new_list;
int r = 0;
pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
INIT_LIST_HEAD(update_list);
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
+ INIT_LIST_HEAD(&new_list);
node = interval_tree_iter_first(&svms->objects, start, last);
while (node) {
if (node->start > start) {
r = svm_range_split_new(svms, start, node->start - 1,
READ_ONCE(max_svm_range_pages),
- insert_list, update_list);
+ &new_list, update_list);
if (r)
goto out;
}
if (start <= last)
r = svm_range_split_new(svms, start, last,
READ_ONCE(max_svm_range_pages),
- insert_list, update_list);
+ &new_list, update_list);
out:
- if (r)
+ if (r) {
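+		/* insert_list holds pieces split off existing ranges, which
+		 * never charged the limit; new_list holds freshly created
+		 * ranges, which did, so their reservation is released too.
+		 */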
list_for_each_entry_safe(prange, tmp, insert_list, list)
- svm_range_free(prange);
+ svm_range_free(prange, false);
+ list_for_each_entry_safe(prange, tmp, &new_list, list)
+ svm_range_free(prange, true);
+ } else {
+ list_splice(&new_list, insert_list);
+ }
return r;
}
svms, prange, prange->start, prange->last);
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
- svm_range_free(prange);
+ svm_range_free(prange, true);
break;
case SVM_OP_UPDATE_RANGE_NOTIFIER:
pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
last = addr;
}
- prange = svm_range_new(&p->svms, start, last);
+ prange = svm_range_new(&p->svms, start, last, true);
if (!prange) {
pr_debug("Failed to create prange in address [0x%llx]\n", addr);
return NULL;
}
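	/* prange was created with update_mem_usage == true just above, so the
	 * error path below frees it with true to keep accounting balanced.
	 */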
if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
pr_debug("failed to get gpuid from kgd\n");
- svm_range_free(prange);
+ svm_range_free(prange, true);
return NULL;
}
list_for_each_entry_safe(prange, next, &p->svms.list, list) {
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
- svm_range_free(prange);
+ svm_range_free(prange, true);
}
mutex_destroy(&p->svms.lock);
prange->last);
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
- svm_range_free(prange);
+ svm_range_free(prange, false);
}
mmap_write_downgrade(mm);