drm/amdgpu: use temporary GTT as bounce buffer
authorLang Yu <Lang.Yu@amd.com>
Tue, 22 Jun 2021 16:23:35 +0000 (12:23 -0400)
committerAndrey Grodzovsky <andrey.grodzovsky@amd.com>
Wed, 23 Jun 2021 18:59:39 +0000 (14:59 -0400)
Currently, we have a limited GTT memory size and need a bounce buffer
when doing buffer migration between VRAM and SYSTEM domain.

The problem is that under GTT memory pressure we can't do buffer migration
between VRAM and SYSTEM domain, but in some cases we really need that.
This is especially true when validating a VRAM backing store BO which
currently resides in the SYSTEM domain.

v2: still account temporary GTT allocations
v3 (chk): revert to the simpler change for now

Signed-off-by: Lang Yu <Lang.Yu@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Acked-by: Nirmoy Das <nirmoy.das@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210622162339.761651-2-andrey.grodzovsky@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

index ec96e0b26b1163bf3c8b6544880a7d5c587ffcc1..3b452085616222ecc70b2630262ef460ca57d27d 100644 (file)
@@ -132,14 +132,15 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
        struct amdgpu_gtt_node *node;
        int r;
 
-       spin_lock(&mgr->lock);
-       if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
-           atomic64_read(&mgr->available) < num_pages) {
+       if (!(place->flags & TTM_PL_FLAG_TEMPORARY)) {
+               spin_lock(&mgr->lock);
+               if (atomic64_read(&mgr->available) < num_pages) {
+                       spin_unlock(&mgr->lock);
+                       return -ENOSPC;
+               }
+               atomic64_sub(num_pages, &mgr->available);
                spin_unlock(&mgr->lock);
-               return -ENOSPC;
        }
-       atomic64_sub(num_pages, &mgr->available);
-       spin_unlock(&mgr->lock);
 
        node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
        if (!node) {
@@ -175,7 +176,8 @@ err_free:
        kfree(node);
 
 err_out:
-       atomic64_add(num_pages, &mgr->available);
+       if (!(place->flags & TTM_PL_FLAG_TEMPORARY))
+               atomic64_add(num_pages, &mgr->available);
 
        return r;
 }
@@ -198,7 +200,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
        if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
                drm_mm_remove_node(&node->base.mm_nodes[0]);
        spin_unlock(&mgr->lock);
-       atomic64_add(res->num_pages, &mgr->available);
+
+       if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
+               atomic64_add(res->num_pages, &mgr->available);
 
        kfree(node);
 }
index 80dff29f2bc721197413b159b81a847c189337eb..79f875792b30801e9191feaa78a9475cb3d419c0 100644 (file)
@@ -521,7 +521,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                        hop->fpfn = 0;
                        hop->lpfn = 0;
                        hop->mem_type = TTM_PL_TT;
-                       hop->flags = 0;
+                       hop->flags = TTM_PL_FLAG_TEMPORARY;
                        return -EMULTIHOP;
                }