drm/amdgpu/ttm: optimize vram access in amdgpu_ttm_access_memory()
author Kevin Wang <kevin1.wang@amd.com>
Fri, 16 Jul 2021 18:03:08 +0000 (14:03 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 16 Jul 2021 18:03:29 +0000 (14:03 -0400)
1. Use the VRAM aperture to access VRAM when possible (see the simplified
   sketch below).
2. Avoid the MM_INDEX/MM_DATA registers where possible, since they do not
   work when the MMIO protection feature is enabled.
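
A minimal sketch of the per-chunk flow this introduces in
amdgpu_ttm_access_memory(), simplified from the hunks below (the
amdgpu_res_cursor walk and return-value bookkeeping are omitted;
amdgpu_device_aper_access() and the new amdgpu_ttm_vram_mm_access()
helper are the functions the patch actually uses):

	size_t count, size = cursor.size;
	loff_t pos = cursor.start;

	/* 1) copy as much as possible through the visible VRAM aperture */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);

	/* 2) anything the aperture could not cover goes through the MM
	 *    register window instead
	 */
	if (size - count)
		amdgpu_ttm_vram_mm_access(adev, pos + count, buf + count,
					  size - count, write);

Splitting the register-based fallback into its own helper keeps
amdgpu_ttm_access_memory() itself free of the MM_INDEX/MM_DATA masking
details.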

Signed-off-by: Kevin Wang <kevin1.wang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

index 6e02bdd8621a58b3f9079898dad16f72fa8ca936..6a3ffa38f0c3bf3e05ef98c5c497da5e63df240c 100644
@@ -1393,6 +1393,41 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
        return ttm_bo_eviction_valuable(bo, place);
 }
 
+static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
+                                     void *buf, size_t size, bool write)
+{
+       while (size) {
+               uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
+               uint64_t bytes = 4 - (pos & 0x3);
+               uint32_t shift = (pos & 0x3) * 8;
+               uint32_t mask = 0xffffffff << shift;
+               uint32_t value = 0;
+
+               if (size < bytes) {
+                       mask &= 0xffffffff >> (bytes - size) * 8;
+                       bytes = size;
+               }
+
+               if (mask != 0xffffffff) {
+                       amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
+                       if (write) {
+                               value &= ~mask;
+                               value |= (*(uint32_t *)buf << shift) & mask;
+                               amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
+                       } else {
+                               value = (value & mask) >> shift;
+                               memcpy(buf, &value, bytes);
+                       }
+               } else {
+                       amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
+               }
+
+               pos += bytes;
+               buf += bytes;
+               size -= bytes;
+       }
+}
+
 /**
  * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
  *
@@ -1412,8 +1447,6 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct amdgpu_res_cursor cursor;
-       unsigned long flags;
-       uint32_t value = 0;
        int ret = 0;
 
        if (bo->resource->mem_type != TTM_PL_VRAM)
@@ -1421,41 +1454,21 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 
        amdgpu_res_first(bo->resource, offset, len, &cursor);
        while (cursor.remaining) {
-               uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
-               uint64_t bytes = 4 - (cursor.start & 3);
-               uint32_t shift = (cursor.start & 3) * 8;
-               uint32_t mask = 0xffffffff << shift;
-
-               if (cursor.size < bytes) {
-                       mask &= 0xffffffff >> (bytes - cursor.size) * 8;
-                       bytes = cursor.size;
+               size_t count, size = cursor.size;
+               loff_t pos = cursor.start;
+
+               count = amdgpu_device_aper_access(adev, pos, buf, size, write);
+               size -= count;
+               if (size) {
+                       /* use MM to access the rest of VRAM and handle unaligned addresses */
+                       pos += count;
+                       buf += count;
+                       amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
                }
 
-               if (mask != 0xffffffff) {
-                       spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-                       WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
-                       WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
-                       value = RREG32_NO_KIQ(mmMM_DATA);
-                       if (write) {
-                               value &= ~mask;
-                               value |= (*(uint32_t *)buf << shift) & mask;
-                               WREG32_NO_KIQ(mmMM_DATA, value);
-                       }
-                       spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-                       if (!write) {
-                               value = (value & mask) >> shift;
-                               memcpy(buf, &value, bytes);
-                       }
-               } else {
-                       bytes = cursor.size & ~0x3ULL;
-                       amdgpu_device_vram_access(adev, cursor.start,
-                                                 (uint32_t *)buf, bytes,
-                                                 write);
-               }
-
-               ret += bytes;
-               buf = (uint8_t *)buf + bytes;
-               amdgpu_res_next(&cursor, bytes);
+               ret += cursor.size;
+               buf += cursor.size;
+               amdgpu_res_next(&cursor, cursor.size);
        }
 
        return ret;
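
The subtle part of the new amdgpu_ttm_vram_mm_access() helper is the
mask/shift read-modify-write it performs when pos or size is not dword
aligned, because the MM window can only move aligned 32-bit values.
Below is a standalone userspace sketch that replays the same mask
arithmetic against a byte array standing in for VRAM; fake_vram,
mm_read32(), mm_write32() and vram_mm_access_sim() are illustrative
stand-ins rather than amdgpu code, and a little-endian host is assumed
for the printed bytes.

/* mask_demo.c - userspace illustration of the unaligned access handling */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t fake_vram[16];		/* stands in for VRAM */

/* simulated MM_INDEX/MM_DATA window: aligned 32-bit accesses only */
static uint32_t mm_read32(uint64_t aligned_pos)
{
	uint32_t v;

	memcpy(&v, &fake_vram[aligned_pos], 4);
	return v;
}

static void mm_write32(uint64_t aligned_pos, uint32_t v)
{
	memcpy(&fake_vram[aligned_pos], &v, 4);
}

/* same per-dword mask/shift logic as amdgpu_ttm_vram_mm_access() */
static void vram_mm_access_sim(uint64_t pos, void *buf, size_t size, int write)
{
	while (size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint64_t bytes = 4 - (pos & 0x3);
		uint32_t shift = (pos & 0x3) * 8;
		uint32_t mask = 0xffffffff << shift;
		uint32_t value = 0;

		if (size < bytes) {
			mask &= 0xffffffff >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffff) {
			/* partial dword: read, merge only the covered bytes, write back */
			value = mm_read32(aligned_pos);
			if (write) {
				value &= ~mask;
				/* like the kernel helper, a full u32 is read from buf */
				value |= (*(uint32_t *)buf << shift) & mask;
				mm_write32(aligned_pos, value);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			/* full aligned dword: copy straight through the window */
			if (write) {
				memcpy(&value, buf, 4);
				mm_write32(aligned_pos, value);
			} else {
				value = mm_read32(aligned_pos);
				memcpy(buf, &value, 4);
			}
		}

		pos += bytes;
		buf = (uint8_t *)buf + bytes;
		size -= bytes;
	}
}

int main(void)
{
	uint32_t src = 0x2211;		/* source bytes 0x11, 0x22 */
	uint8_t out[3] = { 0 };

	memset(fake_vram, 0xaa, sizeof(fake_vram));

	/* write two bytes at unaligned offset 5: only bits 8..23 of the
	 * dword at offset 4 change, the 0xaa neighbours survive
	 */
	vram_mm_access_sim(5, &src, 2, 1);

	/* read three bytes back starting at offset 4 */
	vram_mm_access_sim(4, out, 3, 0);
	printf("%02x %02x %02x\n", out[0], out[1], out[2]);	/* aa 11 22 */

	return 0;
}

The takeaway is that a partial write never clobbers the neighbouring
bytes of the dword it lands in, which is what lets the TTM access path
keep byte granularity on top of a 32-bit register window.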