drm/amdgpu: use cached gca values for vi_read_register (v2)
author: Alex Deucher <alexander.deucher@amd.com>
Mon, 10 Oct 2016 16:05:32 +0000 (12:05 -0400)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 25 Oct 2016 18:38:32 +0000 (14:38 -0400)
Using the cached values incurs less latency on both bare metal and
SR-IOV, and avoids reading back bogus values if the engine is
powergated.

v2: fix typo in tile idx calculation

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
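
For context (not part of the patch): a minimal userspace sketch of how this
read path is typically reached. The libdrm_amdgpu helper
amdgpu_read_mm_registers() issues the AMDGPU_INFO_READ_MMR_REG query, which
the kernel dispatches to the asic read_register callback, i.e.
vi_read_register() on VI parts. The render-node path and the 0x263e dword
offset (assumed here to be mmGB_ADDR_CONFIG on VI) are illustrative
assumptions; with this patch such a read is served from the cached gfx
config instead of an MMIO access.

/* build with (typical flags): gcc -I/usr/include/libdrm example.c -ldrm_amdgpu -ldrm */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <amdgpu.h>

int main(void)
{
        amdgpu_device_handle dev;
        uint32_t major, minor, gb_addr_config = 0;
        int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed render node */

        if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
                return 1;

        /* instance = 0xffffffff -> no GRBM SE/SH indexing, flags = 0 */
        if (!amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
                                      &gb_addr_config))
                printf("GB_ADDR_CONFIG: 0x%08x\n", gb_addr_config);

        amdgpu_device_deinitialize(dev);
        close(fd);
        return 0;
}

Reads that hit the cache also skip the grbm_idx_mutex/GRBM select sequence,
which the indexed branch in the diff below still needs for registers that
are not cached.
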
drivers/gpu/drm/amd/amdgpu/vi.c

index 657de2a..82f72cd 100644
@@ -556,21 +556,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] =
        {mmPA_SC_RASTER_CONFIG_1, false, true},
 };
 
-static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
-                                        u32 sh_num, u32 reg_offset)
-{
-       uint32_t val;
+static uint32_t vi_get_register_value(struct amdgpu_device *adev,
+                                     bool indexed, u32 se_num,
+                                     u32 sh_num, u32 reg_offset)
+{
+       if (indexed) {
+               uint32_t val;
+               unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+               unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+               switch (reg_offset) {
+               case mmCC_RB_BACKEND_DISABLE:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+               case mmGC_USER_RB_BACKEND_DISABLE:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+               case mmPA_SC_RASTER_CONFIG:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+               case mmPA_SC_RASTER_CONFIG_1:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+               }
 
-       mutex_lock(&adev->grbm_idx_mutex);
-       if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+               mutex_lock(&adev->grbm_idx_mutex);
+               if (se_num != 0xffffffff || sh_num != 0xffffffff)
+                       amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-       val = RREG32(reg_offset);
+               val = RREG32(reg_offset);
 
-       if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-       mutex_unlock(&adev->grbm_idx_mutex);
-       return val;
+               if (se_num != 0xffffffff || sh_num != 0xffffffff)
+                       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               mutex_unlock(&adev->grbm_idx_mutex);
+               return val;
+       } else {
+               unsigned idx;
+
+               switch (reg_offset) {
+               case mmGB_ADDR_CONFIG:
+                       return adev->gfx.config.gb_addr_config;
+               case mmMC_ARB_RAMCFG:
+                       return adev->gfx.config.mc_arb_ramcfg;
+               case mmGB_TILE_MODE0:
+               case mmGB_TILE_MODE1:
+               case mmGB_TILE_MODE2:
+               case mmGB_TILE_MODE3:
+               case mmGB_TILE_MODE4:
+               case mmGB_TILE_MODE5:
+               case mmGB_TILE_MODE6:
+               case mmGB_TILE_MODE7:
+               case mmGB_TILE_MODE8:
+               case mmGB_TILE_MODE9:
+               case mmGB_TILE_MODE10:
+               case mmGB_TILE_MODE11:
+               case mmGB_TILE_MODE12:
+               case mmGB_TILE_MODE13:
+               case mmGB_TILE_MODE14:
+               case mmGB_TILE_MODE15:
+               case mmGB_TILE_MODE16:
+               case mmGB_TILE_MODE17:
+               case mmGB_TILE_MODE18:
+               case mmGB_TILE_MODE19:
+               case mmGB_TILE_MODE20:
+               case mmGB_TILE_MODE21:
+               case mmGB_TILE_MODE22:
+               case mmGB_TILE_MODE23:
+               case mmGB_TILE_MODE24:
+               case mmGB_TILE_MODE25:
+               case mmGB_TILE_MODE26:
+               case mmGB_TILE_MODE27:
+               case mmGB_TILE_MODE28:
+               case mmGB_TILE_MODE29:
+               case mmGB_TILE_MODE30:
+               case mmGB_TILE_MODE31:
+                       idx = (reg_offset - mmGB_TILE_MODE0);
+                       return adev->gfx.config.tile_mode_array[idx];
+               case mmGB_MACROTILE_MODE0:
+               case mmGB_MACROTILE_MODE1:
+               case mmGB_MACROTILE_MODE2:
+               case mmGB_MACROTILE_MODE3:
+               case mmGB_MACROTILE_MODE4:
+               case mmGB_MACROTILE_MODE5:
+               case mmGB_MACROTILE_MODE6:
+               case mmGB_MACROTILE_MODE7:
+               case mmGB_MACROTILE_MODE8:
+               case mmGB_MACROTILE_MODE9:
+               case mmGB_MACROTILE_MODE10:
+               case mmGB_MACROTILE_MODE11:
+               case mmGB_MACROTILE_MODE12:
+               case mmGB_MACROTILE_MODE13:
+               case mmGB_MACROTILE_MODE14:
+               case mmGB_MACROTILE_MODE15:
+                       idx = (reg_offset - mmGB_MACROTILE_MODE0);
+                       return adev->gfx.config.macrotile_mode_array[idx];
+               default:
+                       return RREG32(reg_offset);
+               }
+       }
 }
 
 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -605,10 +684,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
                        if (reg_offset != asic_register_entry->reg_offset)
                                continue;
                        if (!asic_register_entry->untouched)
-                               *value = asic_register_entry->grbm_indexed ?
-                                       vi_read_indexed_register(adev, se_num,
-                                                                sh_num, reg_offset) :
-                                       RREG32(reg_offset);
+                               *value = vi_get_register_value(adev,
+                                                              asic_register_entry->grbm_indexed,
+                                                              se_num, sh_num, reg_offset);
                        return 0;
                }
        }
@@ -618,10 +696,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
                        continue;
 
                if (!vi_allowed_read_registers[i].untouched)
-                       *value = vi_allowed_read_registers[i].grbm_indexed ?
-                               vi_read_indexed_register(adev, se_num,
-                                                        sh_num, reg_offset) :
-                               RREG32(reg_offset);
+                       *value = vi_get_register_value(adev,
+                                                      vi_allowed_read_registers[i].grbm_indexed,
+                                                      se_num, sh_num, reg_offset);
                return 0;
        }
        return -EINVAL;