*/
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
- return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.robj,
- &adev->gart.table_addr, &adev->gart.ptr);
+ int r;
+
+ if (adev->gart.robj == NULL) {
+ /* Create the table BO once; pinning and CPU mapping are now
+ * done separately in amdgpu_gart_table_vram_pin().
+ * CPU_ACCESS_REQUIRED: the table is kmapped and written by
+ * the CPU. VRAM_CONTIGUOUS: presumably the GART hardware
+ * needs one physically contiguous range — confirm.
+ */
+ r = amdgpu_bo_create(adev, adev->gart.table_size,
+ PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ NULL, NULL, 0, &adev->gart.robj);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+/**
+ * amdgpu_gart_table_vram_pin - pin gart page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Pin the GART page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+). These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
+{
+ uint64_t gpu_addr;
+ int r;
+
+ r = amdgpu_bo_reserve(adev->gart.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = amdgpu_bo_pin(adev->gart.robj,
+ AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gart.robj);
+ return r;
+ }
+ /* map the table so the CPU can fill in PTEs; if the kmap fails
+ * drop the pin again and propagate the error via r below.
+ */
+ r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
+ if (r)
+ amdgpu_bo_unpin(adev->gart.robj);
+ amdgpu_bo_unreserve(adev->gart.robj);
+ /* NOTE(review): table_addr is assigned even on kmap failure;
+ * harmless as long as callers check the nonzero return code.
+ */
+ adev->gart.table_addr = gpu_addr;
+ return r;
+}
+
+/**
+ * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
+void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->gart.robj == NULL) {
+ return;
+ }
+ /* best effort: if the reserve fails there is nothing more we can
+ * do on this teardown path, so the kunmap/unpin is simply skipped.
+ */
+ r = amdgpu_bo_reserve(adev->gart.robj, true);
+ if (likely(r == 0)) {
+ amdgpu_bo_kunmap(adev->gart.robj);
+ amdgpu_bo_unpin(adev->gart.robj);
+ amdgpu_bo_unreserve(adev->gart.robj);
+ adev->gart.ptr = NULL;
+ }
}
/**
*/
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
- amdgpu_bo_free_kernel(&adev->gart.robj,
- &adev->gart.table_addr,
- &adev->gart.ptr);
+ if (adev->gart.robj == NULL) {
+ return;
+ }
+ /* drop the last reference; presumably the table has already been
+ * unpinned via amdgpu_gart_table_vram_unpin() — confirm callers.
+ */
+ amdgpu_bo_unref(&adev->gart.robj);
}
/*
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
- int i;
+ int r, i;
u32 field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
-
+ /* pin the table in VRAM (and kmap it) before programming the
+ * GART registers below.
+ */
+ r = amdgpu_gart_table_vram_pin(adev);
+ if (r)
+ return r;
/* Setup TLB control */
WREG32(mmMC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
WREG32(mmVM_L2_CNTL3,
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
(0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
+ amdgpu_gart_table_vram_unpin(adev);
}
static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
*/
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
- int i;
+ int r, i;
u32 tmp, field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
-
+ /* pin the table in VRAM (and kmap it) before programming the
+ * GART registers below.
+ */
+ r = amdgpu_gart_table_vram_pin(adev);
+ if (r)
+ return r;
/* Setup TLB control */
tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32(mmVM_L2_CNTL, tmp);
WREG32(mmVM_L2_CNTL2, 0);
+ amdgpu_gart_table_vram_unpin(adev);
}
/**
*/
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
- int i;
+ int r, i;
u32 tmp, field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
-
+ /* pin the table in VRAM (and kmap it) before programming the
+ * GART registers below.
+ */
+ r = amdgpu_gart_table_vram_pin(adev);
+ if (r)
+ return r;
/* Setup TLB control */
tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32(mmVM_L2_CNTL, tmp);
WREG32(mmVM_L2_CNTL2, 0);
+ amdgpu_gart_table_vram_unpin(adev);
}
/**