drm/amdgpu: move struct gart_funcs into amdgpu_gmc.h
author: Christian König <christian.koenig@amd.com>
Fri, 12 Jan 2018 14:26:08 +0000 (15:26 +0100)
committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 19 Feb 2018 19:17:44 +0000 (14:17 -0500)
And rename it to struct gmc_funcs.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Samuel Li <Samuel.Li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
16 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c

index 1b369a6..3cb0707 100644 (file)
@@ -333,28 +333,6 @@ struct amdgpu_vm_pte_funcs {
                            uint32_t incr, uint64_t flags);
 };
 
-/* provided by the gmc block */
-struct amdgpu_gart_funcs {
-       /* flush the vm tlb via mmio */
-       void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-                             uint32_t vmid);
-       /* write pte/pde updates using the cpu */
-       int (*set_pte_pde)(struct amdgpu_device *adev,
-                          void *cpu_pt_addr, /* cpu addr of page table */
-                          uint32_t gpu_page_idx, /* pte/pde to update */
-                          uint64_t addr, /* addr to write into pte/pde */
-                          uint64_t flags); /* access flags */
-       /* enable/disable PRT support */
-       void (*set_prt)(struct amdgpu_device *adev, bool enable);
-       /* set pte flags based per asic */
-       uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
-                                    uint32_t flags);
-       /* get the pde for a given mc addr */
-       void (*get_vm_pde)(struct amdgpu_device *adev, int level,
-                          u64 *dst, u64 *flags);
-       uint32_t (*get_invalidate_req)(unsigned int vmid);
-};
-
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
        /* ring read/write ptr handling, called from interrupt context */
@@ -1797,13 +1775,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
 #define amdgpu_asic_flush_hdp(adev) (adev)->asic_funcs->flush_hdp((adev))
 #define amdgpu_asic_invalidate_hdp(adev) (adev)->asic_funcs->invalidate_hdp((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
index a2204c7..113c92d 100644 (file)
@@ -1775,7 +1775,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->mman.buffer_funcs_ring = NULL;
        adev->vm_manager.vm_pte_funcs = NULL;
        adev->vm_manager.vm_pte_num_rings = 0;
-       adev->gart.gart_funcs = NULL;
+       adev->gmc.gmc_funcs = NULL;
        adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
        bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
index b730dee..18d2387 100644 (file)
@@ -241,14 +241,14 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                        continue;
 
                for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-                       amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
-                                               t, page_base, flags);
+                       amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+                                              t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
                }
        }
        mb();
        amdgpu_asic_flush_hdp(adev);
-       amdgpu_gart_flush_gpu_tlb(adev, 0);
+       amdgpu_gmc_flush_gpu_tlb(adev, 0);
        return 0;
 }
 
@@ -280,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
        for (i = 0; i < pages; i++) {
                page_base = dma_addr[i];
                for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-                       amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
+                       amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
                }
        }
@@ -331,7 +331,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
        mb();
        amdgpu_asic_flush_hdp(adev);
-       amdgpu_gart_flush_gpu_tlb(adev, 0);
+       amdgpu_gmc_flush_gpu_tlb(adev, 0);
        return 0;
 }
 
index d4a4330..456295c 100644 (file)
@@ -31,7 +31,6 @@
  */
 struct amdgpu_device;
 struct amdgpu_bo;
-struct amdgpu_gart_funcs;
 
 #define AMDGPU_GPU_PAGE_SIZE 4096
 #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
@@ -52,8 +51,6 @@ struct amdgpu_gart {
 
        /* Asic default pte flags */
        uint64_t                        gart_pte_flags;
-
-       const struct amdgpu_gart_funcs *gart_funcs;
 };
 
 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
index e48b4ec..77304a8 100644 (file)
@@ -634,7 +634,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                if (r)
                        goto error_backoff;
 
-               va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
+               va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
@@ -654,7 +654,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                if (r)
                        goto error_backoff;
 
-               va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
+               va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
                r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
                                             args->offset_in_bo, args->map_size,
                                             va_flags);
index e867be5..a4a8374 100644 (file)
@@ -48,6 +48,27 @@ struct amdgpu_vmhub {
 /*
  * GPU MC structures, functions & helpers
  */
+struct amdgpu_gmc_funcs {
+       /* flush the vm tlb via mmio */
+       void (*flush_gpu_tlb)(struct amdgpu_device *adev,
+                             uint32_t vmid);
+       /* write pte/pde updates using the cpu */
+       int (*set_pte_pde)(struct amdgpu_device *adev,
+                          void *cpu_pt_addr, /* cpu addr of page table */
+                          uint32_t gpu_page_idx, /* pte/pde to update */
+                          uint64_t addr, /* addr to write into pte/pde */
+                          uint64_t flags); /* access flags */
+       /* enable/disable PRT support */
+       void (*set_prt)(struct amdgpu_device *adev, bool enable);
+       /* set pte flags based per asic */
+       uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
+                                    uint32_t flags);
+       /* get the pde for a given mc addr */
+       void (*get_vm_pde)(struct amdgpu_device *adev, int level,
+                          u64 *dst, u64 *flags);
+       uint32_t (*get_invalidate_req)(unsigned int vmid);
+};
+
 struct amdgpu_gmc {
        resource_size_t         aper_size;
        resource_size_t         aper_base;
@@ -79,6 +100,8 @@ struct amdgpu_gmc {
        /* protects concurrent invalidation */
        spinlock_t              invalidate_lock;
        bool                    translate_further;
+
+       const struct amdgpu_gmc_funcs   *gmc_funcs;
 };
 
 #endif
index 988ccb2..da634ae 100644 (file)
@@ -679,8 +679,8 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
                value = params->pages_addr ?
                        amdgpu_vm_map_gart(params->pages_addr, addr) :
                        addr;
-               amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
-                                       i, value, flags);
+               amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+                                      i, value, flags);
                addr += incr;
        }
 }
@@ -738,7 +738,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
        level += params->adev->vm_manager.root_level;
        pt = amdgpu_bo_gpu_offset(bo);
        flags = AMDGPU_PTE_VALID;
-       amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
+       amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
        if (shadow) {
                pde = shadow_addr + (entry - parent->entries) * 8;
                params->func(params, pde, pt, 1, 0, flags);
@@ -967,8 +967,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
        }
 
        entry->huge = true;
-       amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
-                              &dst, &flags);
+       amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
 
        if (p->func == amdgpu_vm_cpu_set_ptes) {
                pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
@@ -1485,7 +1484,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 
        spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
        enable = !!atomic_read(&adev->vm_manager.num_prt_users);
-       adev->gart.gart_funcs->set_prt(adev, enable);
+       adev->gmc.gmc_funcs->set_prt(adev, enable);
        spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
 }
 
@@ -1494,7 +1493,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
  */
 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 {
-       if (!adev->gart.gart_funcs->set_prt)
+       if (!adev->gmc.gmc_funcs->set_prt)
                return;
 
        if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1529,7 +1528,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
 {
        struct amdgpu_prt_cb *cb;
 
-       if (!adev->gart.gart_funcs->set_prt)
+       if (!adev->gmc.gmc_funcs->set_prt)
                return;
 
        cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -2405,7 +2404,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
        struct amdgpu_bo_va_mapping *mapping, *tmp;
-       bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+       bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
        struct amdgpu_bo *root;
        u64 fault;
        int i, r;
index 59928b7..aaa990c 100644 (file)
@@ -3688,11 +3688,11 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+       uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
-       amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+       amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
        pd_addr |= flags;
 
        gfx_v9_0_write_data_to_reg(ring, usepfp, true,
index 98411e3..daaad3f 100644 (file)
@@ -37,7 +37,7 @@
 #include "dce/dce_6_0_sh_mask.h"
 #include "si_enums.h"
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v6_0_wait_for_idle(void *handle);
 
@@ -357,17 +357,14 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
        return 0;
 }
 
-static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-                                       uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
 {
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
-static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
-                                    void *cpu_pt_addr,
-                                    uint32_t gpu_page_idx,
-                                    uint64_t addr,
-                                    uint64_t flags)
+static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+                               uint32_t gpu_page_idx, uint64_t addr,
+                               uint64_t flags)
 {
        void __iomem *ptr = (void *)cpu_pt_addr;
        uint64_t value;
@@ -559,7 +556,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        else
                gmc_v6_0_set_fault_enable_default(adev, true);
 
-       gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+       gmc_v6_0_flush_gpu_tlb(adev, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)adev->gart.table_addr);
@@ -793,7 +790,7 @@ static int gmc_v6_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v6_0_set_gart_funcs(adev);
+       gmc_v6_0_set_gmc_funcs(adev);
        gmc_v6_0_set_irq_funcs(adev);
 
        return 0;
@@ -1127,9 +1124,9 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
        .set_powergating_state = gmc_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
-       .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
-       .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
+       .flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
+       .set_pte_pde = gmc_v6_0_set_pte_pde,
        .set_prt = gmc_v6_0_set_prt,
        .get_vm_pde = gmc_v6_0_get_vm_pde,
        .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
@@ -1140,10 +1137,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
        .process = gmc_v6_0_process_interrupt,
 };
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gart.gart_funcs == NULL)
-               adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+       if (adev->gmc.gmc_funcs == NULL)
+               adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
 }
 
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
index 71986cd..0825002 100644 (file)
@@ -43,7 +43,7 @@
 
 #include "amdgpu_atombios.h"
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v7_0_wait_for_idle(void *handle);
 
@@ -422,22 +422,21 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  */
 
 /**
- * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-                                       uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
 {
        /* bits 0-15 are the VM contexts0-15 */
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
 /**
- * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v7_0_set_pte_pde - update the page tables using MMIO
  *
  * @adev: amdgpu_device pointer
  * @cpu_pt_addr: cpu address of the page table
@@ -447,11 +446,9 @@ static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
  *
  * Update the page tables using the CPU.
  */
-static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
-                                    void *cpu_pt_addr,
-                                    uint32_t gpu_page_idx,
-                                    uint64_t addr,
-                                    uint64_t flags)
+static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+                                uint32_t gpu_page_idx, uint64_t addr,
+                                uint64_t flags)
 {
        void __iomem *ptr = (void *)cpu_pt_addr;
        uint64_t value;
@@ -672,7 +669,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
                WREG32(mmCHUB_CONTROL, tmp);
        }
 
-       gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
+       gmc_v7_0_flush_gpu_tlb(adev, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)adev->gart.table_addr);
@@ -919,7 +916,7 @@ static int gmc_v7_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v7_0_set_gart_funcs(adev);
+       gmc_v7_0_set_gmc_funcs(adev);
        gmc_v7_0_set_irq_funcs(adev);
 
        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1306,9 +1303,9 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
        .set_powergating_state = gmc_v7_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
-       .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
-       .set_pte_pde = gmc_v7_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
+       .flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+       .set_pte_pde = gmc_v7_0_set_pte_pde,
        .set_prt = gmc_v7_0_set_prt,
        .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
        .get_vm_pde = gmc_v7_0_get_vm_pde
@@ -1319,10 +1316,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
        .process = gmc_v7_0_process_interrupt,
 };
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gart.gart_funcs == NULL)
-               adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
+       if (adev->gmc.gmc_funcs == NULL)
+               adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
 }
 
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
index 7a021c6..ac73b2c 100644 (file)
@@ -45,7 +45,7 @@
 #include "amdgpu_atombios.h"
 
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v8_0_wait_for_idle(void *handle);
 
@@ -597,14 +597,14 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  */
 
 /**
- * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
                                        uint32_t vmid)
 {
        /* bits 0-15 are the VM contexts0-15 */
@@ -612,7 +612,7 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v8_0_set_pte_pde - update the page tables using MMIO
  *
  * @adev: amdgpu_device pointer
  * @cpu_pt_addr: cpu address of the page table
@@ -622,11 +622,9 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
  *
  * Update the page tables using the CPU.
  */
-static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
-                                    void *cpu_pt_addr,
-                                    uint32_t gpu_page_idx,
-                                    uint64_t addr,
-                                    uint64_t flags)
+static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+                               uint32_t gpu_page_idx, uint64_t addr,
+                               uint64_t flags)
 {
        void __iomem *ptr = (void *)cpu_pt_addr;
        uint64_t value;
@@ -888,7 +886,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        else
                gmc_v8_0_set_fault_enable_default(adev, true);
 
-       gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
+       gmc_v8_0_flush_gpu_tlb(adev, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)adev->gart.table_addr);
@@ -1009,7 +1007,7 @@ static int gmc_v8_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v8_0_set_gart_funcs(adev);
+       gmc_v8_0_set_gmc_funcs(adev);
        gmc_v8_0_set_irq_funcs(adev);
 
        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1640,9 +1638,9 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
        .get_clockgating_state = gmc_v8_0_get_clockgating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
-       .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
-       .set_pte_pde = gmc_v8_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
+       .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+       .set_pte_pde = gmc_v8_0_set_pte_pde,
        .set_prt = gmc_v8_0_set_prt,
        .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
        .get_vm_pde = gmc_v8_0_get_vm_pde
@@ -1653,10 +1651,10 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
        .process = gmc_v8_0_process_interrupt,
 };
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gart.gart_funcs == NULL)
-               adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+       if (adev->gmc.gmc_funcs == NULL)
+               adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
 }
 
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
index e3d0098..f049c84 100644 (file)
@@ -316,14 +316,14 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
  */
 
 /**
- * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
  *
  * Flush the TLB for the requested page table.
  */
-static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
                                        uint32_t vmid)
 {
        /* Use register 17 for GART */
@@ -367,7 +367,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v9_0_set_pte_pde - update the page tables using MMIO
  *
  * @adev: amdgpu_device pointer
  * @cpu_pt_addr: cpu address of the page table
@@ -377,11 +377,9 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
  *
  * Update the page tables using the CPU.
  */
-static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
-                                       void *cpu_pt_addr,
-                                       uint32_t gpu_page_idx,
-                                       uint64_t addr,
-                                       uint64_t flags)
+static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+                               uint32_t gpu_page_idx, uint64_t addr,
+                               uint64_t flags)
 {
        void __iomem *ptr = (void *)cpu_pt_addr;
        uint64_t value;
@@ -491,25 +489,25 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
        }
 }
 
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
-       .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
-       .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
+       .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+       .set_pte_pde = gmc_v9_0_set_pte_pde,
        .get_invalidate_req = gmc_v9_0_get_invalidate_req,
        .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
        .get_vm_pde = gmc_v9_0_get_vm_pde
 };
 
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gart.gart_funcs == NULL)
-               adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
+       if (adev->gmc.gmc_funcs == NULL)
+               adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
 }
 
 static int gmc_v9_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v9_0_set_gart_funcs(adev);
+       gmc_v9_0_set_gmc_funcs(adev);
        gmc_v9_0_set_irq_funcs(adev);
 
        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -981,7 +979,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 
        gfxhub_v1_0_set_fault_enable_default(adev, value);
        mmhub_v1_0_set_fault_enable_default(adev, value);
-       gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
+       gmc_v9_0_flush_gpu_tlb(adev, 0);
 
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
index 8a0b1b9..892ec22 100644 (file)
@@ -1136,11 +1136,11 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+       uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
-       amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+       amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
        pd_addr |= flags;
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
index 6b95f4f..4c19c96 100644 (file)
@@ -1294,12 +1294,12 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+       uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
        uint32_t data0, data1, mask;
 
-       amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+       amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
        pd_addr |= flags;
 
        data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
@@ -1346,11 +1346,11 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
                         unsigned int vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+       uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
-       amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+       amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
        pd_addr |= flags;
 
        amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
index 7cf2eef..071fb17 100755 (executable)
@@ -968,11 +968,11 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
                         unsigned int vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+       uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
-       amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+       amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
        pd_addr |= flags;
 
        amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
index b99e15c..659a8f2 100644 (file)
@@ -891,12 +891,12 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+       uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
        uint32_t data0, data1, mask;
 
-       amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+       amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
        pd_addr |= flags;
 
        data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
@@ -1024,11 +1024,11 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
                         unsigned int vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+       uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
-       amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+       amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
        pd_addr |= flags;
 
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);