drm/vmwgfx: Remove usage of MOBFMT_RANGE
authorZack Rusin <zackr@vmware.com>
Mon, 6 Dec 2021 17:26:19 +0000 (12:26 -0500)
committerZack Rusin <zackr@vmware.com>
Thu, 9 Dec 2021 18:16:34 +0000 (13:16 -0500)
Using MOBFMT_RANGE in the early days of guest backed objects was a major
performance win but that has changed a lot since. There is no longer
a performance reason to use MOBFMT_RANGE. The device can/will still
benefit from the pages being contiguous, but marking them as MOBFMT_RANGE
no longer matters.
Benchmarks (e.g. heaven, valley) show that creating page tables
for mob memory is actually faster than using mobfmt ranges.

Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211206172620.3139754-12-zack@kde.org
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c

index 21dd69e..1760ba1 100644 (file)
@@ -333,7 +333,6 @@ struct vmw_sg_table {
        struct page **pages;
        const dma_addr_t *addrs;
        struct sg_table *sgt;
-       unsigned long num_regions;
        unsigned long num_pages;
 };
 
index 65f7c2b..2d91a44 100644 (file)
@@ -146,9 +146,6 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
        if (otable->size <= PAGE_SIZE) {
                mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
                mob->pt_root_page = vmw_piter_dma_addr(&iter);
-       } else if (vsgt->num_regions == 1) {
-               mob->pt_level = SVGA3D_MOBFMT_RANGE;
-               mob->pt_root_page = vmw_piter_dma_addr(&iter);
        } else {
                ret = vmw_mob_pt_populate(dev_priv, mob);
                if (unlikely(ret != 0))
@@ -623,9 +620,6 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
        if (likely(num_data_pages == 1)) {
                mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
                mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
-       } else if (vsgt->num_regions == 1) {
-               mob->pt_level = SVGA3D_MOBFMT_RANGE;
-               mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
        } else if (unlikely(mob->pt_bo == NULL)) {
                ret = vmw_mob_pt_populate(dev_priv, mob);
                if (unlikely(ret != 0))
index 20f752f..b84ecc6 100644 (file)
@@ -288,8 +288,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 {
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
-       struct vmw_piter iter;
-       dma_addr_t old;
        int ret = 0;
 
        if (vmw_tt->mapped)
@@ -321,16 +319,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
                break;
        }
 
-       old = ~((dma_addr_t) 0);
-       vmw_tt->vsgt.num_regions = 0;
-       for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
-               dma_addr_t cur = vmw_piter_dma_addr(&iter);
-
-               if (cur != old + PAGE_SIZE)
-                       vmw_tt->vsgt.num_regions++;
-               old = cur;
-       }
-
        vmw_tt->mapped = true;
        return 0;