drm/vmwgfx: Assign eviction priorities to resources
author		Thomas Hellstrom <thellstrom@vmware.com>
Mon, 4 Mar 2019 18:37:40 +0000 (19:37 +0100)
committer	Thomas Hellstrom <thellstrom@vmware.com>
Thu, 15 Aug 2019 06:40:05 +0000 (08:40 +0200)
TTM provides a means to assign eviction priorities to buffer objects. This
means that all buffer objects with a lower priority will be evicted first
under memory pressure.
Use this to make sure surfaces, and in particular non-dirty surfaces, are
evicted first. Evicting shaders, cotables and contexts in particular implies
a significant performance hit on vmwgfx, so make sure these resources are
evicted last.
Some buffer objects are sub-allocated in user-space, which means we can have
many resources attached to a single buffer object. In that case the buffer
object is given the highest priority of the attached resources.
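A self-contained sketch of that bookkeeping, mirroring the
vmw_bo_prio_add()/vmw_bo_prio_del()/vmw_bo_prio_adjust() helpers added
below (the sketch types and names are illustrative): count the attached
resources per priority and let the buffer object take the highest occupied
priority, defaulting to 3 when nothing is attached.

#define MAX_PRIORITY 4

struct bo_sketch {
	unsigned int priority;			/* what the evictor consults */
	unsigned int res_prios[MAX_PRIORITY];	/* attached resources per priority */
};

/* Recompute the buffer object priority as the highest attached priority. */
static void bo_sketch_adjust(struct bo_sketch *bo)
{
	int i = MAX_PRIORITY;

	while (i--) {
		if (bo->res_prios[i]) {
			bo->priority = i;
			return;
		}
	}
	bo->priority = 3;	/* default when no resources are attached */
}

static void bo_sketch_attach(struct bo_sketch *bo, unsigned int prio)
{
	if (bo->res_prios[prio]++ == 0)	/* first resource at this priority */
		bo_sketch_adjust(bo);
}

static void bo_sketch_detach(struct bo_sketch *bo, unsigned int prio)
{
	if (--bo->res_prios[prio] == 0)	/* last resource at this priority */
		bo_sketch_adjust(bo);
}

Example: attach a clean surface (priority 1) and a shader (priority 3) to
the same buffer object and it keeps priority 3; detach the shader and it
drops back to priority 1.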

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c

index 0f04e58..0d9a1d4 100644 (file)
@@ -509,6 +509,8 @@ int vmw_bo_init(struct vmw_private *dev_priv,
 
        acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));
+       BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+       vmw_bo->base.priority = 3;
 
        INIT_LIST_HEAD(&vmw_bo->res_list);
 
index 63f1110..a56c9d8 100644 (file)
@@ -88,6 +88,8 @@ static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
+       .prio = 3,
+       .dirty_prio = 3,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
@@ -100,6 +102,8 @@ static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_backup = true,
        .may_evict = true,
+       .prio = 3,
+       .dirty_prio = 3,
        .type_name = "dx contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_context_create,
index 7984f17..4e8df76 100644 (file)
@@ -116,6 +116,8 @@ static const struct vmw_res_func vmw_cotable_func = {
        .res_type = vmw_res_cotable,
        .needs_backup = true,
        .may_evict = true,
+       .prio = 3,
+       .dirty_prio = 3,
        .type_name = "context guest backed object tables",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_cotable_create,
@@ -307,7 +309,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
 
-       if (list_empty(&res->mob_head))
+       if (!vmw_resource_mob_attached(res))
                return 0;
 
        WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
@@ -453,6 +455,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
                goto out_wait;
        }
 
+       vmw_resource_mob_detach(res);
        res->backup = buf;
        res->backup_size = new_size;
        vcotbl->size_read_back = cur_size_read_back;
@@ -467,12 +470,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
                res->backup = old_buf;
                res->backup_size = old_size;
                vcotbl->size_read_back = old_size_read_back;
+               vmw_resource_mob_attach(res);
                goto out_wait;
        }
 
+       vmw_resource_mob_attach(res);
        /* Let go of the old mob. */
-       list_del(&res->mob_head);
-       list_add_tail(&res->mob_head, &buf->res_list);
        vmw_bo_unreference(&old_buf);
        res->id = vcotbl->type;
 
@@ -496,7 +499,7 @@ out_wait:
  * is called before bind() in the validation sequence is instead used for two
  * things.
  * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
- *    buffer, that is, if @res->mob_head is non-empty.
+ *    buffer.
  * 2) Resize the cotable if needed.
  */
 static int vmw_cotable_create(struct vmw_resource *res)
@@ -512,7 +515,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
                new_size *= 2;
 
        if (likely(new_size <= res->backup_size)) {
-               if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+               if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
                        ret = vmw_cotable_unscrub(res);
                        if (ret)
                                return ret;
index bec80e6..5eb73de 100644 (file)
@@ -97,6 +97,15 @@ struct vmw_fpriv {
        bool gb_aware; /* user-space is guest-backed aware */
 };
 
+/**
+ * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
+ * @base: The TTM buffer object
+ * @res_list: List of resources using this buffer object as a backing MOB
+ * @pin_count: pin depth
+ * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
+ * @map: Kmap object for semi-persistent mappings
+ * @res_prios: Eviction priority counts for attached resources
+ */
 struct vmw_buffer_object {
        struct ttm_buffer_object base;
        struct list_head res_list;
@@ -105,6 +114,7 @@ struct vmw_buffer_object {
        struct vmw_resource *dx_query_ctx;
        /* Protected by reservation */
        struct ttm_bo_kmap_obj map;
+       u32 res_prios[TTM_MAX_BO_PRIORITY];
 };
 
 /**
@@ -156,6 +166,7 @@ struct vmw_resource {
        struct kref kref;
        struct vmw_private *dev_priv;
        int id;
+       u32 used_prio;
        unsigned long backup_size;
        bool res_dirty;
        bool backup_dirty;
@@ -703,6 +714,19 @@ extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
 extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
+void vmw_resource_mob_attach(struct vmw_resource *res);
+void vmw_resource_mob_detach(struct vmw_resource *res);
+
+/**
+ * vmw_resource_mob_attached - Whether a resource currently has a mob attached
+ * @res: The resource
+ *
+ * Return: true if the resource has a mob attached, false otherwise.
+ */
+static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
+{
+       return !list_empty(&res->mob_head);
+}
 
 /**
  * vmw_user_resource_noref_release - release a user resource pointer looked up
@@ -781,6 +805,54 @@ static inline void vmw_user_bo_noref_release(void)
        ttm_base_object_noref_release();
 }
 
+/**
+ * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
+ * according to attached resources
+ * @vbo: The struct vmw_buffer_object
+ */
+static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
+{
+       int i = ARRAY_SIZE(vbo->res_prios);
+
+       while (i--) {
+               if (vbo->res_prios[i]) {
+                       vbo->base.priority = i;
+                       return;
+               }
+       }
+
+       vbo->base.priority = 3;
+}
+
+/**
+ * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
+ * eviction priority
+ * @vbo: The struct vmw_buffer_object
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
+{
+       if (vbo->res_prios[prio]++ == 0)
+               vmw_bo_prio_adjust(vbo);
+}
+
+/**
+ * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
+ * priority being removed
+ * @vbo: The struct vmw_buffer_object
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
+{
+       if (--vbo->res_prios[prio] == 0)
+               vmw_bo_prio_adjust(vbo);
+}
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
index 7994b81..2eb3532 100644 (file)
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
+/**
+ * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_attach(struct vmw_resource *res)
+{
+       struct vmw_buffer_object *backup = res->backup;
+
+       reservation_object_assert_held(backup->base.base.resv);
+       res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
+               res->func->prio;
+       list_add_tail(&res->mob_head, &backup->res_list);
+       vmw_bo_prio_add(backup, res->used_prio);
+}
+
+/**
+ * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_detach(struct vmw_resource *res)
+{
+       struct vmw_buffer_object *backup = res->backup;
+
+       reservation_object_assert_held(backup->base.base.resv);
+       if (vmw_resource_mob_attached(res)) {
+               list_del_init(&res->mob_head);
+               vmw_bo_prio_del(backup, res->used_prio);
+       }
+}
+
 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 {
        kref_get(&res->kref);
@@ -79,7 +109,7 @@ static void vmw_resource_release(struct kref *kref)
                struct ttm_buffer_object *bo = &res->backup->base;
 
                ttm_bo_reserve(bo, false, false, NULL);
-               if (!list_empty(&res->mob_head) &&
+               if (vmw_resource_mob_attached(res) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;
 
@@ -88,7 +118,7 @@ static void vmw_resource_release(struct kref *kref)
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
-               list_del_init(&res->mob_head);
+               vmw_resource_mob_detach(res);
                ttm_bo_unreserve(bo);
                vmw_bo_unreference(&res->backup);
        }
@@ -178,6 +208,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
+       res->used_prio = 3;
        if (delay_id)
                return 0;
        else
@@ -354,14 +385,14 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
        }
 
        if (func->bind &&
-           ((func->needs_backup && list_empty(&res->mob_head) &&
+           ((func->needs_backup && !vmw_resource_mob_attached(res) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
-                       list_add_tail(&res->mob_head, &res->backup->res_list);
+                       vmw_resource_mob_attach(res);
        }
 
        return 0;
@@ -401,15 +432,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 
        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
-                       reservation_object_assert_held(res->backup->base.base.resv);
-                       list_del_init(&res->mob_head);
+                       vmw_resource_mob_detach(res);
                        vmw_bo_unreference(&res->backup);
                }
 
                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);
-                       reservation_object_assert_held(new_backup->base.base.resv);
-                       list_add_tail(&res->mob_head, &new_backup->res_list);
+                       vmw_resource_mob_attach(res);
                } else {
                        res->backup = NULL;
                }
@@ -468,7 +497,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
        if (unlikely(ret != 0))
                goto out_no_reserve;
 
-       if (res->func->needs_backup && list_empty(&res->mob_head))
+       if (res->func->needs_backup && !vmw_resource_mob_attached(res))
                return 0;
 
        backup_dirty = res->backup_dirty;
@@ -573,11 +602,11 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                return ret;
 
        if (unlikely(func->unbind != NULL &&
-                    (!func->needs_backup || !list_empty(&res->mob_head)))) {
+                    (!func->needs_backup || vmw_resource_mob_attached(res)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
-               list_del_init(&res->mob_head);
+               vmw_resource_mob_detach(res);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
@@ -659,7 +688,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
-               list_del_init(&res->mob_head);
+               WARN_ON_ONCE(vmw_resource_mob_attached(res));
                vmw_bo_unreference(&res->backup);
        }
 
@@ -698,7 +727,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
                (void) res->func->unbind(res, res->res_dirty, &val_buf);
                res->backup_dirty = true;
                res->res_dirty = false;
-               list_del_init(&res->mob_head);
+               vmw_resource_mob_detach(res);
        }
 
        (void) ttm_bo_wait(&vbo->base, false, false);
index 7e19eba..984e588 100644 (file)
@@ -78,6 +78,8 @@ struct vmw_res_func {
        const char *type_name;
        struct ttm_placement *backup_placement;
        bool may_evict;
+       u32 prio;
+       u32 dirty_prio;
 
        int (*create) (struct vmw_resource *res);
        int (*destroy) (struct vmw_resource *res);
index d310d21..e139fdf 100644 (file)
@@ -95,6 +95,8 @@ static const struct vmw_res_func vmw_gb_shader_func = {
        .res_type = vmw_res_shader,
        .needs_backup = true,
        .may_evict = true,
+       .prio = 3,
+       .dirty_prio = 3,
        .type_name = "guest backed shaders",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_shader_create,
@@ -106,7 +108,9 @@ static const struct vmw_res_func vmw_gb_shader_func = {
 static const struct vmw_res_func vmw_dx_shader_func = {
        .res_type = vmw_res_shader,
        .needs_backup = true,
-       .may_evict = false,
+       .may_evict = true,
+       .prio = 3,
+       .dirty_prio = 3,
        .type_name = "dx shaders",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_shader_create,
@@ -423,7 +427,7 @@ static int vmw_dx_shader_create(struct vmw_resource *res)
 
        WARN_ON_ONCE(!shader->committed);
 
-       if (!list_empty(&res->mob_head)) {
+       if (vmw_resource_mob_attached(res)) {
                mutex_lock(&dev_priv->binding_mutex);
                ret = vmw_dx_shader_unscrub(res);
                mutex_unlock(&dev_priv->binding_mutex);
index 739d931..29d8794 100644 (file)
@@ -112,6 +112,8 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
        .res_type = vmw_res_surface,
        .needs_backup = false,
        .may_evict = true,
+       .prio = 1,
+       .dirty_prio = 1,
        .type_name = "legacy surfaces",
        .backup_placement = &vmw_srf_placement,
        .create = &vmw_legacy_srf_create,
@@ -124,6 +126,8 @@ static const struct vmw_res_func vmw_gb_surface_func = {
        .res_type = vmw_res_surface,
        .needs_backup = true,
        .may_evict = true,
+       .prio = 1,
+       .dirty_prio = 2,
        .type_name = "guest backed surfaces",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_surface_create,