drm/ttm: add bulk move function on LRU
author	Huang Rui <ray.huang@amd.com>
Mon, 6 Aug 2018 09:28:35 +0000 (17:28 +0800)
committer	Alex Deucher <alexander.deucher@amd.com>
Mon, 27 Aug 2018 16:11:21 +0000 (11:11 -0500)
This function allows us to bulk move a group of BOs to the tail of their LRU.
The position of each group of BOs is stored as a (first, last) pair in a
ttm_lru_bulk_move_pos structure.
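
A minimal driver-side sketch of the intended flow (the function and variable
names below are illustrative and not part of this patch; each BO is assumed to
already be reserved by the caller):

#include <linux/spinlock.h>
#include <linux/string.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical example: commit a group of BOs to their LRU tails in bulk. */
static void example_bulk_move(struct ttm_buffer_object **bos,
			      unsigned int num_bos)
{
	struct ttm_bo_global *glob = bos[0]->bdev->glob;
	struct ttm_lru_bulk_move bulk;
	unsigned int i;

	memset(&bulk, 0, sizeof(bulk));

	spin_lock(&glob->lru_lock);
	for (i = 0; i < num_bos; ++i) {
		/* record this BO's (first, last) position in @bulk */
		ttm_bo_move_to_lru_tail(bos[i], &bulk);
	}
	/* move every recorded range to the tail of its LRU in one step */
	ttm_bo_bulk_move_lru_tail(&bulk);
	spin_unlock(&glob->lru_lock);
}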

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
Tested-by: Mike Lothian <mike@fireburn.co.uk>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/ttm/ttm_bo.c
include/drm/ttm/ttm_bo_api.h

drivers/gpu/drm/ttm/ttm_bo.c
index 7117b6b..39d9d55 100644
@@ -247,6 +247,58 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
+static void ttm_bo_bulk_move_helper(struct ttm_lru_bulk_move_pos *pos,
+                                   struct list_head *lru, bool is_swap)
+{
+       struct list_head entries, before;
+       struct list_head *list1, *list2;
+
+       list1 = is_swap ? &pos->last->swap : &pos->last->lru;
+       list2 = is_swap ? pos->first->swap.prev : pos->first->lru.prev;
+
+       list_cut_position(&entries, lru, list1);
+       list_cut_position(&before, &entries, list2);
+       list_splice(&before, lru);
+       list_splice_tail(&entries, lru);
+}
+
+void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
+{
+       unsigned i;
+
+       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+               struct ttm_mem_type_manager *man;
+
+               if (!bulk->tt[i].first)
+                       continue;
+
+               man = &bulk->tt[i].first->bdev->man[TTM_PL_TT];
+               ttm_bo_bulk_move_helper(&bulk->tt[i], &man->lru[i], false);
+       }
+
+       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+               struct ttm_mem_type_manager *man;
+
+               if (!bulk->vram[i].first)
+                       continue;
+
+               man = &bulk->vram[i].first->bdev->man[TTM_PL_VRAM];
+               ttm_bo_bulk_move_helper(&bulk->vram[i], &man->lru[i], false);
+       }
+
+       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+               struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
+               struct list_head *lru;
+
+               if (!pos->first)
+                       continue;
+
+               lru = &pos->first->bdev->glob->swap_lru[i];
+               ttm_bo_bulk_move_helper(&bulk->swap[i], lru, true);
+       }
+}
+EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
+
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem, bool evict,
                                  struct ttm_operation_ctx *ctx)
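
The helper above moves the contiguous range [first, last] to the LRU tail with
two cuts and two splices: everything from the old head up to and including
@last is cut off, the part that preceded @first is cut off again and spliced
back to the head unchanged, and what remains of the range is spliced to the
tail. A standalone sketch of the same pattern on a plain list (the function
name is illustrative only, not part of this patch):

#include <linux/list.h>

/*
 * Illustrative only: move the contiguous range [first, last] of @lru to its
 * tail, mirroring the cut-and-splice pattern of ttm_bo_bulk_move_helper().
 */
static void move_range_to_lru_tail(struct list_head *lru,
				   struct list_head *first,
				   struct list_head *last)
{
	LIST_HEAD(entries);
	LIST_HEAD(before);

	/* @entries now holds everything from the old head up to @last. */
	list_cut_position(&entries, lru, last);
	/* Split off what preceded @first and put it back at the head. */
	list_cut_position(&before, &entries, first->prev);
	list_splice(&before, lru);
	/* Finally append [first, last] at the tail of @lru. */
	list_splice_tail(&entries, lru);
}
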
include/drm/ttm/ttm_bo_api.h
index 0d4eb81..8c19470 100644
@@ -417,6 +417,16 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                             struct ttm_lru_bulk_move *bulk);
 
 /**
+ * ttm_bo_bulk_move_lru_tail
+ *
+ * @bulk: bulk move structure
+ *
+ * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
+ * BO order never changes. Should be called with ttm_bo_global::lru_lock held.
+ */
+void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
+
+/**
  * ttm_bo_lock_delayed_workqueue
  *
  * Prevent the delayed workqueue from running.