From 2bf257d662509553ae226239e7dc1c3d00636ca6 Mon Sep 17 00:00:00 2001
From: Roger He
Date: Tue, 21 Nov 2017 16:47:16 +0800
Subject: [PATCH] drm/ttm: roundup the shrink request to prevent skip huge pool
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

e.g. if the shrink request is less than 512 pages, the logic will skip the huge pool

Reviewed-by: Chunming Zhou
Reviewed-by: Christian König
Signed-off-by: Roger He
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 337c228..116897a 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -442,17 +442,19 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
+		unsigned page_nr;
+
 		if (shrink_pages == 0)
 			break;
 
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+		page_nr = (1 << pool->order);
 		/* OK to use static buffer since global mutex is held. */
-		nr_free_pool = (nr_free >> pool->order);
-		if (nr_free_pool == 0)
-			continue;
-
+		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
 		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
-		freed += ((nr_free_pool - shrink_pages) << pool->order);
+		freed += (nr_free_pool - shrink_pages) << pool->order;
+		if (freed >= sc->nr_to_scan)
+			break;
 	}
 	mutex_unlock(&lock);
 	return freed;
-- 
2.7.4
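
Illustrative note (not part of the patch): a minimal user-space sketch of the arithmetic the fix addresses. The order-9 huge pool (512 normal pages per huge page) and the sample request of 100 pages are assumptions chosen for illustration; ROUNDUP mirrors the kernel's roundup() macro. With the old right shift, a request smaller than 512 pages truncates to zero and the huge pool is never shrunk; rounding the request up first guarantees at least one huge page is considered.

#include <stdio.h>

/* assumption: order-9 huge pool, i.e. 512 normal pages per huge page */
#define HUGE_POOL_ORDER 9
/* same arithmetic as the kernel's roundup() macro */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned nr_free = 100;	/* a shrink request smaller than 512 pages */
	unsigned page_nr = 1u << HUGE_POOL_ORDER;

	/* old logic: 100 >> 9 == 0, so the huge pool was skipped entirely */
	unsigned old_nr_free_pool = nr_free >> HUGE_POOL_ORDER;
	/* new logic: roundup(100, 512) >> 9 == 1, one huge page gets freed */
	unsigned new_nr_free_pool = ROUNDUP(nr_free, page_nr) >> HUGE_POOL_ORDER;

	printf("old nr_free_pool = %u\n", old_nr_free_pool);
	printf("new nr_free_pool = %u\n", new_nr_free_pool);
	return 0;
}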