From ffe96a1f0d3d49d8f3d5eb0b8e64b5d8b39247dc Mon Sep 17 00:00:00 2001 From: Tao Zeng Date: Mon, 18 Feb 2019 14:53:09 +0800 Subject: [PATCH] mm: reclaim for unevictable cma pages [1/1] PD#SWPL-3902 Problem: If a cma page is unevictable, migrating it can take a long time. Solution: 1. Reclaim unevictable cma file cache pages. 2. Do not allow CMA use until free memory first falls below the watermark. Verify: einstern Change-Id: I0ecbf5dd535cb034430c4ea623891e7a7ae6e4dd Signed-off-by: Tao Zeng --- drivers/amlogic/memory_ext/aml_cma.c | 11 +++++++++-- include/linux/amlogic/aml_cma.h | 1 + mm/cma.c | 5 ++++- mm/migrate.c | 2 +- mm/page_alloc.c | 13 +++++++++++++ mm/vmscan.c | 3 +++ 6 files changed, 31 insertions(+), 4 deletions(-) diff --git a/drivers/amlogic/memory_ext/aml_cma.c b/drivers/amlogic/memory_ext/aml_cma.c index 7f8b96c..563cfb7 100644 --- a/drivers/amlogic/memory_ext/aml_cma.c +++ b/drivers/amlogic/memory_ext/aml_cma.c @@ -101,6 +101,7 @@ EXPORT_SYMBOL(cma_page_count_update); #define RESTRIC_ANON 0 #define ANON_RATIO 60 +bool cma_first_wm_low __read_mostly; bool can_use_cma(gfp_t gfp_flags) { @@ -108,6 +109,9 @@ bool can_use_cma(gfp_t gfp_flags) unsigned long anon_cma; #endif /* RESTRIC_ANON */ + if (unlikely(!cma_first_wm_low)) + return false; + if (cma_forbidden_mask(gfp_flags)) return false; @@ -715,14 +719,17 @@ EXPORT_SYMBOL(aml_cma_free); void show_page(struct page *page) { unsigned long trace = 0; + unsigned long map_flag = -1UL; if (!page) return; #ifdef CONFIG_AMLOGIC_PAGE_TRACE trace = get_page_trace(page); #endif - pr_info("page:%lx, map:%p, f:%lx, m:%d, c:%d, f:%pf\n", - page_to_pfn(page), page->mapping, + if (page->mapping && !((unsigned long)page->mapping & 0x3)) + map_flag = page->mapping->flags; + pr_info("page:%lx, map:%p, mf:%lx, pf:%lx, m:%d, c:%d, f:%pf\n", + page_to_pfn(page), page->mapping, map_flag, page->flags & 0xffffffff, page_mapcount(page), page_count(page), (void *)trace); diff --git a/include/linux/amlogic/aml_cma.h b/include/linux/amlogic/aml_cma.h index 
621acac..c472722 100644 --- a/include/linux/amlogic/aml_cma.h +++ b/include/linux/amlogic/aml_cma.h @@ -74,6 +74,7 @@ extern bool cma_page(struct page *page); extern unsigned long get_cma_allocated(void); extern unsigned long get_total_cmapages(void); extern spinlock_t cma_iso_lock; +extern bool cma_first_wm_low; extern int cma_debug_level; extern int aml_cma_alloc_range(unsigned long start, unsigned long end); diff --git a/mm/cma.c b/mm/cma.c index 59fda4c..c840830 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -499,6 +499,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) int ret; #ifdef CONFIG_AMLOGIC_CMA int dummy; + unsigned long long tick; #endif /* CONFIG_AMLOGIC_CMA */ if (!cma || !cma->count) @@ -508,6 +509,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) count, align); #ifdef CONFIG_AMLOGIC_CMA + tick = sched_clock(); cma_debug(0, NULL, "(cma %p, count %zu, align %d)\n", (void *)cma, count, align); #endif @@ -572,7 +574,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) #ifdef CONFIG_AMLOGIC_CMA aml_cma_alloc_post_hook(&dummy, count, page); - cma_debug(0, NULL, "return %p\n", page); + cma_debug(0, NULL, "return page:%lx, tick:%lld\n", + page ? 
page_to_pfn(page) : 0, sched_clock() - tick); #endif /* CONFIG_AMLOGIC_CMA */ pr_debug("%s(): returned %p\n", __func__, page); return page; diff --git a/mm/migrate.c b/mm/migrate.c index 0512185..a943ef4 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -445,7 +445,7 @@ int migrate_page_move_mapping(struct address_space *mapping, radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { spin_unlock_irq(&mapping->tree_lock); #ifdef CONFIG_AMLOGIC_CMA - cma_debug(2, page, " anon page cnt miss match, e:%d, p:%d\n", + cma_debug(2, page, " file page cnt miss match, e:%d, p:%d\n", expected_count, page_has_private(page)); #endif return -EAGAIN; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d30cb0c..7118cf8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3007,8 +3007,21 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, * are not met, then a high-order request also cannot go ahead * even if a suitable page happened to be free. */ +#ifdef CONFIG_AMLOGIC_CMA + if (free_pages <= min + z->lowmem_reserve[classzone_idx]) { + /* do not using cma until water mark is low */ + if (unlikely(!cma_first_wm_low && free_pages > 0)) { + cma_first_wm_low = true; + pr_info("Now can use cma, free:%ld, wm:%ld\n", + free_pages, + min + z->lowmem_reserve[classzone_idx]); + } + return false; + } +#else if (free_pages <= min + z->lowmem_reserve[classzone_idx]) return false; +#endif /* If this is an order-0 request then the watermark is fine */ if (!order) diff --git a/mm/vmscan.c b/mm/vmscan.c index 5d86ff1..1efc1f9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1315,6 +1315,9 @@ static int filecache_need_migrate(struct page *page) if (!PageActive(page) && page_mapcount(page) >= INACTIVE_MIGRATE) return 1; + if (PageUnevictable(page)) + return 0; + return 0; } #endif -- 2.7.4