From 54213b948bfd2f29f3f5e60504fae4509946ea85 Mon Sep 17 00:00:00 2001
From: tao zeng
Date: Tue, 27 Nov 2018 12:26:53 +0800
Subject: [PATCH] mm: add cma debug interface [1/1]

PD#GH-17

Problem:
Sometimes CMA allocation fails, but it is hard to get the failure
log without recompiling the kernel.

Solution:
Add /proc/cma_debug so CMA debug messages can be enabled dynamically.
By default no debug message is printed; writing a value larger than 0
to this node turns the messages on (a usage sketch is appended after
the patch).

Verify:
p212

Change-Id: Ibcfd1d48be5f33f674f09df713dc2e493748c405
Signed-off-by: tao zeng
---
 drivers/amlogic/memory_ext/aml_cma.c | 115 +++++++++++++++++++++++------------
 include/linux/amlogic/aml_cma.h      |  12 ++++
 mm/cma.c                             |   5 ++
 mm/compaction.c                      |  79 +++++++++++++++++++++++-
 mm/migrate.c                         |  25 ++++++++
 5 files changed, 195 insertions(+), 41 deletions(-)

diff --git a/drivers/amlogic/memory_ext/aml_cma.c b/drivers/amlogic/memory_ext/aml_cma.c
index 1eabadb..7f8b96c 100644
--- a/drivers/amlogic/memory_ext/aml_cma.c
+++ b/drivers/amlogic/memory_ext/aml_cma.c
@@ -30,11 +30,14 @@
 #include
 #include
 #include
+#include
 #include
 #ifdef CONFIG_AMLOGIC_PAGE_TRACE
 #include
 #endif /* CONFIG_AMLOGIC_PAGE_TRACE */
 
+#define MAX_DEBUG_LEVEL		5
+
 struct work_cma {
 	struct list_head list;
 	unsigned long pfn;
@@ -52,6 +55,8 @@ struct cma_pcp {
 static bool can_boost;
 static DEFINE_PER_CPU(struct cma_pcp, cma_pcp_thread);
 
+static struct proc_dir_entry *dentry;
+int cma_debug_level;
 DEFINE_SPINLOCK(cma_iso_lock);
 static atomic_t cma_allocate;
 
@@ -261,6 +266,7 @@ static int aml_alloc_contig_migrate_range(struct compact_control *cc,
 		pfn = isolate_migratepages_range(cc, pfn, end);
 		if (!pfn) {
 			ret = -EINTR;
+			cma_debug(1, NULL, " isolate migrate pages failed\n");
 			break;
 		}
 		tries = 0;
@@ -336,7 +342,7 @@ static int cma_boost_work_func(void *cma_data)
 		drain_local_pages(NULL);
 	}
 	if (ret)
-		pr_debug("%s, failed, ret:%d\n", __func__, ret);
+		cma_debug(1, NULL, "failed, ret:%d\n", ret);
 next:
 	complete(&c_work->end);
 	if (kthread_should_stop()) {
@@ -441,34 +447,6 @@ int cma_alloc_contig_boost(unsigned long start_pfn, unsigned long count)
 	return ret;
 }
 
-/*
- * Some of these functions are implemented from page_isolate.c
- */
-static bool can_free_list_page(struct page *page, struct list_head *list)
-{
-#if 0
-	unsigned long flags;
-	bool ret = false;
-
-	if (!spin_trylock_irqsave(&cma_iso_lock, flags))
-		return ret;
-
-	if (!(page->flags & PAGE_FLAGS_CHECK_AT_FREE) &&
-	    !PageSwapBacked(page) &&
-	    (page->lru.next != LIST_POISON1)) {
-		if (list_empty(&page->lru))
-			list_add(&page->lru, list);
-		else
-			list_move(&page->lru, list);
-		ret = true;
-	}
-	spin_unlock_irqrestore(&cma_iso_lock, flags);
-	return ret;
-#else
-	return false;
-#endif
-}
-
 static int __aml_check_pageblock_isolate(unsigned long pfn,
 					 unsigned long end_pfn,
 					 bool skip_hwpoisoned_pages,
@@ -496,13 +474,7 @@ static int __aml_check_pageblock_isolate(unsigned long pfn,
 			 */
 			pfn++;
 		} else {
-			/* This page can be freed ? */
-			if (!page_count(page)) {
-				if (can_free_list_page(page, list)) {
-					pfn++;
-					continue;
-				}
-			}
+			cma_debug(1, page, " isolate failed\n");
 			break;
 		}
 	}
@@ -581,11 +553,14 @@ int aml_cma_alloc_range(unsigned long start, unsigned long end)
 	};
 	INIT_LIST_HEAD(&cc.migratepages);
 
+	cma_debug(0, NULL, " range [%lx-%lx]\n", start, end);
 	ret = start_isolate_page_range(get_align_pfn_low(start),
 				       get_align_pfn_high(end),
 				       MIGRATE_CMA, false);
-	if (ret)
+	if (ret) {
+		cma_debug(1, NULL, "ret:%d\n", ret);
 		return ret;
+	}
 
 try_again:
 	/*
@@ -600,8 +575,10 @@ try_again:
 	} else
 		ret = aml_alloc_contig_migrate_range(&cc, start, end, 0);
 
-	if (ret && ret != -EBUSY)
+	if (ret && ret != -EBUSY) {
+		cma_debug(1, NULL, "ret:%d\n", ret);
 		goto done;
+	}
 
 	ret = 0;
 	if (!boost_ok) {
@@ -633,8 +610,8 @@ try_again:
 
 	/* Make sure the range is really isolated. */
 	if (aml_check_pages_isolated(outer_start, end, false)) {
-		pr_debug("%s check_pages_isolated(%lx, %lx) failed\n",
-			 __func__, outer_start, end);
+		cma_debug(1, NULL, "check pages isolated(%lx, %lx) failed\n",
+			  outer_start, end);
 		try_times++;
 		if (try_times < 10)
 			goto try_again;
@@ -650,6 +627,8 @@ try_again:
 			pr_info("cma_alloc [%lx-%lx] aborted\n",
 				start, end);
 		} else
 			ret = -EBUSY;
+		cma_debug(1, NULL, "isolate free range(%lx, %lx) failed\n",
+			  outer_start, end);
 		goto done;
 	}
 
@@ -733,11 +712,67 @@ void aml_cma_free(unsigned long pfn, unsigned int nr_pages)
 }
 EXPORT_SYMBOL(aml_cma_free);
 
+void show_page(struct page *page)
+{
+	unsigned long trace = 0;
+
+	if (!page)
+		return;
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+	trace = get_page_trace(page);
+#endif
+	pr_info("page:%lx, map:%p, f:%lx, m:%d, c:%d, func:%pf\n",
+		page_to_pfn(page), page->mapping,
+		page->flags & 0xffffffff,
+		page_mapcount(page), page_count(page),
+		(void *)trace);
+}
+
+static int cma_debug_show(struct seq_file *m, void *arg)
+{
+	seq_printf(m, "level=%d\n", cma_debug_level);
+	return 0;
+}
+
+static ssize_t cma_debug_write(struct file *file, const char __user *buffer,
+			       size_t count, loff_t *ppos)
+{
+	int arg = 0;
+
+	if (kstrtoint_from_user(buffer, count, 10, &arg))
+		return -EINVAL;
+
+	if (arg < 0 || arg > MAX_DEBUG_LEVEL)
+		return -EINVAL;
+
+	cma_debug_level = arg;
+	return count;
+}
+
+static int cma_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cma_debug_show, NULL);
+}
+
+static const struct file_operations cma_dbg_file_ops = {
+	.open		= cma_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.write		= cma_debug_write,
+	.release	= single_release,
+};
+
 static int __init aml_cma_init(void)
 {
 	atomic_set(&cma_allocate, 0);
 	atomic_long_set(&nr_cma_allocated, 0);
 
+	dentry = proc_create("cma_debug", 0644, NULL, &cma_dbg_file_ops);
+	if (!dentry) {
+		pr_err("%s, create proc entry failed\n", __func__);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 arch_initcall(aml_cma_init);
diff --git a/include/linux/amlogic/aml_cma.h b/include/linux/amlogic/aml_cma.h
index 4acb96f..621acac 100644
--- a/include/linux/amlogic/aml_cma.h
+++ b/include/linux/amlogic/aml_cma.h
@@ -74,6 +74,7 @@ extern bool cma_page(struct page *page);
 extern unsigned long get_cma_allocated(void);
 extern unsigned long get_total_cmapages(void);
 extern spinlock_t cma_iso_lock;
+extern int cma_debug_level;
 
 extern int aml_cma_alloc_range(unsigned long start, unsigned long end);
 extern void aml_cma_free(unsigned long pfn, unsigned int nr_pages);
@@ -81,6 +82,7 @@ extern void aml_cma_free(unsigned long pfn, unsigned int nr_pages);
 
 extern unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 						   struct list_head *page_list);
+extern void show_page(struct page *page);
 unsigned long
 isolate_freepages_range(struct compact_control *cc,
 			unsigned long start_pfn, unsigned long end_pfn);
@@ -91,4 +93,14 @@
 isolate_migratepages_range(struct compact_control *cc,
 			   unsigned long low_pfn, unsigned long end_pfn);
 struct page *compaction_cma_alloc(struct page *migratepage,
 				  unsigned long data, int **result);
+
+#define cma_debug(l, p, format, args...)				\
+	do {								\
+		if (l < cma_debug_level) {				\
+			show_page(p);					\
+			pr_info("%s, %d " format, __func__, __LINE__, ##args); \
+		}							\
+	} while (0)
+
+
 #endif /* __AMLOGIC_CMA_H__ */
diff --git a/mm/cma.c b/mm/cma.c
index 23d978d..59fda4c 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -507,6 +507,10 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 
 	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
 		 count, align);
+#ifdef CONFIG_AMLOGIC_CMA
+	cma_debug(0, NULL, "(cma %p, count %zu, align %d)\n",
+		  (void *)cma, count, align);
+#endif
 
 	if (!count)
 		return NULL;
@@ -568,6 +572,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 
 #ifdef CONFIG_AMLOGIC_CMA
 	aml_cma_alloc_post_hook(&dummy, count, page);
+	cma_debug(0, NULL, "return %p\n", page);
 #endif /* CONFIG_AMLOGIC_CMA */
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
diff --git a/mm/compaction.c b/mm/compaction.c
index e35f491..665760f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -434,14 +434,34 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		 * contention, to give chance to IRQs. Abort if fatal signal
 		 * pending or async compaction detects need_resched()
 		 */
+		#ifdef CONFIG_AMLOGIC_CMA
+		if (!(blockpfn % SWAP_CLUSTER_MAX)
+		    && compact_unlock_should_abort(&cc->zone->lock, flags,
+						   &locked, cc)) {
+			if (strict)
+				cma_debug(1, page, "abort, blk:%lx, swap:%ld\n",
+					  blockpfn, SWAP_CLUSTER_MAX);
+			break;
+		}
+		#else
 		if (!(blockpfn % SWAP_CLUSTER_MAX)
 		    && compact_unlock_should_abort(&cc->zone->lock, flags,
 								&locked, cc))
 			break;
+		#endif
 
 		nr_scanned++;
+		#ifdef CONFIG_AMLOGIC_CMA
+		if (!pfn_valid_within(blockpfn)) {
+			if (strict)
+				cma_debug(1, page, "invalid pfn:%lx\n",
+					  blockpfn);
+			goto isolate_fail;
+		}
+		#else
 		if (!pfn_valid_within(blockpfn))
 			goto isolate_fail;
+		#endif
 
 		if (!valid_page)
 			valid_page = page;
@@ -459,12 +479,25 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 			blockpfn += (1UL << comp_order) - 1;
 			cursor += (1UL << comp_order) - 1;
 		}
-
+		#ifdef CONFIG_AMLOGIC_CMA
+		if (strict)
+			cma_debug(1, page, "compound page:%lx\n",
+				  page_to_pfn(page));
+		#endif
 		goto isolate_fail;
 	}
 
+	#ifdef CONFIG_AMLOGIC_CMA
+	if (!PageBuddy(page)) {
+		if (strict)
+			cma_debug(1, page, " no buddy page:%lx\n",
+				  page_to_pfn(page));
+		goto isolate_fail;
+	}
+	#else
 	if (!PageBuddy(page))
 		goto isolate_fail;
+	#endif
 
 	/*
 	 * If we already hold the lock, we can skip some rechecking.
@@ -484,19 +517,44 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 			 */
 			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
+			#ifdef CONFIG_AMLOGIC_CMA
+			if (!locked) {
+				if (strict)
+					cma_debug(1, page, " lock failed:%lx\n",
+						  page_to_pfn(page));
+				break;
+			}
+			/* Recheck this is a buddy page under lock */
+			if (!PageBuddy(page)) {
+				if (strict)
+					cma_debug(1, page, " no buddy page under lock:%lx\n",
+						  page_to_pfn(page));
+				goto isolate_fail;
+			}
+			#else
 			if (!locked)
 				break;
 
 			/* Recheck this is a buddy page under lock */
 			if (!PageBuddy(page))
 				goto isolate_fail;
+			#endif
 		}
 
 		/* Found a free page, will break it into order-0 pages */
 		order = page_order(page);
 		isolated = __isolate_free_page(page, order);
+		#ifdef CONFIG_AMLOGIC_CMA
+		if (!isolated) {
+			if (strict)
+				cma_debug(1, page, "isolate free failed:%lx, o:%d\n",
+					  page_to_pfn(page), order);
+			break;
+		}
+		#else
 		if (!isolated)
 			break;
+		#endif
 		set_page_private(page, order);
 
 		total_isolated += isolated;
@@ -599,9 +657,17 @@ isolate_freepages_range(struct compact_control *cc,
 			block_end_pfn = min(block_end_pfn, end_pfn);
 		}
 
+		#ifdef CONFIG_AMLOGIC_CMA
+		if (!pageblock_pfn_to_page(block_start_pfn,
+					   block_end_pfn, cc->zone)) {
+			cma_debug(1, NULL, " no page block\n");
+			break;
+		}
+		#else
 		if (!pageblock_pfn_to_page(block_start_pfn,
 					   block_end_pfn, cc->zone))
 			break;
+		#endif
 
 		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
 						block_end_pfn, &freelist, true);
@@ -611,8 +677,15 @@ isolate_freepages_range(struct compact_control *cc,
 		 * there are any holes in the block (ie. invalid PFNs or
 		 * non-free pages).
 		 */
+		#ifdef CONFIG_AMLOGIC_CMA
+		if (!isolated) {
+			cma_debug(1, NULL, " isolate free page failed\n");
+			break;
+		}
+		#else
 		if (!isolated)
 			break;
+		#endif
 
 		/*
 		 * If we managed to isolate pages, it is always (1 << n) *
@@ -626,6 +699,10 @@ isolate_freepages_range(struct compact_control *cc,
 	if (pfn < end_pfn) {
 		/* Loop terminated early, cleanup. */
+		#ifdef CONFIG_AMLOGIC_CMA
+		cma_debug(1, NULL, "pfn:%lx, end:%lx, start:%lx\n",
+			  pfn, end_pfn, start_pfn);
+		#endif
 		release_freepages(&freelist);
 		return 0;
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index 821623f..0512185 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -415,6 +415,11 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
+		#ifdef CONFIG_AMLOGIC_CMA
+		if (page_count(page) != expected_count)
+			cma_debug(2, page, " anon page count mismatch, e:%d\n",
+				  expected_count);
+		#endif
 		if (page_count(page) != expected_count)
 			return -EAGAIN;
 
@@ -439,6 +444,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	if (page_count(page) != expected_count ||
 		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
+		#ifdef CONFIG_AMLOGIC_CMA
+		cma_debug(2, page, " page count mismatch, e:%d, p:%d\n",
+			  expected_count, page_has_private(page));
+		#endif
 		return -EAGAIN;
 	}
 
@@ -1017,11 +1026,21 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 			goto out_unlock_both;
 		}
 	} else if (page_mapped(page)) {
+		#ifdef CONFIG_AMLOGIC_CMA
+		int ret;
+
+		ret = try_to_unmap(page,
+				   TTU_MIGRATION | TTU_IGNORE_MLOCK |
+				   TTU_IGNORE_ACCESS);
+		if (ret != SWAP_SUCCESS)
+			cma_debug(2, page, " unmap failed:%d\n", ret);
+		#else
 		/* Establish migration ptes */
 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
 				page);
 		try_to_unmap(page,
 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+		#endif
 		page_was_mapped = 1;
 	}
 
@@ -1341,6 +1360,9 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 			switch(rc) {
 			case -ENOMEM:
 				nr_failed++;
+				#ifdef CONFIG_AMLOGIC_CMA
+				cma_debug(2, page, " no memory\n");
+				#endif
 				goto out;
 			case -EAGAIN:
 				retry++;
@@ -1356,6 +1378,9 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 				 * retried in the next outer loop.
 				 */
 				nr_failed++;
+				#ifdef CONFIG_AMLOGIC_CMA
+				cma_debug(2, page, " failed:%d\n", rc);
+				#endif
 				break;
 			}
 		}
-- 
2.7.4
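
Usage sketch (not part of the patch): a minimal userspace program that drives
the new node. It only assumes /proc/cma_debug as created above, and that
cma_debug(l, ...) prints when l < cma_debug_level, so writing 2 enables the
level-0 and level-1 messages; "echo 2 > /proc/cma_debug" from a shell does
the same thing.

#include <stdio.h>

int main(void)
{
	FILE *f;
	char buf[32];

	/* set the debug level; cma_debug_write() accepts 0..MAX_DEBUG_LEVEL (5) */
	f = fopen("/proc/cma_debug", "w");
	if (!f) {
		perror("/proc/cma_debug");
		return 1;
	}
	fprintf(f, "2\n");
	fclose(f);

	/* read back the current setting; cma_debug_show() prints "level=2" */
	f = fopen("/proc/cma_debug", "r");
	if (!f) {
		perror("/proc/cma_debug");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);

	return 0;
}

Writing 0 silences the output again, which matches the default behavior
described in the commit message.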