#include <linux/spinlock_types.h>
#include <linux/amlogic/aml_cma.h>
#include <linux/hugetlb.h>
+#include <linux/proc_fs.h>
#include <trace/events/page_isolation.h>
#ifdef CONFIG_AMLOGIC_PAGE_TRACE
#include <linux/amlogic/page_trace.h>
#endif /* CONFIG_AMLOGIC_PAGE_TRACE */
+#define MAX_DEBUG_LEVEL 5
+
struct work_cma {
struct list_head list;
unsigned long pfn;
static bool can_boost;
static DEFINE_PER_CPU(struct cma_pcp, cma_pcp_thread);
+static struct proc_dir_entry *dentry;
+int cma_debug_level;
DEFINE_SPINLOCK(cma_iso_lock);
static atomic_t cma_allocate;
pfn = isolate_migratepages_range(cc, pfn, end);
if (!pfn) {
ret = -EINTR;
+ cma_debug(1, NULL, " iso migrate page fail\n");
break;
}
tries = 0;
drain_local_pages(NULL);
}
if (ret)
- pr_debug("%s, failed, ret:%d\n", __func__, ret);
+ cma_debug(1, NULL, "failed, ret:%d\n", ret);
next:
complete(&c_work->end);
if (kthread_should_stop()) {
return ret;
}
-/*
- * Some of these functions are implemented from page_isolate.c
- */
-static bool can_free_list_page(struct page *page, struct list_head *list)
-{
-#if 0
- unsigned long flags;
- bool ret = false;
-
- if (!spin_trylock_irqsave(&cma_iso_lock, flags))
- return ret;
-
- if (!(page->flags & PAGE_FLAGS_CHECK_AT_FREE) &&
- !PageSwapBacked(page) &&
- (page->lru.next != LIST_POISON1)) {
- if (list_empty(&page->lru))
- list_add(&page->lru, list);
- else
- list_move(&page->lru, list);
- ret = true;
- }
- spin_unlock_irqrestore(&cma_iso_lock, flags);
- return ret;
-#else
- return false;
-#endif
-}
-
static int __aml_check_pageblock_isolate(unsigned long pfn,
unsigned long end_pfn,
bool skip_hwpoisoned_pages,
*/
pfn++;
} else {
- /* This page can be freed ? */
- if (!page_count(page)) {
- if (can_free_list_page(page, list)) {
- pfn++;
- continue;
- }
- }
+ cma_debug(1, page, " isolate failed\n");
break;
}
}
};
INIT_LIST_HEAD(&cc.migratepages);
+ cma_debug(0, NULL, " range [%lx-%lx]\n", start, end);
ret = start_isolate_page_range(get_align_pfn_low(start),
get_align_pfn_high(end), MIGRATE_CMA,
false);
- if (ret)
+ if (ret) {
+ cma_debug(1, NULL, "ret:%d\n", ret);
return ret;
+ }
try_again:
/*
} else
ret = aml_alloc_contig_migrate_range(&cc, start, end, 0);
- if (ret && ret != -EBUSY)
+ if (ret && ret != -EBUSY) {
+ cma_debug(1, NULL, "ret:%d\n", ret);
goto done;
+ }
ret = 0;
if (!boost_ok) {
/* Make sure the range is really isolated. */
if (aml_check_pages_isolated(outer_start, end, false)) {
- pr_debug("%s check_pages_isolated(%lx, %lx) failed\n",
- __func__, outer_start, end);
+ cma_debug(1, NULL, "check page isolate(%lx, %lx) failed\n",
+ outer_start, end);
try_times++;
if (try_times < 10)
goto try_again;
pr_info("cma_alloc [%lx-%lx] aborted\n", start, end);
} else
ret = -EBUSY;
+ cma_debug(1, NULL, "iso free range(%lx, %lx) failed\n",
+ outer_start, end);
goto done;
}
}
EXPORT_SYMBOL(aml_cma_free);
+/*
+ * show_page - dump one-line diagnostics for a page (pfn, mapping, flags,
+ * mapcount, refcount and, when page tracing is enabled, the allocator).
+ * NULL-safe: silently returns for a NULL page.
+ */
+void show_page(struct page *page)
+{
+	unsigned long trace = 0;
+
+	if (!page)
+		return;
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+	/* presumably the allocation call-site PC; resolved below via %pf */
+	trace = get_page_trace(page);
+#endif
+	/* first f: low 32 bits of page->flags; last f: symbolized trace */
+	pr_info("page:%lx, map:%p, f:%lx, m:%d, c:%d, f:%pf\n",
+		page_to_pfn(page), page->mapping,
+		page->flags & 0xffffffff,
+		page_mapcount(page), page_count(page),
+		(void *)trace);
+}
+
+/* seq_file show callback: report the current verbosity as "level=N". */
+static int cma_debug_show(struct seq_file *m, void *arg)
+{
+	seq_printf(m, "level=%d\n", cma_debug_level);
+	return 0;
+}
+
+/*
+ * Write handler for /proc/cma_debug.
+ *
+ * Parses a decimal verbosity level from userspace and stores it in
+ * cma_debug_level. Values outside [0, MAX_DEBUG_LEVEL] (including
+ * negatives, which the original check let through) are rejected.
+ *
+ * Returns @count on success, -EINVAL on parse failure or out-of-range.
+ */
+static ssize_t cma_debug_write(struct file *file, const char __user *buffer,
+			       size_t count, loff_t *ppos)
+{
+	int arg = 0;
+
+	if (kstrtoint_from_user(buffer, count, 10, &arg))
+		return -EINVAL;
+
+	/* bound-check both ends; a negative level is never meaningful */
+	if (arg < 0 || arg > MAX_DEBUG_LEVEL)
+		return -EINVAL;
+
+	cma_debug_level = arg;
+	return count;
+}
+
+/* open callback: wire reads through single_open/cma_debug_show. */
+static int cma_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cma_debug_show, NULL);
+}
+
+/* /proc/cma_debug: read shows the level (seq_file), write sets it. */
+static const struct file_operations cma_dbg_file_ops = {
+	.open = cma_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = cma_debug_write,
+	.release = single_release,
+};
+
static int __init aml_cma_init(void)
{
atomic_set(&cma_allocate, 0);
atomic_long_set(&nr_cma_allocated, 0);
+ dentry = proc_create("cma_debug", 0644, NULL, &cma_dbg_file_ops);
+ if (IS_ERR_OR_NULL(dentry)) {
+ pr_err("%s, create sysfs failed\n", __func__);
+ return -1;
+ }
+
return 0;
}
arch_initcall(aml_cma_init);
extern unsigned long get_cma_allocated(void);
extern unsigned long get_total_cmapages(void);
extern spinlock_t cma_iso_lock;
+extern int cma_debug_level;
extern int aml_cma_alloc_range(unsigned long start, unsigned long end);
extern void aml_cma_free(unsigned long pfn, unsigned int nr_pages);
extern unsigned long reclaim_clean_pages_from_list(struct zone *zone,
struct list_head *page_list);
+extern void show_page(struct page *page);
unsigned long
isolate_freepages_range(struct compact_control *cc,
unsigned long start_pfn, unsigned long end_pfn);
struct page *compaction_cma_alloc(struct page *migratepage,
unsigned long data,
int **result);
+
+#define cma_debug(l, p, format, args...) \
+ { \
+ if (l < cma_debug_level) { \
+ show_page(p); \
+ pr_info("%s, %d "format, __func__, __LINE__, ##args); \
+ } \
+ }
+
+
#endif /* __AMLOGIC_CMA_H__ */
pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
count, align);
+#ifdef CONFIG_AMLOGIC_CMA
+ cma_debug(0, NULL, "(cma %p, count %zu, align %d)\n",
+ (void *)cma, count, align);
+#endif
if (!count)
return NULL;
#ifdef CONFIG_AMLOGIC_CMA
aml_cma_alloc_post_hook(&dummy, count, page);
+ cma_debug(0, NULL, "return %p\n", page);
#endif /* CONFIG_AMLOGIC_CMA */
pr_debug("%s(): returned %p\n", __func__, page);
return page;
* contention, to give chance to IRQs. Abort if fatal signal
* pending or async compaction detects need_resched()
*/
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (!(blockpfn % SWAP_CLUSTER_MAX)
+ && compact_unlock_should_abort(&cc->zone->lock, flags,
+ &locked, cc)) {
+ if (strict)
+ cma_debug(1, page, "abort, blk:%lx, swap:%ld\n",
+ blockpfn, SWAP_CLUSTER_MAX);
+ break;
+ }
+ #else
if (!(blockpfn % SWAP_CLUSTER_MAX)
&& compact_unlock_should_abort(&cc->zone->lock, flags,
&locked, cc))
break;
+ #endif
nr_scanned++;
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (!pfn_valid_within(blockpfn)) {
+ if (strict)
+ cma_debug(1, page, "invalid pfn:%lx\n",
+ blockpfn);
+ goto isolate_fail;
+ }
+ #else
if (!pfn_valid_within(blockpfn))
goto isolate_fail;
+ #endif
if (!valid_page)
valid_page = page;
blockpfn += (1UL << comp_order) - 1;
cursor += (1UL << comp_order) - 1;
}
-
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (strict)
+ cma_debug(1, page, "compound page:%lx\n",
+ page_to_pfn(page));
+ #endif
goto isolate_fail;
}
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (!PageBuddy(page)) {
+ if (strict)
+ cma_debug(1, page, " NO buddy page1:%lx\n",
+ page_to_pfn(page));
+ goto isolate_fail;
+ }
+ #else
if (!PageBuddy(page))
goto isolate_fail;
+ #endif
/*
* If we already hold the lock, we can skip some rechecking.
*/
locked = compact_trylock_irqsave(&cc->zone->lock,
&flags, cc);
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (!locked) {
+ if (strict)
+ cma_debug(1, page, " lock failed:%lx\n",
+ page_to_pfn(page));
+ break;
+ }
+ /* Recheck this is a buddy page under lock */
+ if (!PageBuddy(page)) {
+ if (strict)
+ cma_debug(1, page, " No buddy2:%lx\n",
+ page_to_pfn(page));
+ goto isolate_fail;
+ }
+ #else
if (!locked)
break;
/* Recheck this is a buddy page under lock */
if (!PageBuddy(page))
goto isolate_fail;
+ #endif
}
/* Found a free page, will break it into order-0 pages */
order = page_order(page);
isolated = __isolate_free_page(page, order);
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (!isolated) {
+ if (strict)
+ cma_debug(1, page, "iso free fail:%lx, o:%d\n",
+ page_to_pfn(page), order);
+ break;
+ }
+ #else
if (!isolated)
break;
+ #endif
set_page_private(page, order);
total_isolated += isolated;
block_end_pfn = min(block_end_pfn, end_pfn);
}
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (!pageblock_pfn_to_page(block_start_pfn,
+ block_end_pfn, cc->zone)) {
+ cma_debug(1, NULL, " no page block\n");
+ break;
+ }
+ #else
if (!pageblock_pfn_to_page(block_start_pfn,
block_end_pfn, cc->zone))
break;
+ #endif
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
block_end_pfn, &freelist, true);
* there are any holes in the block (ie. invalid PFNs or
* non-free pages).
*/
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (!isolated) {
+ cma_debug(1, NULL, " isolate free page failed\n");
+ break;
+ }
+ #else
if (!isolated)
break;
+ #endif
/*
* If we managed to isolate pages, it is always (1 << n) *
if (pfn < end_pfn) {
/* Loop terminated early, cleanup. */
+ #ifdef CONFIG_AMLOGIC_CMA
+ cma_debug(1, NULL, "pfn:%lx, end:%lx, start:%lx\n",
+ pfn, end_pfn, start_pfn);
+ #endif
release_freepages(&freelist);
return 0;
}
if (!mapping) {
/* Anonymous page without mapping */
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (page_count(page) != expected_count)
+ cma_debug(2, page, " anon page cnt miss match, e:%d\n",
+ expected_count);
+ #endif
if (page_count(page) != expected_count)
return -EAGAIN;
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
spin_unlock_irq(&mapping->tree_lock);
+ #ifdef CONFIG_AMLOGIC_CMA
+ cma_debug(2, page, " anon page cnt miss match, e:%d, p:%d\n",
+ expected_count, page_has_private(page));
+ #endif
return -EAGAIN;
}
goto out_unlock_both;
}
} else if (page_mapped(page)) {
+ #ifdef CONFIG_AMLOGIC_CMA
+ int ret;
+
+ ret = try_to_unmap(page,
+ TTU_MIGRATION | TTU_IGNORE_MLOCK |
+ TTU_IGNORE_ACCESS);
+ if (ret != SWAP_SUCCESS)
+ cma_debug(2, page, " unmap failed:%d\n", ret);
+ #else
/* Establish migration ptes */
VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
page);
try_to_unmap(page,
TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ #endif
page_was_mapped = 1;
}
switch(rc) {
case -ENOMEM:
nr_failed++;
+ #ifdef CONFIG_AMLOGIC_CMA
+ cma_debug(2, page, " NO MEM\n");
+ #endif
goto out;
case -EAGAIN:
retry++;
* retried in the next outer loop.
*/
nr_failed++;
+ #ifdef CONFIG_AMLOGIC_CMA
+ cma_debug(2, page, " failed:%d\n", rc);
+ #endif
break;
}
}