From 5d782cb05790798da1d5ab923084ca7390720367 Mon Sep 17 00:00:00 2001
From: tao zeng
Date: Mon, 29 Oct 2018 11:12:11 +0800
Subject: [PATCH] mm: avoid migrating pages to a different zone [1/1]

PD#SWPL-881

Problem:
Amlogic modified the compaction code for the normal allocation
path. A side effect is that pages in the normal zone may be
migrated to the highmem zone, which makes page_address() return
NULL and causes a kernel panic.

Solution:
Avoid migrating pages to a different zone by adding a
forbid_to_cma flag: it is cleared at the start of each compaction
run, latched in check_page_to_cma() while pages are isolated for
migration, and once set prevents free pages from being isolated
out of CMA pageblocks.

Verify:
P212

Change-Id: I1d9c6653dc1069562db3c1be3f53a3510a51f0d1
Signed-off-by: tao zeng
---
 include/linux/amlogic/aml_cma.h |  1 +
 mm/compaction.c                 | 37 ++++++++++++++++++++++++++-----------
 2 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/include/linux/amlogic/aml_cma.h b/include/linux/amlogic/aml_cma.h
index 947fd0ae..4acb96f 100644
--- a/include/linux/amlogic/aml_cma.h
+++ b/include/linux/amlogic/aml_cma.h
@@ -50,6 +50,7 @@ struct compact_control {
 	const int classzone_idx;	/* zone index of a direct compactor */
 	struct zone *zone;
 	bool contended;			/* Signal lock or sched contention */
+	bool forbid_to_cma;		/* Forbid migrating to CMA */
 };
 
 static inline bool cma_forbidden_mask(gfp_t gfp_flags)
diff --git a/mm/compaction.c b/mm/compaction.c
index b24f499..2d9ee59 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -648,6 +648,25 @@ static bool too_many_isolated(struct zone *zone)
 	return isolated > (inactive + active) / 2;
 }
 
+#ifdef CONFIG_AMLOGIC_CMA
+static void check_page_to_cma(struct compact_control *cc, struct page *page)
+{
+	struct address_space *mapping;
+
+	if (cc->forbid_to_cma)	/* no need to check once it is true */
+		return;
+
+	mapping = page_mapping(page);
+	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
+		mapping = NULL;
+
+	if (PageKsm(page) && !PageSlab(page))
+		cc->forbid_to_cma = true;
+
+	if (mapping && cma_forbidden_mask(mapping_gfp_mask(mapping)))
+		cc->forbid_to_cma = true;
+}
+#endif
 
 /**
  * isolate_migratepages_block() - isolate all migrate-able pages within
@@ -746,6 +765,9 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		page = pfn_to_page(low_pfn);
 
+	#ifdef CONFIG_AMLOGIC_CMA
+		check_page_to_cma(cc, page);
+	#endif
 		if (!valid_page)
 			valid_page = page;
 
@@ -1098,7 +1120,7 @@ static void isolate_freepages(struct compact_control *cc)
 		migrate_type = get_pageblock_migratetype(page);
 		if (is_migrate_isolate(migrate_type))
 			continue;
-		if (is_migrate_cma(migrate_type) && cma_alloc_ref())
+		if (is_migrate_cma(migrate_type) && cc->forbid_to_cma)
 			continue;
 #endif /* CONFIG_AMLOGIC_CMA */
 		/* Found a block suitable for isolating free pages from. */
@@ -1151,16 +1173,6 @@ static struct page *compaction_alloc(struct page *migratepage,
 {
 	struct compact_control *cc = (struct compact_control *)data;
 	struct page *freepage;
-#ifdef CONFIG_AMLOGIC_CMA
-	struct address_space *mapping;
-
-	mapping = page_mapping(migratepage);
-	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
-		mapping = NULL;
-
-	if (mapping && !can_use_cma(mapping_gfp_mask(mapping)))
-		return alloc_page(mapping_gfp_mask(mapping) | __GFP_BDEV);
-#endif
 
 	/*
 	 * Isolate free pages if necessary, and if we are not aborting due to
@@ -1558,6 +1570,9 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
 
 	migrate_prep_local();
 
+#ifdef CONFIG_AMLOGIC_CMA
+	cc->forbid_to_cma = false;
+#endif
 	while ((ret = compact_finished(zone, cc, migratetype)) ==
 						COMPACT_CONTINUE) {
 		int err;
-- 
2.7.4
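
For reference, below is a minimal userspace sketch of the sticky-flag
technique this patch applies: the flag starts clear for each compaction
run, is latched as soon as any page in the migration batch must not land
in CMA, and is then consulted before free pages from CMA pageblocks are
handed out as migration targets. All names here (page_stub,
compact_ctl_stub, GFP_NO_CMA) are hypothetical stand-ins, not kernel APIs.

/*
 * Hypothetical sketch only: models the forbid_to_cma latch outside
 * the kernel. Types and flags are illustrative, not real kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define GFP_NO_CMA 0x1	/* stand-in for a "must not use CMA" gfp bit */

struct page_stub {
	unsigned int gfp_mask;	/* gfp mask of the page's mapping */
	bool is_cma_block;	/* free page lives in a CMA pageblock */
};

struct compact_ctl_stub {
	bool forbid_to_cma;	/* sticky: once set, stays set for the run */
};

/* Mirrors check_page_to_cma(): latch the flag, never clear it mid-run. */
static void check_page(struct compact_ctl_stub *cc, const struct page_stub *p)
{
	if (cc->forbid_to_cma)	/* no need to check once it is true */
		return;
	if (p->gfp_mask & GFP_NO_CMA)
		cc->forbid_to_cma = true;
}

/* Mirrors the isolate_freepages() change: skip CMA blocks when latched. */
static bool can_use_as_target(const struct compact_ctl_stub *cc,
			      const struct page_stub *freepage)
{
	return !(freepage->is_cma_block && cc->forbid_to_cma);
}

int main(void)
{
	struct compact_ctl_stub cc = { .forbid_to_cma = false };
	struct page_stub migrate[] = {
		{ .gfp_mask = 0 },
		{ .gfp_mask = GFP_NO_CMA },	/* poisons the whole run */
	};
	struct page_stub cma_free = { .is_cma_block = true };
	unsigned int i;

	for (i = 0; i < 2; i++)
		check_page(&cc, &migrate[i]);

	/* Prints 0: once latched, CMA pageblocks are off-limits. */
	printf("CMA target allowed: %d\n", can_use_as_target(&cc, &cma_free));
	return 0;
}

One run-wide latch is coarser than the removed per-page check in
compaction_alloc(), but it guarantees no page in the batch can be paired
with a free page from a zone or pageblock it must not occupy.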