int file_cma = 0;
int cma_forbid = 0;
- if (cma_forbidden_mask(sc->gfp_mask)) {
+ if (cma_forbidden_mask(sc->gfp_mask) && !current_is_kswapd()) {
free_cma = global_page_state(NR_FREE_CMA_PAGES);
file_cma = global_page_state(NR_INACTIVE_FILE_CMA) +
global_page_state(NR_ACTIVE_FILE_CMA);
#include <linux/huge_mm.h>
#include <linux/swap.h>
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
#include <linux/page-isolation.h>
-#endif /* CONFIG_AMLOGIC_CMA */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
/**
* page_is_file_cache - should the page be on a file LRU or anon LRU?
/*
 * Patch hunk: add_page_to_lru_list().
 * Effect of this hunk: (1) renames the vendor guard CONFIG_AMLOGIC_CMA ->
 * CONFIG_AMLOGIC_MEMORY_EXTEND; (2) drops the separate per-LRU cma_list
 * threading — CMA/isolate pages are now added to the normal LRU list like
 * any other page — keeping only the NR_*_CMA vmstat accounting.
 * `num` is the fixed offset from a base LRU stat item to its CMA
 * counterpart (NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON).
 * NOTE(review): the NR_*_CMA items must be declared under the same
 * CONFIG_AMLOGIC_MEMORY_EXTEND guard used here — TODO confirm, since other
 * hunks of this patch use CONFIG_AMLOGIC_MODIFY for the same counters.
 */
static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
	int nr_pages = hpage_nr_pages(page);
	int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
	int migrate_type = 0;
-#endif /* CONFIG_AMLOGIC_CMA */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
-#ifdef CONFIG_AMLOGIC_CMA
/* New code: single unconditional list_add; no CMA-specific list position. */
+	list_add(&page->lru, &lruvec->lists[lru]);
+
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
	migrate_type = get_pageblock_migratetype(page);
/*
 * Braces removed because the body shrank to one statement (the two
 * physical lines below are a single __mod_zone_page_state() call).
 */
-	if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type)) {
+	if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type))
		__mod_zone_page_state(page_zone(page),
			NR_LRU_BASE + lru + num, nr_pages);
/* Old cma_list bookkeeping deleted along with lruvec->cma_list[] itself. */
-		list_add_tail(&page->lru, lruvec->cma_list[lru]);
-		/* Always to point to first cma page */
-		lruvec->cma_list[lru] = &page->lru;
-	} else
-		list_add(&page->lru, &lruvec->lists[lru]);
-#else
-	list_add(&page->lru, &lruvec->lists[lru]);
-#endif /* CONFIG_AMLOGIC_CMA */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
}
/*
 * Patch hunk: del_page_from_lru_list() — mirror of the add-side change.
 * list_del()/update_lru_size() are hoisted above the migratetype check so
 * the common path no longer depends on the CMA bookkeeping; only the
 * NR_*_CMA vmstat decrement remains under the renamed guard. The
 * cma_list[] cursor maintenance is deleted (the field itself is removed
 * from struct lruvec by another hunk of this patch).
 */
static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
	int nr_pages = hpage_nr_pages(page);
/* Offset from a base LRU stat item to its CMA counterpart. */
	int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
	int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
/*
 * Unconditional removal first; get_pageblock_migratetype() below is still
 * safe afterwards because it reads the pageblock flags, not page->lru.
 */
+	list_del(&page->lru);
+	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
	migrate_type = get_pageblock_migratetype(page);
/* Braces dropped: body is now the single multi-line vmstat call. */
-	if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type)) {
+	if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type))
		__mod_zone_page_state(page_zone(page),
			NR_LRU_BASE + lru + num, -nr_pages);
-		if (lruvec->cma_list[lru] == &page->lru)
-			lruvec->cma_list[lru] = page->lru.next;
-	}
-#endif /* CONFIG_AMLOGIC_CMA */
-	list_del(&page->lru);
-	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
}
/**
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
-#ifdef CONFIG_AMLOGIC_CMA
- struct list_head *cma_list[NR_LRU_LISTS];
-#endif /* CONFIG_AMLOGIC_CMA */
/* Evictions & activations on the inactive file list */
atomic_long_t inactive_age;
#ifdef CONFIG_MEMCG
#endif
};
-#ifdef CONFIG_AMLOGIC_CMA
-static inline bool lru_normal_empty(enum lru_list lru, struct lruvec *lruv)
-{
- if (lruv->lists[lru].next == lruv->cma_list[lru])
- return true;
- else
- return false;
-}
-#endif /* CONFIG_AMLOGIC_CMA */
-
-
/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]);
-#ifdef CONFIG_AMLOGIC_CMA
- for_each_lru(lru)
- lruvec->cma_list[lru] = &lruvec->lists[lru];
-#endif /* CONFIG_AMLOGIC_CMA */
}
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
enum lru_list lru = page_lru_base_type(page);
- #ifdef CONFIG_AMLOGIC_CMA
- if (!cma_page(page)) {
- list_move_tail(&page->lru, lruvec->cma_list[lru]);
- } else {
- if ((lruvec->cma_list[lru] == &page->lru) &&
- (page->lru.next != &lruvec->lists[lru]))
- lruvec->cma_list[lru] = page->lru.next;
- list_move_tail(&page->lru, &lruvec->lists[lru]);
- }
- #else
list_move_tail(&page->lru, &lruvec->lists[lru]);
- #endif /* CONFIG_AMLOGIC_CMA */
(*pgmoved)++;
}
}
* The page's writeback ends up during pagevec
* We moves tha page into tail of inactive.
*/
- #ifdef CONFIG_AMLOGIC_CMA
- if (!cma_page(page)) {
- list_move_tail(&page->lru, lruvec->cma_list[lru]);
- } else {
- if ((lruvec->cma_list[lru] == &page->lru) &&
- (page->lru.next != &lruvec->lists[lru]))
- lruvec->cma_list[lru] = page->lru.next;
- list_move_tail(&page->lru, &lruvec->lists[lru]);
- }
- #else
list_move_tail(&page->lru, &lruvec->lists[lru]);
- #endif /* CONFIG_AMLOGIC_CMA */
__count_vm_event(PGROTATED);
}
unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long scan, nr_pages;
LIST_HEAD(pages_skipped);
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MODIFY
int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
- bool use_cma = true, is_cma_page;
-
- if (cma_forbidden_mask(sc->gfp_mask))
- use_cma = false;
-#endif /* CONFIG_AMLOGIC_CMA */
+ int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MODIFY */
for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
!list_empty(src);) {
struct page *page;
- #ifdef CONFIG_AMLOGIC_CMA
- page = NULL;
- if (!use_cma) {
- if (!lru_normal_empty(lru, lruvec))
- page = lru_to_page(lruvec->cma_list[lru]);
- }
- if (!page)
- page = lru_to_page(src);
- is_cma_page = cma_page(page);
- #else
page = lru_to_page(src);
- #endif /* CONFIG_AMLOGIC_CMA */
prefetchw_prev_lru_page(page, src, flags);
VM_BUG_ON_PAGE(!PageLRU(page), page);
nr_pages = hpage_nr_pages(page);
nr_taken += nr_pages;
nr_zone_taken[page_zonenum(page)] += nr_pages;
- #ifdef CONFIG_AMLOGIC_CMA
- if (is_cma_page) {
- __mod_zone_page_state(page_zone(page),
- NR_LRU_BASE + lru + num,
- -nr_pages);
- if (lruvec->cma_list[lru] == &page->lru)
- lruvec->cma_list[lru] = page->lru.next;
- }
- #endif /* CONFIG_AMLOGIC_CMA */
list_move(&page->lru, dst);
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ migrate_type = get_pageblock_migratetype(page);
+ if (is_migrate_cma(migrate_type) ||
+ is_migrate_isolate(migrate_type))
+ __mod_zone_page_state(page_zone(page),
+ NR_LRU_BASE + lru + num,
+ -nr_pages);
+ #endif /* CONFIG_AMLOGIC_MODIFY */
break;
case -EBUSY:
/* else it is being freed elsewhere */
- #ifdef CONFIG_AMLOGIC_CMA
- if (is_cma_page) {
- list_move(&page->lru,
- lruvec->cma_list[lru]->prev);
- lruvec->cma_list[lru] = &page->lru;
- } else
- list_move(&page->lru, src);
- #else
list_move(&page->lru, src);
- #endif /* CONFIG_AMLOGIC_CMA */
continue;
default:
unsigned long pgmoved = 0;
struct page *page;
int nr_pages;
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MODIFY
int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
- bool is_cma_page;
-#endif /* CONFIG_AMLOGIC_CMA */
+ int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MODIFY */
while (!list_empty(list)) {
page = lru_to_page(list);
nr_pages = hpage_nr_pages(page);
update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
- #ifdef CONFIG_AMLOGIC_CMA
- is_cma_page = cma_page(page);
- if (is_cma_page) {
+ list_move(&page->lru, &lruvec->lists[lru]);
+ pgmoved += nr_pages;
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ migrate_type = get_pageblock_migratetype(page);
+ if (is_migrate_cma(migrate_type) ||
+ is_migrate_isolate(migrate_type))
__mod_zone_page_state(page_zone(page),
NR_LRU_BASE + lru + num,
nr_pages);
- list_move(&page->lru, lruvec->cma_list[lru]->prev);
- lruvec->cma_list[lru] = &page->lru;
- } else
- list_move(&page->lru, &lruvec->lists[lru]);
- #else
- list_move(&page->lru, &lruvec->lists[lru]);
- #endif /* CONFIG_AMLOGIC_CMA */
- pgmoved += nr_pages;
+ #endif /* CONFIG_AMLOGIC_MODIFY */
if (put_page_testzero(page)) {
__ClearPageLRU(page);