mm: optimize for low memory killer
author     tao zeng <tao.zeng@amlogic.com>
           Tue, 17 Apr 2018 07:29:50 +0000 (15:29 +0800)
committer  Yixun Lan <yixun.lan@amlogic.com>
           Wed, 18 Apr 2018 01:58:08 +0000 (17:58 -0800)
PD#164558: mm: optimize for low memory killer

1. Remove the CMA-specific LRU list changes;
2. Only subtract CMA file caches when the current task is not kswapd (a condensed sketch of this check follows the file list below).

Change-Id: I09ad86fc9754c1136cd976d656a023dee6cbe2eb
Signed-off-by: tao zeng <tao.zeng@amlogic.com>
drivers/staging/android/lowmemorykiller.c
include/linux/mm_inline.h
include/linux/mmzone.h
mm/mmzone.c
mm/swap.c
mm/vmscan.c

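Below is a condensed, hypothetical sketch of the guarded accounting introduced in the lowmemorykiller hunk. cma_forbidden_mask() and the NR_INACTIVE_FILE_CMA/NR_ACTIVE_FILE_CMA counters are the Amlogic vendor helpers visible in this patch (not mainline APIs), the helper name lmk_discount_cma() is invented for illustration, and it assumes the driver later subtracts the CMA totals from its free and file-cache estimates, as the free_cma/file_cma locals in the hunk suggest.

#include <linux/gfp.h>      /* gfp_t */
#include <linux/swap.h>     /* current_is_kswapd() */
#include <linux/vmstat.h>   /* global_page_state() */

/*
 * Sketch only: discount CMA pages from the shrinker's view of free
 * memory and file cache, but never when running as kswapd, since
 * kswapd reclaims on behalf of all allocation types and CMA pages
 * are still usable for it.
 */
static void lmk_discount_cma(gfp_t gfp_mask, int *free, int *file)
{
	if (!cma_forbidden_mask(gfp_mask) || current_is_kswapd())
		return;

	*free -= global_page_state(NR_FREE_CMA_PAGES);
	*file -= global_page_state(NR_INACTIVE_FILE_CMA) +
		 global_page_state(NR_ACTIVE_FILE_CMA);
}
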
index 8774de0..9e54901 100644 (file)
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -148,7 +148,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
        int file_cma   = 0;
        int cma_forbid = 0;
 
-       if (cma_forbidden_mask(sc->gfp_mask)) {
+       if (cma_forbidden_mask(sc->gfp_mask) && !current_is_kswapd()) {
                free_cma    = global_page_state(NR_FREE_CMA_PAGES);
                file_cma    = global_page_state(NR_INACTIVE_FILE_CMA) +
                              global_page_state(NR_ACTIVE_FILE_CMA);
index 8fc56ac..a60c09c 100644 (file)
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -3,9 +3,9 @@
 
 #include <linux/huge_mm.h>
 #include <linux/swap.h>
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
 #include <linux/page-isolation.h>
-#endif /* CONFIG_AMLOGIC_CMA */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
 
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
@@ -49,46 +49,41 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 static __always_inline void add_page_to_lru_list(struct page *page,
                                struct lruvec *lruvec, enum lru_list lru)
 {
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
        int nr_pages = hpage_nr_pages(page);
        int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
        int migrate_type = 0;
-#endif /* CONFIG_AMLOGIC_CMA */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
 
        update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
-#ifdef CONFIG_AMLOGIC_CMA
+       list_add(&page->lru, &lruvec->lists[lru]);
+
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
        migrate_type = get_pageblock_migratetype(page);
-       if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type)) {
+       if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type))
                __mod_zone_page_state(page_zone(page),
                                      NR_LRU_BASE + lru + num, nr_pages);
-               list_add_tail(&page->lru, lruvec->cma_list[lru]);
-               /* Always to point to first cma page */
-               lruvec->cma_list[lru] = &page->lru;
-       } else
-               list_add(&page->lru, &lruvec->lists[lru]);
-#else
-       list_add(&page->lru, &lruvec->lists[lru]);
-#endif /* CONFIG_AMLOGIC_CMA */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
 }
 
 static __always_inline void del_page_from_lru_list(struct page *page,
                                struct lruvec *lruvec, enum lru_list lru)
 {
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
        int nr_pages = hpage_nr_pages(page);
        int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
        int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
 
+       list_del(&page->lru);
+       update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
        migrate_type = get_pageblock_migratetype(page);
-       if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type)) {
+       if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type))
                __mod_zone_page_state(page_zone(page),
                                      NR_LRU_BASE + lru + num, -nr_pages);
-               if (lruvec->cma_list[lru] == &page->lru)
-                       lruvec->cma_list[lru] = page->lru.next;
-       }
-#endif /* CONFIG_AMLOGIC_CMA */
-       list_del(&page->lru);
-       update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
 }
 
 /**
index 082280d..9d7c732 100644 (file)
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -244,9 +244,6 @@ struct zone_reclaim_stat {
 struct lruvec {
        struct list_head                lists[NR_LRU_LISTS];
        struct zone_reclaim_stat        reclaim_stat;
-#ifdef CONFIG_AMLOGIC_CMA
-       struct list_head *cma_list[NR_LRU_LISTS];
-#endif /* CONFIG_AMLOGIC_CMA */
        /* Evictions & activations on the inactive file list */
        atomic_long_t                   inactive_age;
 #ifdef CONFIG_MEMCG
@@ -254,17 +251,6 @@ struct lruvec {
 #endif
 };
 
-#ifdef CONFIG_AMLOGIC_CMA
-static inline bool lru_normal_empty(enum lru_list lru, struct lruvec *lruv)
-{
-       if (lruv->lists[lru].next == lruv->cma_list[lru])
-               return true;
-       else
-               return false;
-}
-#endif /* CONFIG_AMLOGIC_CMA */
-
-
 /* Mask used at gathering information at once (see memcontrol.c) */
 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
index 83da7b2..5652be8 100644 (file)
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -93,10 +93,6 @@ void lruvec_init(struct lruvec *lruvec)
 
        for_each_lru(lru)
                INIT_LIST_HEAD(&lruvec->lists[lru]);
-#ifdef CONFIG_AMLOGIC_CMA
-       for_each_lru(lru)
-               lruvec->cma_list[lru] = &lruvec->lists[lru];
-#endif /* CONFIG_AMLOGIC_CMA */
 }
 
 #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
index e7f4169..4dcf852 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -210,18 +210,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
-       #ifdef CONFIG_AMLOGIC_CMA
-               if (!cma_page(page)) {
-                       list_move_tail(&page->lru, lruvec->cma_list[lru]);
-               } else {
-                       if ((lruvec->cma_list[lru] == &page->lru) &&
-                                       (page->lru.next != &lruvec->lists[lru]))
-                               lruvec->cma_list[lru] = page->lru.next;
-                       list_move_tail(&page->lru, &lruvec->lists[lru]);
-               }
-       #else
                list_move_tail(&page->lru, &lruvec->lists[lru]);
-       #endif /* CONFIG_AMLOGIC_CMA */
                (*pgmoved)++;
        }
 }
@@ -560,18 +549,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                 * The page's writeback ends up during pagevec
                 * We moves tha page into tail of inactive.
                 */
-       #ifdef CONFIG_AMLOGIC_CMA
-               if (!cma_page(page)) {
-                       list_move_tail(&page->lru, lruvec->cma_list[lru]);
-               } else {
-                       if ((lruvec->cma_list[lru] == &page->lru) &&
-                                       (page->lru.next != &lruvec->lists[lru]))
-                               lruvec->cma_list[lru] = page->lru.next;
-                       list_move_tail(&page->lru, &lruvec->lists[lru]);
-               }
-       #else
                list_move_tail(&page->lru, &lruvec->lists[lru]);
-       #endif /* CONFIG_AMLOGIC_CMA */
                __count_vm_event(PGROTATED);
        }
 
index 3a65c54..04d109a 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1456,30 +1456,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
        unsigned long scan, nr_pages;
        LIST_HEAD(pages_skipped);
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MODIFY
        int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
-       bool use_cma = true, is_cma_page;
-
-       if (cma_forbidden_mask(sc->gfp_mask))
-               use_cma = false;
-#endif /* CONFIG_AMLOGIC_CMA */
+       int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MODIFY */
 
        for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
                                        !list_empty(src);) {
                struct page *page;
 
-       #ifdef CONFIG_AMLOGIC_CMA
-               page = NULL;
-               if (!use_cma) {
-                       if (!lru_normal_empty(lru, lruvec))
-                               page = lru_to_page(lruvec->cma_list[lru]);
-               }
-               if (!page)
-                       page = lru_to_page(src);
-               is_cma_page = cma_page(page);
-       #else
                page = lru_to_page(src);
-       #endif /* CONFIG_AMLOGIC_CMA */
                prefetchw_prev_lru_page(page, src, flags);
 
                VM_BUG_ON_PAGE(!PageLRU(page), page);
@@ -1501,30 +1487,20 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                        nr_pages = hpage_nr_pages(page);
                        nr_taken += nr_pages;
                        nr_zone_taken[page_zonenum(page)] += nr_pages;
-               #ifdef CONFIG_AMLOGIC_CMA
-                       if (is_cma_page) {
-                               __mod_zone_page_state(page_zone(page),
-                                                     NR_LRU_BASE + lru + num,
-                                                     -nr_pages);
-                               if (lruvec->cma_list[lru] == &page->lru)
-                                       lruvec->cma_list[lru] = page->lru.next;
-                       }
-               #endif /* CONFIG_AMLOGIC_CMA */
                        list_move(&page->lru, dst);
+               #ifdef CONFIG_AMLOGIC_MODIFY
+                       migrate_type = get_pageblock_migratetype(page);
+                       if (is_migrate_cma(migrate_type) ||
+                           is_migrate_isolate(migrate_type))
+                               __mod_zone_page_state(page_zone(page),
+                                       NR_LRU_BASE + lru + num,
+                                       -nr_pages);
+               #endif /* CONFIG_AMLOGIC_MODIFY */
                        break;
 
                case -EBUSY:
                        /* else it is being freed elsewhere */
-               #ifdef CONFIG_AMLOGIC_CMA
-                       if (is_cma_page) {
-                               list_move(&page->lru,
-                                               lruvec->cma_list[lru]->prev);
-                               lruvec->cma_list[lru] = &page->lru;
-                       } else
-                               list_move(&page->lru, src);
-               #else
                        list_move(&page->lru, src);
-               #endif /* CONFIG_AMLOGIC_CMA */
                        continue;
 
                default:
@@ -1932,10 +1908,10 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
        unsigned long pgmoved = 0;
        struct page *page;
        int nr_pages;
-#ifdef CONFIG_AMLOGIC_CMA
+#ifdef CONFIG_AMLOGIC_MODIFY
        int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
-       bool is_cma_page;
-#endif /* CONFIG_AMLOGIC_CMA */
+       int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MODIFY */
 
        while (!list_empty(list)) {
                page = lru_to_page(list);
@@ -1946,20 +1922,16 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
 
                nr_pages = hpage_nr_pages(page);
                update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
-       #ifdef CONFIG_AMLOGIC_CMA
-               is_cma_page = cma_page(page);
-               if (is_cma_page) {
+               list_move(&page->lru, &lruvec->lists[lru]);
+               pgmoved += nr_pages;
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               migrate_type = get_pageblock_migratetype(page);
+               if (is_migrate_cma(migrate_type) ||
+                   is_migrate_isolate(migrate_type))
                        __mod_zone_page_state(page_zone(page),
                                              NR_LRU_BASE + lru + num,
                                              nr_pages);
-                       list_move(&page->lru, lruvec->cma_list[lru]->prev);
-                       lruvec->cma_list[lru] = &page->lru;
-               } else
-                       list_move(&page->lru, &lruvec->lists[lru]);
-       #else
-               list_move(&page->lru, &lruvec->lists[lru]);
-       #endif /* CONFIG_AMLOGIC_CMA */
-               pgmoved += nr_pages;
+       #endif /* CONFIG_AMLOGIC_MODIFY */
 
                if (put_page_testzero(page)) {
                        __ClearPageLRU(page);