mm: change lmk/cma usage policy [1/1]
authorTao Zeng <tao.zeng@amlogic.com>
Wed, 16 Oct 2019 08:44:11 +0000 (16:44 +0800)
committerJianxin Pan <jianxin.pan@amlogic.com>
Fri, 25 Oct 2019 07:09:00 +0000 (00:09 -0700)
PD#TV-10462

Problem:
Memory allocation may fail when playing a secure video source, usually
seen in the zram/wifi drivers.

Solution:
1. wake up kswapd earlier if the watermark check fails once free CMA
   pages are excluded;
2. use per-zone file-cache statistics in LMK, which is more accurate
   than using global page state (see the sketch after this list);
3. remove some restrictions on using CMA when allocating movable pages
   for zram or when migrating pages out of the CMA pool;
4. try harder to allocate for atomic requests issued in softirq context.
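
Point 2 essentially replaces the node-wide file-page arithmetic with
per-zone counters. Below is a simplified, userspace-style model of that
accounting; the struct, helper name and numbers are illustrative only,
while the real code reads vmstat counters such as NR_ZONE_INACTIVE_FILE
inside lowmem_scan():

#include <stdio.h>

/* Hypothetical stand-in for the per-zone vmstat counters read by lowmem_scan() */
struct zone_stat {
        long inactive_file;     /* NR_ZONE_INACTIVE_FILE */
        long active_file;       /* NR_ZONE_ACTIVE_FILE */
        long unevictable;       /* NR_ZONE_UNEVICTABLE */
};

/* Pages LMK may treat as droppable file cache in one zone */
static long other_file_for_zone(const struct zone_stat *z, long shmem,
                                long swapcache)
{
        long zfile = z->inactive_file + z->active_file;

        return zfile - shmem - z->unevictable - swapcache;
}

int main(void)
{
        struct zone_stat normal = { 30000, 20000, 500 };

        /* 30000 + 20000 - 4000 - 500 - 1000 = 44500 pages */
        printf("other_file = %ld pages\n",
               other_file_for_zone(&normal, 4000, 1000));
        return 0;
}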

Verify:
T950L

Change-Id: Ibf03f3c11a32175e9983ee8a61a14ae4b2436f1e
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
drivers/amlogic/memory_ext/aml_cma.c
drivers/staging/android/lowmemorykiller.c
mm/page_alloc.c

index 809cf13..c75b823 100644
@@ -117,13 +117,6 @@ bool can_use_cma(gfp_t gfp_flags)
        if (cma_forbidden_mask(gfp_flags))
                return false;
 
-       /*
-        * do not use cma pages when cma allocate is working. this is the
-        * weakest condition
-        */
-       if (cma_alloc_ref())
-               return false;
-
        if (task_nice(current) > 0)
                return false;
 
@@ -229,7 +222,7 @@ static unsigned long get_align_pfn_high(unsigned long pfn)
 static struct page *get_migrate_page(struct page *page, unsigned long private,
                                  int **resultp)
 {
-       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_BDEV;
+       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
        struct page *new = NULL;
 #ifdef CONFIG_AMLOGIC_PAGE_TRACE
        struct page_trace *old_trace, *new_trace;
index 9e54901..2915e05 100644
@@ -139,16 +139,38 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
        short selected_oom_score_adj;
        int array_size = ARRAY_SIZE(lowmem_adj);
        int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
-       int other_file = global_node_page_state(NR_FILE_PAGES) -
-                               global_node_page_state(NR_SHMEM) -
-                               global_node_page_state(NR_UNEVICTABLE) -
-                               total_swapcache_pages();
 #ifdef CONFIG_AMLOGIC_CMA
+       int other_file;
+       struct zone *z = NULL;
+       pg_data_t *pgdat;
        int free_cma   = 0;
        int file_cma   = 0;
        int cma_forbid = 0;
-
-       if (cma_forbidden_mask(sc->gfp_mask) && !current_is_kswapd()) {
+       int zfile      = 0;
+       int globle_file = global_node_page_state(NR_FILE_PAGES) -
+                         global_node_page_state(NR_SHMEM) -
+                         global_node_page_state(NR_UNEVICTABLE) -
+                         total_swapcache_pages();
+
+       if (gfp_zone(sc->gfp_mask) == ZONE_NORMAL) {
+               /* use zone page state for better accuracy */
+               pgdat = NODE_DATA(sc->nid);
+               z     = &pgdat->node_zones[ZONE_NORMAL];
+               if (managed_zone(z)) {
+                       zfile = zone_page_state(z, NR_ZONE_INACTIVE_FILE) +
+                               zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+                       other_file = zfile -
+                                    global_node_page_state(NR_SHMEM) -
+                                    zone_page_state(z, NR_ZONE_UNEVICTABLE) -
+                                    total_swapcache_pages();
+               } else {
+                       other_file = globle_file;
+               }
+       } else {
+               other_file = globle_file;
+       }
+       if (cma_forbidden_mask(sc->gfp_mask) &&
+           (!current_is_kswapd() || cma_alloc_ref())) {
                free_cma    = global_page_state(NR_FREE_CMA_PAGES);
                file_cma    = global_page_state(NR_INACTIVE_FILE_CMA) +
                              global_page_state(NR_ACTIVE_FILE_CMA);
@@ -156,6 +178,11 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
                other_file -= file_cma;
                cma_forbid  = 1;
        }
+#else
+       int other_file = global_node_page_state(NR_FILE_PAGES) -
+                               global_node_page_state(NR_SHMEM) -
+                               global_node_page_state(NR_UNEVICTABLE) -
+                               total_swapcache_pages();
 #endif /* CONFIG_AMLOGIC_CMA */
 
        if (lowmem_adj_size < array_size)
@@ -254,6 +281,12 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
                } else {
                        lowmem_deathpending_timeout = jiffies + HZ;
                }
+               if (z)
+                       pr_info("  zone:%s, file:%d, shmem:%ld, unevc:%ld, file_cma:%d\n",
+                               z->name, zfile,
+                               global_node_page_state(NR_SHMEM),
+                               zone_page_state(z, NR_ZONE_UNEVICTABLE),
+                               file_cma);
        #else
                lowmem_deathpending_timeout = jiffies + HZ;
        #endif /* CONFIG_AMLOGIC_CMA */
index 9c45a55..6180636 100644
@@ -3017,9 +3017,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 
 #ifdef CONFIG_CMA
        /* If allocation can't use CMA areas don't use free CMA pages */
-#ifndef CONFIG_AMLOGIC_CMA /* always sub cma pages to avoid wm all CMA */
        if (!(alloc_flags & ALLOC_CMA))
-#endif
                free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
 #endif
 
@@ -3196,6 +3194,12 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                        if (alloc_flags & ALLOC_NO_WATERMARKS)
                                goto try_this_zone;
 
+               #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+                       /* alloc harder if under softirq */
+                       if (in_serving_softirq() && (gfp_mask & __GFP_ATOMIC))
+                               goto try_this_zone;
+               #endif
+
                        if (node_reclaim_mode == 0 ||
                            !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
                                continue;
@@ -4005,15 +4009,22 @@ static inline void should_wakeup_kswap(gfp_t gfp_mask, int order,
        unsigned long free_pages, free_cma = 0;
        struct zoneref *z = ac->preferred_zoneref;
        struct zone *zone;
+       unsigned long high_wm;
 
-       if (!(gfp_mask & __GFP_RECLAIM))        /* not allowed */
+       /*
+        * Bail out only if the flags do not allow reclaim. For
+        * __GFP_ATOMIC requests we still pre-wake kswapd so that
+        * a large burst of allocation requests does not fail in a
+        * very short time.
+        */
+       if (!(gfp_mask & __GFP_RECLAIM) && !(gfp_mask & __GFP_ATOMIC))
                return;
 
        for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
                                                                ac->nodemask) {
                free_pages = zone_page_state(zone, NR_FREE_PAGES);
        #ifdef CONFIG_AMLOGIC_CMA
-               if (can_use_cma(gfp_mask))
+               if (!can_use_cma(gfp_mask))
                        free_cma = zone_page_state(zone, NR_FREE_CMA_PAGES);
        #endif /* CONFIG_AMLOGIC_CMA */
                free_pages -= free_cma;
@@ -4022,7 +4033,10 @@ static inline void should_wakeup_kswap(gfp_t gfp_mask, int order,
                 * fast reclaim process and can avoid memory become too low
                 * some times
                 */
-               if (free_pages <= high_wmark_pages(zone))
+               high_wm = high_wmark_pages(zone);
+               if (gfp_mask & __GFP_HIGH) /* 1.5x if __GFP_HIGH */
+                       high_wm = ((high_wm * 3) / 2);
+               if (free_pages <= high_wm)
                        wakeup_kswapd(zone, order, ac->high_zoneidx);
        }
 }
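
For reference, a minimal standalone sketch of the kswapd pre-wake
threshold added to should_wakeup_kswap() above; should_prewake_kswapd()
and the numbers are hypothetical, and the real code operates on struct
zone watermarks. Free CMA pages are subtracted when the request may not
use CMA, and the high watermark is scaled to 1.5x for __GFP_HIGH:

#include <stdbool.h>
#include <stdio.h>

static bool should_prewake_kswapd(unsigned long free_pages,
                                  unsigned long free_cma,
                                  unsigned long high_wmark,
                                  bool can_use_cma, bool gfp_high)
{
        unsigned long high_wm = high_wmark;

        if (!can_use_cma)
                free_pages -= free_cma; /* CMA pages do not help this request */
        if (gfp_high)
                high_wm = high_wm * 3 / 2; /* 1.5x high watermark */

        return free_pages <= high_wm; /* true: wake kswapd early */
}

int main(void)
{
        /* 1400 free - 300 CMA = 1100 <= 1500 (1.5 * 1000): pre-wake kswapd */
        printf("%d\n", should_prewake_kswapd(1400, 300, 1000, false, true));
        return 0;
}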