mm: update cma policy and debug sysfs
authorTao Zeng <tao.zeng@amlogic.com>
Wed, 6 Sep 2017 08:38:09 +0000 (16:38 +0800)
committerJianxin Pan <jianxin.pan@amlogic.com>
Thu, 7 Sep 2017 03:24:23 +0000 (20:24 -0700)
PD#150194: mm: update cma policy and debug sysfs

1. For movable allocations, use CMA first; this helps avoid
   low-memory conditions and cases where drivers cannot allocate
   pages;
2. extend the /proc/slabinfo output to print the total size of
   each slab;
3. extend /proc/pagetypeinfo with the total pages of each migrate
   type;
4. add statistics for free pages of each migrate type, shown by
   echo m > /proc/sysrq-trigger (see the reader sketch below).

Change-Id: I2fdab73c2d1278cd025185362a1159e4f683166b
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
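
The per-migrate-type counters of item 4 are plain zone stats; a minimal
reader sketch, assuming the NR_FREE_* layout introduced in mmzone.h below
(the helper name is illustrative, not part of this patch):

    /* Illustrative only: system-wide free pages of one migrate type,
     * read through the new NR_FREE_* zone stat items. */
    static unsigned long free_pages_of_type(int migratetype)
    {
            return global_page_state(NR_FREE_UNMOVABLE + migratetype);
    }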
include/linux/cma.h
include/linux/mmzone.h
include/linux/vmstat.h
mm/cma.c
mm/page_alloc.c
mm/slab_common.c
mm/vmstat.c

index d127368..c73a16d 100644 (file)
@@ -31,5 +31,6 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int
 
 #ifdef CONFIG_AMLOGIC_MODIFY
 extern bool cma_suitable(gfp_t gfp_mask);
+extern unsigned long get_driver_alloc_cma(void);
 #endif /* CONFIG_AMLOGIC_MODIFY */
 #endif
index 6744eb4..71735f0 100644 (file)
@@ -137,7 +137,19 @@ enum zone_stat_item {
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
 #endif
+#ifdef CONFIG_AMLOGIC_MODIFY /* track free pages per migrate type */
+       NR_FREE_UNMOVABLE,
+       NR_FREE_MOVABLE,
+       NR_FREE_RECLAIMABLE,
+       NR_FREE_HIGHATOMIC,
+#endif /* CONFIG_AMLOGIC_MODIFY */
        NR_FREE_CMA_PAGES,
+#ifdef CONFIG_AMLOGIC_MODIFY
+       /* Keep this in the same order as MIGRATE_TYPES */
+#ifdef CONFIG_MEMORY_ISOLATION
+       NR_FREE_ISOLATE,
+#endif
+#endif /* CONFIG_AMLOGIC_MODIFY */
        NR_VM_ZONE_STAT_ITEMS };
 
 enum node_stat_item {
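
The new items must stay in lockstep with MIGRATE_TYPES; NR_FREE_CMA_PAGES
deliberately occupies the MIGRATE_CMA slot. A sketch of the ordering
assumption this encodes (illustrative helper, not in the patch):

    static inline enum zone_stat_item free_stat_of(int migratetype)
    {
            /* Valid only while the NR_FREE_* items mirror the order of
             * MIGRATE_TYPES, with NR_FREE_CMA_PAGES in the CMA slot. */
            return NR_FREE_UNMOVABLE + migratetype;
    }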
index 6137719..8801000 100644 (file)
@@ -345,10 +345,25 @@ static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
                                             int migratetype)
 {
        __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
+#ifndef CONFIG_AMLOGIC_MODIFY
        if (is_migrate_cma(migratetype))
                __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
+#endif /* !CONFIG_AMLOGIC_MODIFY */
 }
 
+#ifdef CONFIG_AMLOGIC_MODIFY
+/* track free pages according to migrate type */
+static inline void __mod_zone_migrate_state(struct zone *zone, int nr_pages,
+                                           int migratetype)
+{
+       if (migratetype >= MIGRATE_TYPES || migratetype < MIGRATE_UNMOVABLE) {
+               WARN(1, "wrong type:%d\n", migratetype);
+               return;
+       }
+       zone_page_state_add(nr_pages, zone, NR_FREE_UNMOVABLE + migratetype);
+}
+#endif /* CONFIG_AMLOGIC_MODIFY */
+
 extern const char * const vmstat_text[];
 
 #endif /* _LINUX_VMSTAT_H */
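
Callers below pair a decrement on the old type with an increment on the
new one whenever a free block changes lists; a minimal sketch of the
idiom (mirroring move_freepages() in page_alloc.c):

    /* A block of 2^order free pages moves from old_mt to new_mt. */
    __mod_zone_migrate_state(zone, -(1 << order), old_mt);
    __mod_zone_migrate_state(zone, (1 << order), new_mt);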
index 5e19358..e8d02c8 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
 struct cma cma_areas[MAX_CMA_AREAS];
 unsigned cma_area_count;
 static DEFINE_MUTEX(cma_mutex);
+#ifdef CONFIG_AMLOGIC_MODIFY
+/* number of CMA pages allocated by drivers */
+static atomic_long_t driver_alloc_cma;
+unsigned long get_driver_alloc_cma(void)
+{
+       return atomic_long_read(&driver_alloc_cma);
+}
+#endif /* CONFIG_AMLOGIC_MODIFY */
 
 phys_addr_t cma_get_base(const struct cma *cma)
 {
@@ -152,6 +160,9 @@ static int __init cma_init_reserved_areas(void)
                if (ret)
                        return ret;
        }
+#ifdef CONFIG_AMLOGIC_MODIFY
+       atomic_long_set(&driver_alloc_cma, 0);
+#endif /* CONFIG_AMLOGIC_MODIFY */
 
        return 0;
 }
@@ -426,6 +437,10 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 
        trace_cma_alloc(pfn, page, count, align);
 
+#ifdef CONFIG_AMLOGIC_MODIFY
+       if (page)
+               atomic_long_add(count, &driver_alloc_cma);
+#endif /* CONFIG_AMLOGIC_MODIFY */
        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
 }
@@ -460,6 +475,9 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
        cma_clear_bitmap(cma, pfn, count);
        trace_cma_release(pfn, pages, count);
 
+#ifdef CONFIG_AMLOGIC_MODIFY
+       atomic_long_sub(count, &driver_alloc_cma);
+#endif /* CONFIG_AMLOGIC_MODIFY */
        return true;
 }
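
The counter covers only pages obtained through cma_alloc()/cma_release();
movable pages that the buddy allocator pulls from CMA via the fallback
path are not included. A minimal consumer sketch (the kB conversion is
illustrative; show_free_areas() below prints the raw page count):

    unsigned long pages = get_driver_alloc_cma();

    pr_info("driver-held CMA: %lu pages (%lu kB)\n",
            pages, pages << (PAGE_SHIFT - 10));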
 
index ebe834e..c287b47 100644 (file)
@@ -806,6 +806,9 @@ static inline void __free_one_page(struct page *page,
        unsigned long uninitialized_var(buddy_idx);
        struct page *buddy;
        unsigned int max_order;
+#ifdef CONFIG_AMLOGIC_MODIFY
+       int buddy_mg;
+#endif /* CONFIG_AMLOGIC_MODIFY */
 
        max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 
@@ -834,6 +837,19 @@ continue_merging:
                if (page_is_guard(buddy)) {
                        clear_page_guard(zone, buddy, order, migratetype);
                } else {
+               #ifdef CONFIG_AMLOGIC_MODIFY
+                       /*
+                        * The kernel already exposes related information in
+                        * /proc/pagetypeinfo and /proc/buddyinfo, but neither
+                        * gives a summary of free pages for each migrate
+                        * type.
+                        * Keep the per-migrate-type counters in step with
+                        * free_area's nr_free; the result is shown by
+                        * echo m > /proc/sysrq-trigger.
+                        */
+                       buddy_mg = get_pcppage_migratetype(buddy);
+                       __mod_zone_migrate_state(zone, -(1 << order), buddy_mg);
+               #endif /* CONFIG_AMLOGIC_MODIFY */
                        list_del(&buddy->lru);
                        zone->free_area[order].nr_free--;
                        rmv_page_order(buddy);
@@ -895,6 +911,10 @@ done_merging:
        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
 out:
        zone->free_area[order].nr_free++;
+#ifdef CONFIG_AMLOGIC_MODIFY
+       set_pcppage_migratetype(page, migratetype);
+       __mod_zone_migrate_state(zone, (1 << order), migratetype);
+#endif /* CONFIG_AMLOGIC_MODIFY */
 }
 
 /*
@@ -1663,6 +1683,10 @@ static inline void expand(struct zone *zone, struct page *page,
 
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               set_pcppage_migratetype(&page[size], migratetype);
+               __mod_zone_migrate_state(zone, (1 << high), migratetype);
+       #endif /* CONFIG_AMLOGIC_MODIFY */
                set_page_order(&page[size], high);
        }
 }
@@ -1817,6 +1841,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               __mod_zone_migrate_state(zone, -(1 << current_order),
+                                        migratetype);
+       #endif /* CONFIG_AMLOGIC_MODIFY */
                expand(zone, page, order, current_order, area, migratetype);
                set_pcppage_migratetype(page, migratetype);
                return page;
@@ -1865,6 +1893,9 @@ int move_freepages(struct zone *zone,
        struct page *page;
        unsigned int order;
        int pages_moved = 0;
+#ifdef CONFIG_AMLOGIC_MODIFY
+       int list_type;
+#endif /* CONFIG_AMLOGIC_MODIFY */
 
 #ifndef CONFIG_HOLES_IN_ZONE
        /*
@@ -1894,6 +1925,12 @@ int move_freepages(struct zone *zone,
                order = page_order(page);
                list_move(&page->lru,
                          &zone->free_area[order].free_list[migratetype]);
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               list_type = get_pcppage_migratetype(page);
+               __mod_zone_migrate_state(zone, -(1 << order), list_type);
+               __mod_zone_migrate_state(zone, (1 << order), migratetype);
+               set_pcppage_migratetype(page, migratetype);
+       #endif /* CONFIG_AMLOGIC_MODIFY */
                page += 1 << order;
                pages_moved += 1 << order;
        }
@@ -2152,6 +2189,10 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 
                /* Remove the page from the freelists */
                area->nr_free--;
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               __mod_zone_migrate_state(zone, -(1 << current_order),
+                                        fallback_mt);
+       #endif /* CONFIG_AMLOGIC_MODIFY */
                list_del(&page->lru);
                rmv_page_order(page);
 
@@ -2184,10 +2225,24 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 {
        struct page *page;
 
+#ifdef CONFIG_AMLOGIC_MODIFY
+       /* use CMA first */
+       if (migratetype == MIGRATE_MOVABLE) {
+               page = __rmqueue_cma_fallback(zone, order);
+               if (page) {
+                       trace_mm_page_alloc_zone_locked(page, order,
+                                                       MIGRATE_CMA);
+                       return page;
+               }
+       }
+#endif /* CONFIG_AMLOGIC_MODIFY */
+
        page = __rmqueue_smallest(zone, order, migratetype);
        if (unlikely(!page)) {
+       #ifndef CONFIG_AMLOGIC_MODIFY   /* CMA was already tried first */
                if (migratetype == MIGRATE_MOVABLE)
                        page = __rmqueue_cma_fallback(zone, order);
+       #endif /* !CONFIG_AMLOGIC_MODIFY */
 
                if (!page)
                        page = __rmqueue_fallback(zone, order, migratetype);
@@ -2253,9 +2308,11 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        list_add_tail(&page->lru, list);
                list = &page->lru;
                alloced++;
+       #ifndef CONFIG_AMLOGIC_MODIFY
                if (is_migrate_cma(get_pcppage_migratetype(page)))
                        __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
                                              -(1 << order));
+       #endif /* !CONFIG_AMLOGIC_MODIFY */
        }
 
        /*
@@ -2575,6 +2632,10 @@ int __isolate_free_page(struct page *page, unsigned int order)
        /* Remove page from free list */
        list_del(&page->lru);
        zone->free_area[order].nr_free--;
+#ifdef CONFIG_AMLOGIC_MODIFY
+       __mod_zone_migrate_state(zone, -(1 << order),
+                                get_pcppage_migratetype(page));
+#endif /* CONFIG_AMLOGIC_MODIFY */
        rmv_page_order(page);
 
        /*
@@ -2660,15 +2721,45 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                        else
                                page = list_first_entry(list, struct page, lru);
 
-               #ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MODIFY
+                       /*
+                        * "CMA first" policy cases:
+                        * 1. a CMA page was returned to the pcp list and may be
+                        *    allocated next, but the gfp mask is not CMA-suitable;
+                        * 2. a MOVABLE page was returned to the pcp list and may
+                        *    be allocated next, and the gfp mask is CMA-suitable.
+                        *
+                        * For case 1, replace with a non-CMA page; for case 2,
+                        * replace with a CMA page before the page is deleted
+                        * from the pcp list.
+                        */
                        if (!cma_suitable(gfp_flags) &&
                            is_migrate_cma_page(page)) {
+                               /* case 1 */
                                page = rmqueue_no_cma(zone, order, migratetype);
                                if (page)
                                        break;
                                goto failed;
+                       } else if ((migratetype == MIGRATE_MOVABLE) &&
+                           (get_pcppage_migratetype(page) != MIGRATE_CMA) &&
+                           cma_suitable(gfp_flags)) {
+                               struct page *tmp_page;
+
+                               spin_lock(&zone->lock);
+                               tmp_page = __rmqueue_cma_fallback(zone, order);
+                               /* can't alloc cma pages or not ready */
+                               if (!tmp_page || check_new_pcp(page)) {
+                                       spin_unlock(&zone->lock);
+                                       goto use_pcp;
+                               }
+                               page = tmp_page;
+                               __mod_zone_freepage_state(zone, -(1 << order),
+                                       get_pcppage_migratetype(page));
+                               spin_unlock(&zone->lock);
+                               goto alloc_success;
                        }
-               #endif /* CONFIG_AMLOGIC_MODIFY */
+use_pcp:
+#endif /* CONFIG_AMLOGIC_MODIFY */
 
                        list_del(&page->lru);
                        pcp->count--;
@@ -2699,6 +2790,9 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                                          get_pcppage_migratetype(page));
        }
 
+#ifdef CONFIG_AMLOGIC_MODIFY
+alloc_success:
+#endif /* CONFIG_AMLOGIC_MODIFY */
        __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
        zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);
@@ -4344,6 +4438,9 @@ void show_free_areas(unsigned int filter)
                " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
                " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               " driver_cma:%lu"
+       #endif /* CONFIG_AMLOGIC_MODIFY */
                " free:%lu free_pcp:%lu free_cma:%lu\n",
                global_node_page_state(NR_ACTIVE_ANON),
                global_node_page_state(NR_INACTIVE_ANON),
@@ -4361,6 +4458,9 @@ void show_free_areas(unsigned int filter)
                global_node_page_state(NR_SHMEM),
                global_page_state(NR_PAGETABLE),
                global_page_state(NR_BOUNCE),
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               get_driver_alloc_cma(),
+       #endif /* CONFIG_AMLOGIC_MODIFY */
                global_page_state(NR_FREE_PAGES),
                free_pcp,
                global_page_state(NR_FREE_CMA_PAGES));
@@ -4445,6 +4545,15 @@ void show_free_areas(unsigned int filter)
                        " bounce:%lukB"
                        " free_pcp:%lukB"
                        " local_pcp:%ukB"
+               #ifdef CONFIG_AMLOGIC_MODIFY
+                       " free_unmovable:%lukB"
+                       " free_movable:%lukB"
+                       " free_reclaimable:%lukB"
+                       " free_highatomic:%lukB"
+               #ifdef CONFIG_MEMORY_ISOLATION
+                       " free_isolate:%lukB"
+               #endif
+               #endif /* CONFIG_AMLOGIC_MODIFY */
                        " free_cma:%lukB"
                        "\n",
                        zone->name,
@@ -4468,6 +4577,15 @@ void show_free_areas(unsigned int filter)
                        K(zone_page_state(zone, NR_BOUNCE)),
                        K(free_pcp),
                        K(this_cpu_read(zone->pageset->pcp.count)),
+               #ifdef CONFIG_AMLOGIC_MODIFY
+                       K(zone_page_state(zone, NR_FREE_UNMOVABLE)),
+                       K(zone_page_state(zone, NR_FREE_MOVABLE)),
+                       K(zone_page_state(zone, NR_FREE_RECLAIMABLE)),
+                       K(zone_page_state(zone, NR_FREE_HIGHATOMIC)),
+               #ifdef CONFIG_MEMORY_ISOLATION
+                       K(zone_page_state(zone, NR_FREE_ISOLATE)),
+               #endif
+               #endif /* CONFIG_AMLOGIC_MODIFY */
                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
                printk("lowmem_reserve[]:");
                for (i = 0; i < MAX_NR_ZONES; i++)
@@ -7499,6 +7617,10 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
                list_del(&page->lru);
                rmv_page_order(page);
                zone->free_area[order].nr_free--;
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               __mod_zone_migrate_state(zone, -(1 << order),
+                                        get_pcppage_migratetype(page));
+       #endif /* CONFIG_AMLOGIC_MODIFY */
                for (i = 0; i < (1 << order); i++)
                        SetPageReserved((page+i));
                pfn += (1 << order);
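
Taken together, movable requests now search CMA first, then the native
free lists, then the regular fallbacks. A condensed sketch of the
modified __rmqueue() flow above (not the literal code; tracing omitted):

    static struct page *rmqueue_sketch(struct zone *zone,
                                       unsigned int order, int migratetype)
    {
            struct page *page = NULL;

            if (migratetype == MIGRATE_MOVABLE)     /* CMA first */
                    page = __rmqueue_cma_fallback(zone, order);
            if (!page)
                    page = __rmqueue_smallest(zone, order, migratetype);
            if (!page)
                    page = __rmqueue_fallback(zone, order, migratetype);
            return page;
    }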
index 5d2f24f..3852861 100644 (file)
@@ -1119,7 +1119,13 @@ static void print_slabinfo_header(struct seq_file *m)
 #else
        seq_puts(m, "slabinfo - version: 2.1\n");
 #endif
+#ifdef CONFIG_AMLOGIC_MODIFY
+       /* add total bytes for each slab */
+       seq_puts(m, "# name                        <active_objs> <num_objs> ");
+       seq_puts(m, "<objsize> <objperslab> <pagesperslab> <total bytes>");
+#else
        seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
+#endif /* CONFIG_AMLOGIC_MODIFY */
        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #ifdef CONFIG_DEBUG_SLAB
@@ -1169,15 +1175,28 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
 static void cache_show(struct kmem_cache *s, struct seq_file *m)
 {
        struct slabinfo sinfo;
+#ifdef CONFIG_AMLOGIC_MODIFY
+       char name[32];
+       long total;
+#endif
 
        memset(&sinfo, 0, sizeof(sinfo));
        get_slabinfo(s, &sinfo);
 
        memcg_accumulate_slabinfo(s, &sinfo);
 
+#ifdef CONFIG_AMLOGIC_MODIFY
+       strlcpy(name, cache_name(s), sizeof(name));
+       total = sinfo.num_objs * s->size;
+       seq_printf(m, "%-31s %6lu %6lu %6u %4u %4d %8lu",
+                  name, sinfo.active_objs, sinfo.num_objs, s->size,
+                  sinfo.objects_per_slab, (1 << sinfo.cache_order),
+                  total);
+#else
        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
                   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
                   sinfo.objects_per_slab, (1 << sinfo.cache_order));
+#endif /* CONFIG_AMLOGIC_MODIFY */
 
        seq_printf(m, " : tunables %4u %4u %4u",
                   sinfo.limit, sinfo.batchcount, sinfo.shared);
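
Each /proc/slabinfo row gains a <total bytes> column computed as
num_objs * objsize. An illustrative line under the new header (values
made up):

    kmalloc-128                       1024   1024    128   32    1   131072 : tunables    0    0    0 : slabdata     32     32      0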
index 604f26a..90738d1 100644 (file)
@@ -1164,12 +1164,18 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
 {
        int order, mtype;
+#ifdef CONFIG_AMLOGIC_MODIFY
+       unsigned long total;
+#endif /* CONFIG_AMLOGIC_MODIFY */
 
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
                seq_printf(m, "Node %4d, zone %8s, type %12s ",
                                        pgdat->node_id,
                                        zone->name,
                                        migratetype_names[mtype]);
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               total = 0;
+       #endif /* CONFIG_AMLOGIC_MODIFY */
                for (order = 0; order < MAX_ORDER; ++order) {
                        unsigned long freecount = 0;
                        struct free_area *area;
@@ -1180,7 +1186,14 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
                        list_for_each(curr, &area->free_list[mtype])
                                freecount++;
                        seq_printf(m, "%6lu ", freecount);
+               #ifdef CONFIG_AMLOGIC_MODIFY
+                       total += (freecount << order);
+               #endif /* CONFIG_AMLOGIC_MODIFY */
                }
+       #ifdef CONFIG_AMLOGIC_MODIFY
+               /* show total pages for each migrate type */
+               seq_printf(m, " %6lu", total);
+       #endif /* CONFIG_AMLOGIC_MODIFY */
                seq_putc(m, '\n');
        }
 }
@@ -1195,6 +1208,9 @@ static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
        seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6d ", order);
+#ifdef CONFIG_AMLOGIC_MODIFY
+       seq_printf(m, "%s", "  total");
+#endif /* CONFIG_AMLOGIC_MODIFY */
        seq_putc(m, '\n');
 
        walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
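
The resulting /proc/pagetypeinfo rows gain a trailing total column, where
total is the sum of freecount << order across all orders. An illustrative
excerpt (values made up):

    Free pages count per migrate type at order       0      1      2      3  ...   total
    Node    0, zone   Normal, type    Unmovable     12      5      3      1  ...     158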