mm: save memory wasted by slab [1/1]
author tao zeng <tao.zeng@amlogic.com>
Wed, 14 Nov 2018 02:29:45 +0000 (10:29 +0800)
committer Jianxin Pan <jianxin.pan@amlogic.com>
Tue, 18 Dec 2018 07:10:21 +0000 (23:10 -0800)
PD#SWPL-1767

Problem:
When a driver or the kernel calls kmalloc() with a large size, memory
is wasted if the size is not a power of two (2^n). For example, if a
driver calls kmalloc() with a size of 129KB, kmalloc() allocates a
256KB block, so 127KB is wasted until the caller frees it.
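
For illustration only (not part of the patch), a minimal userspace
sketch of the power-of-two rounding behind the waste; order_for()
mimics the kernel's get_order() helper:

  #include <stdio.h>

  #define PAGE_SIZE 4096UL

  /* smallest page order whose block covers 'size' bytes */
  static int order_for(unsigned long size)
  {
          int order = 0;

          while ((PAGE_SIZE << order) < size)
                  order++;
          return order;
  }

  int main(void)
  {
          unsigned long size = 129 * 1024;        /* the 129KB example */
          int order = order_for(size);
          unsigned long block = PAGE_SIZE << order;

          /* prints: 129KB -> order 6, block 256KB, waste 127KB */
          printf("%luKB -> order %d, block %luKB, waste %luKB\n",
                 size >> 10, order, block >> 10, (block - size) >> 10);
          return 0;
  }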

Solution:
Free the unused tail pages of the allocation when the requested size
is not a power of two. This change saves about 900KB of memory after
boot, and more than 100KB during run time.
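
In outline, the mechanism the diff below implements (a condensed,
slightly reordered sketch; see aml_slub_alloc_large() and
aml_slub_free_large() below for the real code):

  used = PAGE_ALIGN(size) / PAGE_SIZE;            /* pages actually needed */
  page = alloc_pages(flags & ~__GFP_COMP, order); /* 2^order pages */
  split_page(page, order);        /* make every page individually freeable */
  page->index = used;             /* head page remembers how many are kept */
  SetPageOwnerPriv1(page);        /* tag so ksize()/kfree() detect the trim */
  for (i = used; i < (1 << order); i++)
          __free_pages(page + i, 0);              /* return the unused tail */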

Verify:
P212

Change-Id: Iba378792ec30003358b64384361c0f0c4c2800d8
Signed-off-by: tao zeng <tao.zeng@amlogic.com>
drivers/amlogic/memory_ext/aml_cma.c
drivers/amlogic/memory_ext/page_trace.c
include/linux/amlogic/page_trace.h
mm/page_alloc.c
mm/slab_common.c
mm/slub.c

index 5554619..450af9a 100644
--- a/drivers/amlogic/memory_ext/aml_cma.c
+++ b/drivers/amlogic/memory_ext/aml_cma.c
@@ -152,15 +152,17 @@ EXPORT_SYMBOL(cma_page);
 static void update_cma_page_trace(struct page *page, unsigned long cnt)
 {
        long i;
+       unsigned long fun;
 
        if (page == NULL)
                return;
 
+       fun = find_back_trace();
        if (cma_alloc_trace)
                pr_info("%s alloc page:%lx, count:%ld, func:%pf\n", __func__,
-                       page_to_pfn(page), cnt, (void *)find_back_trace());
+                       page_to_pfn(page), cnt, (void *)fun);
        for (i = 0; i < cnt; i++) {
-               set_page_trace(page, 0, __GFP_BDEV);
+               set_page_trace(page, 0, __GFP_BDEV, (void *)fun);
                page++;
        }
 }
index 7af3ac0..1c2f8ba 100644
--- a/drivers/amlogic/memory_ext/page_trace.c
+++ b/drivers/amlogic/memory_ext/page_trace.c
@@ -38,7 +38,7 @@
 #define DEBUG_PAGE_TRACE       0
 #endif
 
-#define COMMON_CALLER_SIZE     24
+#define COMMON_CALLER_SIZE     32
 
 /*
  * this is a driver which will hook during page alloc/free and
@@ -83,8 +83,11 @@ static struct fun_symbol common_func[] __initdata = {
        {"dma_alloc_from_contiguous",   1},
        {"aml_cma_alloc_post_hook",     1},
        {"__dma_alloc",                 1},
+       {"arm_dma_alloc",               1},
        {"__kmalloc_track_caller",      1},
        {"kmem_cache_alloc_trace",      1},
+       {"__alloc_from_contiguous",     1},
+       {"cma_allocator_alloc",         1},
        {"alloc_pages_exact",           1},
        {"get_zeroed_page",             1},
        {"__vmalloc_node_range",        1},
@@ -581,7 +584,7 @@ unsigned int pack_ip(unsigned long ip, int order, gfp_t flag)
 }
 EXPORT_SYMBOL(pack_ip);
 
-void set_page_trace(struct page *page, int order, gfp_t flag)
+void set_page_trace(struct page *page, int order, gfp_t flag, void *func)
 {
        unsigned long ip;
        struct page_trace *base;
@@ -592,7 +595,10 @@ void set_page_trace(struct page *page, int order, gfp_t flag)
 #else
        if (page && trace_buffer) {
 #endif
-               ip = find_back_trace();
+               if (!func)
+                       ip = find_back_trace();
+               else
+                       ip = (unsigned long)func;
                if (!ip) {
                        pr_debug("can't find backtrace for page:%lx\n",
                                page_to_pfn(page));
index b3e93f8..7d72430 100644
--- a/include/linux/amlogic/page_trace.h
+++ b/include/linux/amlogic/page_trace.h
@@ -66,7 +66,8 @@ struct page_trace {
 extern unsigned int cma_alloc_trace;
 extern unsigned long unpack_ip(struct page_trace *trace);
 extern unsigned int pack_ip(unsigned long ip, int order, gfp_t flag);
-extern void set_page_trace(struct page *page, int order, gfp_t gfp_flags);
+extern void set_page_trace(struct page *page, int order,
+                          gfp_t gfp_flags, void *func);
 extern void reset_page_trace(struct page *page, int order);
 extern void page_trace_mem_init(void);
 extern struct page_trace *find_page_base(struct page *page);
index a361ce2..d30cb0c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4095,7 +4095,7 @@ out:
 
        trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
 #ifdef CONFIG_AMLOGIC_PAGE_TRACE
-       set_page_trace(page, order, gfp_mask);
+       set_page_trace(page, order, gfp_mask, NULL);
 #endif /* CONFIG_AMLOGIC_PAGE_TRACE */
 
        return page;
index b9cace0..07b8fc8 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1029,6 +1029,59 @@ void __init create_kmalloc_caches(unsigned long flags)
 }
 #endif /* !CONFIG_SLOB */
 
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+#include <linux/amlogic/page_trace.h>
+#endif
+
+static inline void *aml_slub_alloc_large(size_t size, gfp_t flags, int order)
+{
+       struct page *page, *p;
+
+       flags &= ~__GFP_COMP;
+       page = alloc_pages(flags, order);
+       if (page) {
+               unsigned long used_pages = PAGE_ALIGN(size) / PAGE_SIZE;
+               unsigned long total_pages = 1 << order;
+               unsigned long saved = 0;
+               /* stays 0 if CONFIG_AMLOGIC_PAGE_TRACE is off, so the
+                * pr_debug() at the end of this block is always valid */
+               unsigned long fun = 0;
+               int i;
+
+               /* head page records how many pages are in use */
+               __SetPageHead(page);
+               SetPageOwnerPriv1(page);        /* marks a trimmed allocation */
+
+       #ifdef CONFIG_AMLOGIC_PAGE_TRACE
+               fun = get_page_trace(page);
+       #endif
+
+               for (i = 1; i < used_pages; i++) {
+                       p = page + i;
+                       set_compound_head(p, page);
+               #ifdef CONFIG_AMLOGIC_PAGE_TRACE
+                       set_page_trace(p, 0, flags, (void *)fun);
+               #endif
+               }
+               page->index = used_pages;
+               split_page(page, order);
+               p = page + used_pages;
+               while (used_pages < total_pages) {
+                       __free_pages(p, 0);
+                       used_pages++;
+                       p++;
+                       saved++;
+               }
+               pr_debug("%s, page:%p, all:%5ld, size:%5ld, save:%5ld, f:%pf\n",
+                       __func__, page_address(page), total_pages * PAGE_SIZE,
+                       (long)size, saved * PAGE_SIZE, (void *)fun);
+               return page;
+       }
+       return NULL;
+}
+#endif
+
 /*
  * To avoid unnecessary overhead, we pass through large allocation requests
  * directly to the page allocator. We use __GFP_COMP, because we will need to
@@ -1040,7 +1093,14 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
        struct page *page;
 
        flags |= __GFP_COMP;
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+       if (size < (PAGE_SIZE * (1 << order)))
+               page = aml_slub_alloc_large(size, flags, order);
+       else
+               page = alloc_pages(flags, order);
+#else
        page = alloc_pages(flags, order);
+#endif
        ret = page ? page_address(page) : NULL;
        kmemleak_alloc(ret, size, 1, flags);
        kasan_kmalloc_large(ret, size, flags);
index edc79ca..71ae980 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3721,6 +3721,25 @@ static int __init setup_slub_min_objects(char *str)
 
 __setup("slub_min_objects=", setup_slub_min_objects);
 
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+static void aml_slub_free_large(struct page *page, const void *obj)
+{
+       unsigned int nr_pages, i;
+
+       if (page) {
+               __ClearPageHead(page);
+               ClearPageOwnerPriv1(page);
+               nr_pages = page->index;
+               pr_debug("%s, page:%p, pages:%u, obj:%p\n",
+                       __func__, page_address(page), nr_pages, obj);
+               for (i = 0; i < nr_pages; i++) {
+                       __free_pages(page, 0);
+                       page++;
+               }
+       }
+}
+#endif
+
 void *__kmalloc(size_t size, gfp_t flags)
 {
        struct kmem_cache *s;
@@ -3841,7 +3860,17 @@ static size_t __ksize(const void *object)
 
        if (unlikely(!PageSlab(page))) {
                WARN_ON(!PageCompound(page));
+       #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+               if (unlikely(PageOwnerPriv1(page))) {
+                       pr_debug("%s, obj:%p, page:%p, index:%ld, size:%ld\n",
+                               __func__, object, page_address(page),
+                               page->index, PAGE_SIZE * page->index);
+                       return PAGE_SIZE * page->index;
+               }
+               return PAGE_SIZE << compound_order(page);
+       #else
                return PAGE_SIZE << compound_order(page);
+       #endif
        }
 
        return slab_ksize(page->slab_cache);
@@ -3872,8 +3901,16 @@ void kfree(const void *x)
        if (unlikely(!PageSlab(page))) {
                BUG_ON(!PageCompound(page));
                kfree_hook(x);
+       #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+               if (unlikely(PageOwnerPriv1(page)))
+                       aml_slub_free_large(page, x);
+               else
+                       __free_pages(page, compound_order(page));
+               return;
+       #else
                __free_pages(page, compound_order(page));
                return;
+       #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
        }
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
 }