memblock: ensure there is no overflow in memblock_overlaps_region()
diff --git a/mm/slub.c b/mm/slub.c
index 34dcc09..1384dc9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/swab.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include "slab.h"
@@ -698,15 +699,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
               p, p - addr, get_freepointer(s, p));
 
        if (s->flags & SLAB_RED_ZONE)
-               print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+               print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
                              s->red_left_pad);
        else if (p > addr + 16)
                print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-       print_section(KERN_ERR, "Object ", p,
+       print_section(KERN_ERR,         "Object   ", p,
                      min_t(unsigned int, s->object_size, PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section(KERN_ERR, "Redzone ", p + s->object_size,
+               print_section(KERN_ERR, "Redzone  ", p + s->object_size,
                        s->inuse - s->object_size);
 
        off = get_info_end(s);
@@ -718,7 +719,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
        if (off != size_from_object(s))
                /* Beginning of the filler is the free pointer */
-               print_section(KERN_ERR, "Padding ", p + off,
+               print_section(KERN_ERR, "Padding  ", p + off,
                              size_from_object(s) - off);
 
        dump_stack();
@@ -895,11 +896,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
        u8 *endobject = object + s->object_size;
 
        if (s->flags & SLAB_RED_ZONE) {
-               if (!check_bytes_and_report(s, page, object, "Redzone",
+               if (!check_bytes_and_report(s, page, object, "Left Redzone",
                        object - s->red_left_pad, val, s->red_left_pad))
                        return 0;
 
-               if (!check_bytes_and_report(s, page, object, "Redzone",
+               if (!check_bytes_and_report(s, page, object, "Right Redzone",
                        endobject, val, s->inuse - s->object_size))
                        return 0;
        } else {
@@ -914,7 +915,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
                if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
                                        POISON_FREE, s->object_size - 1) ||
-                        !check_bytes_and_report(s, page, p, "Poison",
+                        !check_bytes_and_report(s, page, p, "End Poison",
                                p + s->object_size - 1, POISON_END, 1)))
                        return 0;
                /*
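
The relabelled checks above refer to the two guard areas SLUB places around a debugged object: red_left_pad bytes before it and inuse - object_size bytes after it. A rough userspace sketch of that layout, with made-up field values:

/*
 * Rough sketch of the layout those section names refer to: red_left_pad
 * bytes of left red zone before the object, and the right red zone filling
 * inuse - object_size bytes after it.  Field values below are made up.
 */
#include <stdio.h>

struct toy_cache {
        unsigned int red_left_pad;      /* left red zone, in bytes */
        unsigned int object_size;       /* payload size */
        unsigned int inuse;             /* payload plus right red zone */
};

int main(void)
{
        struct toy_cache s = { .red_left_pad = 16, .object_size = 40, .inuse = 48 };
        unsigned int obj = s.red_left_pad;      /* offset of the object itself */

        printf("Left Redzone : [%u, %u)\n", 0u, obj);
        printf("Object       : [%u, %u)\n", obj, obj + s.object_size);
        printf("Right Redzone: [%u, %u)\n", obj + s.object_size, obj + s.inuse);
        return 0;
}
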
@@ -1397,7 +1398,6 @@ __setup("slub_debug", setup_slub_debug);
  * @object_size:       the size of an object without meta data
  * @flags:             flags to set
  * @name:              name of the cache
- * @ctor:              constructor function
  *
  * Debug option(s) are applied to @flags. In addition to the debug
  * option(s), if a slab name (or multiple) is specified i.e.
@@ -1405,8 +1405,7 @@ __setup("slub_debug", setup_slub_debug);
  * then only the select slabs will receive the debug option(s).
  */
 slab_flags_t kmem_cache_flags(unsigned int object_size,
-       slab_flags_t flags, const char *name,
-       void (*ctor)(void *))
+       slab_flags_t flags, const char *name)
 {
        char *iter;
        size_t len;
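
The kernel-doc above describes how a slub_debug string of the form <flags>,<name>,<name>... restricts the debug options to the named caches. A minimal userspace sketch of that name matching (toy parser with a hypothetical debug_applies() helper, not the kernel's code):

/*
 * Toy illustration of "only the select slabs receive the debug option(s)":
 * a "<flags>,<name1>,<name2>,..." string applies its flags only to the
 * caches whose names are listed; with no names it applies to all of them.
 */
#include <stdio.h>
#include <string.h>

static int debug_applies(const char *param, const char *cache)
{
        const char *names = strchr(param, ',');

        if (!names)             /* no names given: applies to every cache */
                return 1;

        for (names++; *names; ) {
                size_t len = strcspn(names, ",");

                if (len == strlen(cache) && !strncmp(names, cache, len))
                        return 1;
                names += len;
                if (*names == ',')
                        names++;
        }
        return 0;
}

int main(void)
{
        const char *param = "FZ,dentry,kmalloc-64";     /* hypothetical slub_debug= value */

        printf("dentry:    %d\n", debug_applies(param, "dentry"));     /* 1 */
        printf("kmalloc-8: %d\n", debug_applies(param, "kmalloc-8"));  /* 0 */
        return 0;
}
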
@@ -1471,8 +1470,7 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
                                        struct page *page) {}
 slab_flags_t kmem_cache_flags(unsigned int object_size,
-       slab_flags_t flags, const char *name,
-       void (*ctor)(void *))
+       slab_flags_t flags, const char *name)
 {
        return flags;
 }
@@ -1545,7 +1543,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
-                                          void **head, void **tail)
+                                          void **head, void **tail,
+                                          int *cnt)
 {
 
        void *object;
@@ -1580,6 +1579,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                        *head = object;
                        if (!*tail)
                                *tail = object;
+               } else {
+                       /*
+                        * Adjust the reconstructed freelist depth
+                        * accordingly if object's reuse is delayed.
+                        */
+                       --(*cnt);
                }
        } while (object != old_tail);
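
The new --(*cnt) keeps the count in step with the freelist being rebuilt: every object whose reuse is delayed is left off the list, so the depth reported to the caller must shrink with it. A small standalone sketch of that bookkeeping (toy struct obj and a hypothetical build_freelist() helper):

/*
 * Toy model of the bookkeeping above: objects whose reuse is delayed are
 * left off the rebuilt freelist, so the count handed to the caller has to
 * shrink with the list.  Purely illustrative, not the kernel's types.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct obj {
        struct obj *next;
        bool quarantined;               /* stand-in for "reuse is delayed" */
};

/* Rebuild a freelist from objs[0..n-1], skipping quarantined entries. */
static struct obj *build_freelist(struct obj *objs, int n, int *cnt)
{
        struct obj *head = NULL;

        *cnt = n;
        for (int i = 0; i < n; i++) {
                if (objs[i].quarantined) {
                        --(*cnt);       /* keep cnt equal to the rebuilt depth */
                        continue;
                }
                objs[i].next = head;
                head = &objs[i];
        }
        return head;
}

int main(void)
{
        struct obj objs[4] = { 0 };
        int cnt, len = 0;

        objs[1].quarantined = true;     /* pretend this one was held back */

        for (struct obj *p = build_freelist(objs, 4, &cnt); p; p = p->next)
                len++;

        assert(len == cnt);             /* depth and count stay in step: 3 == 3 */
        printf("freelist depth %d, cnt %d\n", len, cnt);
        return 0;
}
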
 
@@ -3095,7 +3100,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
        struct kmem_cache_cpu *c;
        unsigned long tid;
 
-       memcg_slab_free_hook(s, &head, 1);
+       /* memcg_slab_free_hook() is already called for bulk free. */
+       if (!tail)
+               memcg_slab_free_hook(s, &head, 1);
 redo:
        /*
         * Determine the current cpu's per cpu slab.
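
The added check treats tail as a "this was a multi-object free" marker: per the new comment, bulk callers have already run memcg_slab_free_hook(), so the hook is only run here when no tail was passed. A toy model of that convention (made-up account_hook(), free_one() and free_bulk() helpers):

/*
 * Toy model of the "tail == NULL means single free" convention: bulk
 * callers account once up front, so the inner path only accounts when no
 * tail was passed.  Made-up helpers, not the kernel's API.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static int hook_calls;                  /* stands in for the memcg hook */

static void account_hook(void)
{
        hook_calls++;
}

static void do_free(void *head, void *tail, int cnt)
{
        if (!tail)                      /* bulk caller already ran the hook */
                account_hook();
        (void)head;                     /* actual freeing elided */
        (void)cnt;
}

static void free_one(void *obj)
{
        do_free(obj, NULL, 1);          /* single free: no tail */
}

static void free_bulk(void **objs, int cnt)
{
        account_hook();                 /* accounted once, up front */
        do_free(objs[0], objs[cnt - 1], cnt);
}

int main(void)
{
        int a, b, c;
        void *batch[] = { &a, &b, &c };

        free_one(&a);
        free_bulk(batch, 3);
        assert(hook_calls == 2);        /* once per call, never doubled */
        printf("hook ran %d times\n", hook_calls);
        return 0;
}
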
@@ -3139,7 +3146,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
         * With KASAN enabled slab_free_freelist_hook modifies the freelist
         * to remove objects, whose reuse must be delayed.
         */
-       if (slab_free_freelist_hook(s, &head, &tail))
+       if (slab_free_freelist_hook(s, &head, &tail, &cnt))
                do_slab_free(s, page, head, tail, cnt, addr);
 }
 
@@ -3642,7 +3649,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
        slab_flags_t flags = s->flags;
        unsigned int size = s->object_size;
-       unsigned int freepointer_area;
        unsigned int order;
 
        /*
@@ -3651,13 +3657,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         * the possible location of the free pointer.
         */
        size = ALIGN(size, sizeof(void *));
-       /*
-        * This is the area of the object where a freepointer can be
-        * safely written. If redzoning adds more to the inuse size, we
-        * can't use that portion for writing the freepointer, so
-        * s->offset must be limited within this for the general case.
-        */
-       freepointer_area = size;
 
 #ifdef CONFIG_SLUB_DEBUG
        /*
@@ -3683,19 +3682,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
        /*
         * With that we have determined the number of bytes in actual use
-        * by the object. This is the potential offset to the free pointer.
+        * by the object and redzoning.
         */
        s->inuse = size;
 
-       if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
-               s->ctor)) {
+       if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+           ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
+           s->ctor) {
                /*
                 * Relocate free pointer after the object if it is not
                 * permitted to overwrite the first word of the object on
                 * kmem_cache_free.
                 *
                 * This is the case if we do RCU, have a constructor or
-                * destructor or are poisoning the objects.
+                * destructor, are poisoning the objects, or are
+                * redzoning an object smaller than sizeof(void *).
                 *
                 * The assumption that s->offset >= s->inuse means free
                 * pointer is outside of the object is used in the
@@ -3704,13 +3705,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
                 */
                s->offset = size;
                size += sizeof(void *);
-       } else if (freepointer_area > sizeof(void *)) {
+       } else {
                /*
                 * Store freelist pointer near middle of object to keep
                 * it away from the edges of the object to avoid small
                 * sized over/underflows from neighboring allocations.
                 */
-               s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
+               s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
        }
 
 #ifdef CONFIG_SLUB_DEBUG
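
Switching to ALIGN_DOWN(s->object_size / 2, ...) keeps the whole free pointer inside the object; rounding the midpoint up can push it past the end of a small object. A worked example with hypothetical sizes (the ALIGN_UP/ALIGN_DOWN macros mirror the kernel's rounding but are local to this sketch):

/*
 * Worked example of the offset above with hypothetical sizes.  ALIGN_DOWN
 * keeps the whole pointer inside the object; rounding the midpoint up does
 * not, e.g. for a 12-byte object with 8-byte pointers.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)          (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))

int main(void)
{
        unsigned long fp = sizeof(void *);      /* 8 on a 64-bit build */
        unsigned long sizes[] = { 12, 24, 100 };

        for (unsigned int i = 0; i < 3; i++) {
                unsigned long sz = sizes[i];
                unsigned long down = ALIGN_DOWN(sz / 2, fp);
                unsigned long up = ALIGN_UP(sz / 2, fp);

                printf("object_size %3lu: ALIGN_DOWN -> %2lu (pointer ends at %3lu), ALIGN_UP -> %2lu (ends at %3lu)\n",
                       sz, down, down + fp, up, up + fp);
        }
        return 0;
}
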
@@ -3782,7 +3783,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 {
-       s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
+       s->flags = kmem_cache_flags(s->size, flags, s->name);
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
        s->random = get_random_long();
 #endif
@@ -3833,8 +3834,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
        if (alloc_kmem_cache_cpus(s))
                return 0;
 
-       free_kmem_cache_nodes(s);
 error:
+       __kmem_cache_release(s);
        return -EINVAL;
 }
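
The error label now funnels into __kmem_cache_release(), i.e. a single teardown that is expected to cope with whatever was set up so far. A generic userspace sketch of that goto-error shape (toy struct thing with thing_open()/thing_release(); nothing SLUB-specific):

/*
 * Generic sketch of the goto-error shape used above: a single release
 * helper that copes with partially initialized state, shared by every
 * failure point.  Toy resources only.
 */
#include <stdio.h>
#include <stdlib.h>

struct thing {
        int *nodes;
        int *cpus;
};

static void thing_release(struct thing *t)
{
        free(t->cpus);          /* free(NULL) is a no-op, so this is safe */
        free(t->nodes);         /* no matter how far thing_open() got */
        t->cpus = NULL;
        t->nodes = NULL;
}

static int thing_open(struct thing *t)
{
        t->nodes = calloc(4, sizeof(*t->nodes));
        if (!t->nodes)
                goto error;

        t->cpus = calloc(4, sizeof(*t->cpus));
        if (!t->cpus)
                goto error;

        return 0;

error:
        thing_release(t);       /* one cleanup path for every failure */
        return -1;
}

int main(void)
{
        struct thing t = { 0 };

        printf("open: %d\n", thing_open(&t));
        thing_release(&t);
        return 0;
}
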
 
@@ -3984,8 +3985,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
        page = alloc_pages_node(node, flags, order);
        if (page) {
                ptr = page_address(page);
-               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-                                   PAGE_SIZE << order);
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                                     PAGE_SIZE << order);
        }
 
        return kmalloc_large_node_hook(ptr, size, flags);
@@ -4116,8 +4117,8 @@ void kfree(const void *x)
 
                BUG_ON(!PageCompound(page));
                kfree_hook(object);
-               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-                                   -(PAGE_SIZE << order));
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                                     -(PAGE_SIZE << order));
                __free_pages(page, order);
                return;
        }
@@ -5620,10 +5621,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
        s->kobj.kset = kset;
        err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
-       if (err) {
-               kobject_put(&s->kobj);
+       if (err)
                goto out;
-       }
 
        err = sysfs_create_group(&s->kobj, &slab_attr_group);
        if (err)