Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penber...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Jun 2012 23:50:23 +0000 (16:50 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Jun 2012 23:50:23 +0000 (16:50 -0700)
Pull slab updates from Pekka Enberg:
 "Mainly a bunch of SLUB fixes from Joonsoo Kim"

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: use __SetPageSlab function to set PG_slab flag
  slub: fix a memory leak in get_partial_node()
  slub: remove unused argument of init_kmem_cache_node()
  slub: fix a possible memory leak
  Documentations: Fix slabinfo.c directory in vm/slub.txt
  slub: fix incorrect return type of get_any_partial()

mm/slub.c

diff --combined mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@@ -1369,7 -1369,7 +1369,7 @@@ static struct page *new_slab(struct kme
  
        inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab = s;
-       page->flags |= 1 << PG_slab;
+       __SetPageSlab(page);
  
        start = page_address(page);
  
@@@ -1514,15 -1514,19 +1514,19 @@@ static inline void *acquire_slab(struc
                freelist = page->freelist;
                counters = page->counters;
                new.counters = counters;
-               if (mode)
+               if (mode) {
                        new.inuse = page->objects;
+                       new.freelist = NULL;
+               } else {
+                       new.freelist = freelist;
+               }
  
                VM_BUG_ON(new.frozen);
                new.frozen = 1;
  
        } while (!__cmpxchg_double_slab(s, page,
                        freelist, counters,
-                       NULL, new.counters,
+                       new.freelist, new.counters,
                        "lock and freeze"));
  
        remove_partial(n, page);
@@@ -1564,7 -1568,6 +1568,6 @@@ static void *get_partial_node(struct km
                        object = t;
                        available =  page->objects - page->inuse;
                } else {
-                       page->freelist = t;
                        available = put_cpu_partial(s, page, 0);
                        stat(s, CPU_PARTIAL_NODE);
                }
  /*
   * Get a page from somewhere. Search in increasing NUMA distances.
   */
- static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
                struct kmem_cache_cpu *c)
  {
  #ifdef CONFIG_NUMA
@@@ -2040,7 -2043,7 +2043,7 @@@ static bool has_cpu_slab(int cpu, void 
        struct kmem_cache *s = info;
        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
  
 -      return !!(c->page);
 +      return c->page || c->partial;
  }
  
  static void flush_all(struct kmem_cache *s)
@@@ -2766,7 -2769,7 +2769,7 @@@ static unsigned long calculate_alignmen
  }
  
  static void
- init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+ init_kmem_cache_node(struct kmem_cache_node *n)
  {
        n->nr_partial = 0;
        spin_lock_init(&n->list_lock);
@@@ -2836,7 -2839,7 +2839,7 @@@ static void early_kmem_cache_node_alloc
        init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
        init_tracking(kmem_cache_node, n);
  #endif
-       init_kmem_cache_node(n, kmem_cache_node);
+       init_kmem_cache_node(n);
        inc_slabs_node(kmem_cache_node, node, page->objects);
  
        add_partial(n, page, DEACTIVATE_TO_HEAD);
@@@ -2876,7 -2879,7 +2879,7 @@@ static int init_kmem_cache_nodes(struc
                }
  
                s->node[node] = n;
-               init_kmem_cache_node(n, s);
+               init_kmem_cache_node(n);
        }
        return 1;
  }
@@@ -3625,7 -3628,7 +3628,7 @@@ static int slab_mem_going_online_callba
                        ret = -ENOMEM;
                        goto out;
                }
-               init_kmem_cache_node(n, s);
+               init_kmem_cache_node(n);
                s->node[nid] = n;
        }
  out:
@@@ -3968,9 -3971,9 +3971,9 @@@ struct kmem_cache *kmem_cache_create(co
                        }
                        return s;
                }
-               kfree(n);
                kfree(s);
        }
+       kfree(n);
  err:
        up_write(&slub_lock);