Merge branch 'slab/next' into slab/for-linus
author Pekka Enberg <penberg@kernel.org>
Tue, 7 May 2013 06:19:47 +0000 (09:19 +0300)
committer Pekka Enberg <penberg@kernel.org>
Tue, 7 May 2013 06:19:47 +0000 (09:19 +0300)
mm/slab.c
mm/slub.c
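
The branch being merged renames the SLAB per-node bookkeeping: struct kmem_list3 becomes struct kmem_cache_node, cachep->nodelists[] becomes cachep->node[], and the private malloc_sizes[]/cache_names[] kmalloc tables are replaced by the common kmalloc_caches[] array and its helpers (kmalloc_index(), kmalloc_size(), kmalloc_slab(), create_kmalloc_caches()) shared with SLUB.

As a reading aid, below is a minimal sketch of the renamed structure and of how a cache reaches it. The fields mirror the kmem_list3 definition removed in the diff; the canonical definition moves to mm/slab.h in this series, so treat the sketch as illustrative rather than as the exact upstream header (kernel context assumed: <linux/list.h>, <linux/spinlock.h>, and the SLAB-private struct kmem_cache).

	/* One of these per cache, per NUMA node (formerly struct kmem_list3). */
	struct kmem_cache_node {
		struct list_head slabs_partial;	/* partial list first, better asm code */
		struct list_head slabs_full;
		struct list_head slabs_free;
		unsigned long free_objects;
		unsigned int free_limit;
		unsigned int colour_next;	/* Per-node cache coloring */
		spinlock_t list_lock;
		struct array_cache *shared;	/* shared per node */
		struct array_cache **alien;	/* on other nodes */
		unsigned long next_reap;	/* updated without locking */
		int free_touched;		/* updated without locking */
	};

	/*
	 * The per-node pointers sit directly after the per-cpu array_cache
	 * pointers at the tail of struct kmem_cache; the rename only changes
	 * how that tail is addressed (see setup_node_pointer() below):
	 */
	static void setup_node_pointer(struct kmem_cache *cachep)
	{
		cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
	}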

diff --combined mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@@ -286,68 -286,27 +286,27 @@@ struct arraycache_init 
  };
  
  /*
-  * The slab lists for all objects.
-  */
- struct kmem_list3 {
-       struct list_head slabs_partial; /* partial list first, better asm code */
-       struct list_head slabs_full;
-       struct list_head slabs_free;
-       unsigned long free_objects;
-       unsigned int free_limit;
-       unsigned int colour_next;       /* Per-node cache coloring */
-       spinlock_t list_lock;
-       struct array_cache *shared;     /* shared per node */
-       struct array_cache **alien;     /* on other nodes */
-       unsigned long next_reap;        /* updated without locking */
-       int free_touched;               /* updated without locking */
- };
- /*
   * Need this for bootstrapping a per node allocator.
   */
  #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
- static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+ static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
  #define       CACHE_CACHE 0
  #define       SIZE_AC MAX_NUMNODES
- #define       SIZE_L3 (2 * MAX_NUMNODES)
+ #define       SIZE_NODE (2 * MAX_NUMNODES)
  
  static int drain_freelist(struct kmem_cache *cache,
-                       struct kmem_list3 *l3, int tofree);
+                       struct kmem_cache_node *n, int tofree);
  static void free_block(struct kmem_cache *cachep, void **objpp, int len,
                        int node);
  static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
  static void cache_reap(struct work_struct *unused);
  
- /*
-  * This function must be completely optimized away if a constant is passed to
-  * it.  Mostly the same as what is in linux/slab.h except it returns an index.
-  */
- static __always_inline int index_of(const size_t size)
- {
-       extern void __bad_size(void);
-       if (__builtin_constant_p(size)) {
-               int i = 0;
- #define CACHE(x) \
-       if (size <=x) \
-               return i; \
-       else \
-               i++;
- #include <linux/kmalloc_sizes.h>
- #undef CACHE
-               __bad_size();
-       } else
-               __bad_size();
-       return 0;
- }
  static int slab_early_init = 1;
  
- #define INDEX_AC index_of(sizeof(struct arraycache_init))
- #define INDEX_L3 index_of(sizeof(struct kmem_list3))
+ #define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
+ #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
  
- static void kmem_list3_init(struct kmem_list3 *parent)
+ static void kmem_cache_node_init(struct kmem_cache_node *parent)
  {
        INIT_LIST_HEAD(&parent->slabs_full);
        INIT_LIST_HEAD(&parent->slabs_partial);
  #define MAKE_LIST(cachep, listp, slab, nodeid)                                \
        do {                                                            \
                INIT_LIST_HEAD(listp);                                  \
-               list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
+               list_splice(&(cachep->node[nodeid]->slab), listp);      \
        } while (0)
  
  #define       MAKE_ALL_LISTS(cachep, ptr, nodeid)                             \
@@@ -524,30 -483,6 +483,6 @@@ static inline unsigned int obj_to_index
        return reciprocal_divide(offset, cache->reciprocal_buffer_size);
  }
  
- /*
-  * These are the default caches for kmalloc. Custom caches can have other sizes.
-  */
- struct cache_sizes malloc_sizes[] = {
- #define CACHE(x) { .cs_size = (x) },
- #include <linux/kmalloc_sizes.h>
-       CACHE(ULONG_MAX)
- #undef CACHE
- };
- EXPORT_SYMBOL(malloc_sizes);
- /* Must match cache_sizes above. Out of line to keep cache footprint low. */
- struct cache_names {
-       char *name;
-       char *name_dma;
- };
- static struct cache_names __initdata cache_names[] = {
- #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
- #include <linux/kmalloc_sizes.h>
-       {NULL,}
- #undef CACHE
- };
  static struct arraycache_init initarray_generic =
      { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
  
@@@ -586,15 -521,15 +521,15 @@@ static void slab_set_lock_classes(struc
                int q)
  {
        struct array_cache **alc;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        int r;
  
-       l3 = cachep->nodelists[q];
-       if (!l3)
+       n = cachep->node[q];
+       if (!n)
                return;
  
-       lockdep_set_class(&l3->list_lock, l3_key);
-       alc = l3->alien;
+       lockdep_set_class(&n->list_lock, l3_key);
+       alc = n->alien;
        /*
         * FIXME: This check for BAD_ALIEN_MAGIC
         * should go away when common slab code is taught to
@@@ -625,28 -560,30 +560,30 @@@ static void slab_set_debugobj_lock_clas
  
  static void init_node_lock_keys(int q)
  {
-       struct cache_sizes *s = malloc_sizes;
+       int i;
  
        if (slab_state < UP)
                return;
  
-       for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-               struct kmem_list3 *l3;
+       for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+               struct kmem_cache_node *n;
+               struct kmem_cache *cache = kmalloc_caches[i];
+               if (!cache)
+                       continue;
  
-               l3 = s->cs_cachep->nodelists[q];
-               if (!l3 || OFF_SLAB(s->cs_cachep))
+               n = cache->node[q];
+               if (!n || OFF_SLAB(cache))
                        continue;
  
-               slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+               slab_set_lock_classes(cache, &on_slab_l3_key,
                                &on_slab_alc_key, q);
        }
  }
  
  static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
  {
-       struct kmem_list3 *l3;
-       l3 = cachep->nodelists[q];
-       if (!l3)
+       if (!cachep->node[q])
                return;
  
        slab_set_lock_classes(cachep, &on_slab_l3_key,
@@@ -702,41 -639,6 +639,6 @@@ static inline struct array_cache *cpu_c
        return cachep->array[smp_processor_id()];
  }
  
- static inline struct kmem_cache *__find_general_cachep(size_t size,
-                                                       gfp_t gfpflags)
- {
-       struct cache_sizes *csizep = malloc_sizes;
- #if DEBUG
-       /* This happens if someone tries to call
-        * kmem_cache_create(), or __kmalloc(), before
-        * the generic caches are initialized.
-        */
-       BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
- #endif
-       if (!size)
-               return ZERO_SIZE_PTR;
-       while (size > csizep->cs_size)
-               csizep++;
-       /*
-        * Really subtle: The last entry with cs->cs_size==ULONG_MAX
-        * has cs_{dma,}cachep==NULL. Thus no special case
-        * for large kmalloc calls required.
-        */
- #ifdef CONFIG_ZONE_DMA
-       if (unlikely(gfpflags & GFP_DMA))
-               return csizep->cs_dmacachep;
- #endif
-       return csizep->cs_cachep;
- }
- static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
- {
-       return __find_general_cachep(size, gfpflags);
- }
  static size_t slab_mgmt_size(size_t nr_objs, size_t align)
  {
        return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
@@@ -812,7 -714,7 +714,7 @@@ static void __slab_error(const char *fu
        printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
               function, cachep->name, msg);
        dump_stack();
 -      add_taint(TAINT_BAD_PAGE);
 +      add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
  }
  #endif
  
@@@ -938,29 -840,29 +840,29 @@@ static inline bool is_slab_pfmemalloc(s
  static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
                                                struct array_cache *ac)
  {
-       struct kmem_list3 *l3 = cachep->nodelists[numa_mem_id()];
+       struct kmem_cache_node *n = cachep->node[numa_mem_id()];
        struct slab *slabp;
        unsigned long flags;
  
        if (!pfmemalloc_active)
                return;
  
-       spin_lock_irqsave(&l3->list_lock, flags);
-       list_for_each_entry(slabp, &l3->slabs_full, list)
+       spin_lock_irqsave(&n->list_lock, flags);
+       list_for_each_entry(slabp, &n->slabs_full, list)
                if (is_slab_pfmemalloc(slabp))
                        goto out;
  
-       list_for_each_entry(slabp, &l3->slabs_partial, list)
+       list_for_each_entry(slabp, &n->slabs_partial, list)
                if (is_slab_pfmemalloc(slabp))
                        goto out;
  
-       list_for_each_entry(slabp, &l3->slabs_free, list)
+       list_for_each_entry(slabp, &n->slabs_free, list)
                if (is_slab_pfmemalloc(slabp))
                        goto out;
  
        pfmemalloc_active = false;
  out:
-       spin_unlock_irqrestore(&l3->list_lock, flags);
+       spin_unlock_irqrestore(&n->list_lock, flags);
  }
  
  static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
  
        /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
        if (unlikely(is_obj_pfmemalloc(objp))) {
-               struct kmem_list3 *l3;
+               struct kmem_cache_node *n;
  
                if (gfp_pfmemalloc_allowed(flags)) {
                        clear_obj_pfmemalloc(&objp);
                 * If there are empty slabs on the slabs_free list and we are
                 * being forced to refill the cache, mark this one !pfmemalloc.
                 */
-               l3 = cachep->nodelists[numa_mem_id()];
-               if (!list_empty(&l3->slabs_free) && force_refill) {
+               n = cachep->node[numa_mem_id()];
+               if (!list_empty(&n->slabs_free) && force_refill) {
                        struct slab *slabp = virt_to_slab(objp);
                        ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
                        clear_obj_pfmemalloc(&objp);
@@@ -1071,7 -973,7 +973,7 @@@ static int transfer_objects(struct arra
  #ifndef CONFIG_NUMA
  
  #define drain_alien_cache(cachep, alien) do { } while (0)
- #define reap_alien(cachep, l3) do { } while (0)
+ #define reap_alien(cachep, n) do { } while (0)
  
  static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
  {
@@@ -1143,33 -1045,33 +1045,33 @@@ static void free_alien_cache(struct arr
  static void __drain_alien_cache(struct kmem_cache *cachep,
                                struct array_cache *ac, int node)
  {
-       struct kmem_list3 *rl3 = cachep->nodelists[node];
+       struct kmem_cache_node *n = cachep->node[node];
  
        if (ac->avail) {
-               spin_lock(&rl3->list_lock);
+               spin_lock(&n->list_lock);
                /*
                 * Stuff objects into the remote nodes shared array first.
                 * That way we could avoid the overhead of putting the objects
                 * into the free lists and getting them back later.
                 */
-               if (rl3->shared)
-                       transfer_objects(rl3->shared, ac, ac->limit);
+               if (n->shared)
+                       transfer_objects(n->shared, ac, ac->limit);
  
                free_block(cachep, ac->entry, ac->avail, node);
                ac->avail = 0;
-               spin_unlock(&rl3->list_lock);
+               spin_unlock(&n->list_lock);
        }
  }
  
  /*
   * Called from cache_reap() to regularly drain alien caches round robin.
   */
- static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
+ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
  {
        int node = __this_cpu_read(slab_reap_node);
  
-       if (l3->alien) {
-               struct array_cache *ac = l3->alien[node];
+       if (n->alien) {
+               struct array_cache *ac = n->alien[node];
  
                if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
                        __drain_alien_cache(cachep, ac, node);
@@@ -1199,7 -1101,7 +1101,7 @@@ static inline int cache_free_alien(stru
  {
        struct slab *slabp = virt_to_slab(objp);
        int nodeid = slabp->nodeid;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        struct array_cache *alien = NULL;
        int node;
  
        if (likely(slabp->nodeid == node))
                return 0;
  
-       l3 = cachep->nodelists[node];
+       n = cachep->node[node];
        STATS_INC_NODEFREES(cachep);
-       if (l3->alien && l3->alien[nodeid]) {
-               alien = l3->alien[nodeid];
+       if (n->alien && n->alien[nodeid]) {
+               alien = n->alien[nodeid];
                spin_lock(&alien->lock);
                if (unlikely(alien->avail == alien->limit)) {
                        STATS_INC_ACOVERFLOW(cachep);
                ac_put_obj(cachep, alien, objp);
                spin_unlock(&alien->lock);
        } else {
-               spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+               spin_lock(&(cachep->node[nodeid])->list_lock);
                free_block(cachep, &objp, 1, nodeid);
-               spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+               spin_unlock(&(cachep->node[nodeid])->list_lock);
        }
        return 1;
  }
  #endif
  
  /*
-  * Allocates and initializes nodelists for a node on each slab cache, used for
-  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_list3
+  * Allocates and initializes node structures for a node on each slab cache, used for
+  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
   * will be allocated off-node since memory is not yet online for the new node.
-  * When hotplugging memory or a cpu, existing nodelists are not replaced if
+  * When hotplugging memory or a cpu, existing node structures are not replaced if
   * already in use.
   *
   * Must hold slab_mutex.
   */
- static int init_cache_nodelists_node(int node)
+ static int init_cache_node_node(int node)
  {
        struct kmem_cache *cachep;
-       struct kmem_list3 *l3;
-       const int memsize = sizeof(struct kmem_list3);
+       struct kmem_cache_node *n;
+       const int memsize = sizeof(struct kmem_cache_node);
  
        list_for_each_entry(cachep, &slab_caches, list) {
                /*
                 * begin anything. Make sure some other cpu on this
                 * node has not already allocated this
                 */
-               if (!cachep->nodelists[node]) {
-                       l3 = kmalloc_node(memsize, GFP_KERNEL, node);
-                       if (!l3)
+               if (!cachep->node[node]) {
+                       n = kmalloc_node(memsize, GFP_KERNEL, node);
+                       if (!n)
                                return -ENOMEM;
-                       kmem_list3_init(l3);
-                       l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+                       kmem_cache_node_init(n);
+                       n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
                            ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
  
                        /*
                         * go.  slab_mutex is sufficient
                         * protection here.
                         */
-                       cachep->nodelists[node] = l3;
+                       cachep->node[node] = n;
                }
  
-               spin_lock_irq(&cachep->nodelists[node]->list_lock);
-               cachep->nodelists[node]->free_limit =
+               spin_lock_irq(&cachep->node[node]->list_lock);
+               cachep->node[node]->free_limit =
                        (1 + nr_cpus_node(node)) *
                        cachep->batchcount + cachep->num;
-               spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+               spin_unlock_irq(&cachep->node[node]->list_lock);
        }
        return 0;
  }
  static void __cpuinit cpuup_canceled(long cpu)
  {
        struct kmem_cache *cachep;
-       struct kmem_list3 *l3 = NULL;
+       struct kmem_cache_node *n = NULL;
        int node = cpu_to_mem(cpu);
        const struct cpumask *mask = cpumask_of_node(node);
  
                /* cpu is dead; no one can alloc from it. */
                nc = cachep->array[cpu];
                cachep->array[cpu] = NULL;
-               l3 = cachep->nodelists[node];
+               n = cachep->node[node];
  
-               if (!l3)
+               if (!n)
                        goto free_array_cache;
  
-               spin_lock_irq(&l3->list_lock);
+               spin_lock_irq(&n->list_lock);
  
-               /* Free limit for this kmem_list3 */
-               l3->free_limit -= cachep->batchcount;
+               /* Free limit for this kmem_cache_node */
+               n->free_limit -= cachep->batchcount;
                if (nc)
                        free_block(cachep, nc->entry, nc->avail, node);
  
                if (!cpumask_empty(mask)) {
-                       spin_unlock_irq(&l3->list_lock);
+                       spin_unlock_irq(&n->list_lock);
                        goto free_array_cache;
                }
  
-               shared = l3->shared;
+               shared = n->shared;
                if (shared) {
                        free_block(cachep, shared->entry,
                                   shared->avail, node);
-                       l3->shared = NULL;
+                       n->shared = NULL;
                }
  
-               alien = l3->alien;
-               l3->alien = NULL;
+               alien = n->alien;
+               n->alien = NULL;
  
-               spin_unlock_irq(&l3->list_lock);
+               spin_unlock_irq(&n->list_lock);
  
                kfree(shared);
                if (alien) {
@@@ -1336,17 -1238,17 +1238,17 @@@ free_array_cache
         * shrink each nodelist to its limit.
         */
        list_for_each_entry(cachep, &slab_caches, list) {
-               l3 = cachep->nodelists[node];
-               if (!l3)
+               n = cachep->node[node];
+               if (!n)
                        continue;
-               drain_freelist(cachep, l3, l3->free_objects);
+               drain_freelist(cachep, n, n->free_objects);
        }
  }
  
  static int __cpuinit cpuup_prepare(long cpu)
  {
        struct kmem_cache *cachep;
-       struct kmem_list3 *l3 = NULL;
+       struct kmem_cache_node *n = NULL;
        int node = cpu_to_mem(cpu);
        int err;
  
         * We need to do this right in the beginning since
         * alloc_arraycache's are going to use this list.
         * kmalloc_node allows us to add the slab to the right
-        * kmem_list3 and not this cpu's kmem_list3
+        * kmem_cache_node and not this cpu's kmem_cache_node
         */
-       err = init_cache_nodelists_node(node);
+       err = init_cache_node_node(node);
        if (err < 0)
                goto bad;
  
                        }
                }
                cachep->array[cpu] = nc;
-               l3 = cachep->nodelists[node];
-               BUG_ON(!l3);
+               n = cachep->node[node];
+               BUG_ON(!n);
  
-               spin_lock_irq(&l3->list_lock);
-               if (!l3->shared) {
+               spin_lock_irq(&n->list_lock);
+               if (!n->shared) {
                        /*
                         * We are serialised from CPU_DEAD or
                         * CPU_UP_CANCELLED by the cpucontrol lock
                         */
-                       l3->shared = shared;
+                       n->shared = shared;
                        shared = NULL;
                }
  #ifdef CONFIG_NUMA
-               if (!l3->alien) {
-                       l3->alien = alien;
+               if (!n->alien) {
+                       n->alien = alien;
                        alien = NULL;
                }
  #endif
-               spin_unlock_irq(&l3->list_lock);
+               spin_unlock_irq(&n->list_lock);
                kfree(shared);
                free_alien_cache(alien);
                if (cachep->flags & SLAB_DEBUG_OBJECTS)
@@@ -1464,9 -1366,9 +1366,9 @@@ static int __cpuinit cpuup_callback(str
        case CPU_DEAD_FROZEN:
                /*
                 * Even if all the cpus of a node are down, we don't free the
-                * kmem_list3 of any cache. This to avoid a race between
+                * kmem_cache_node of any cache. This to avoid a race between
                 * cpu_down, and a kmalloc allocation from another cpu for
-                * memory from the node of the cpu going down.  The list3
+                * memory from the node of the cpu going down.  The node
                 * structure is usually allocated from kmem_cache_create() and
                 * gets destroyed at kmem_cache_destroy().
                 */
@@@ -1494,22 -1396,22 +1396,22 @@@ static struct notifier_block __cpuinitd
   *
   * Must hold slab_mutex.
   */
- static int __meminit drain_cache_nodelists_node(int node)
+ static int __meminit drain_cache_node_node(int node)
  {
        struct kmem_cache *cachep;
        int ret = 0;
  
        list_for_each_entry(cachep, &slab_caches, list) {
-               struct kmem_list3 *l3;
+               struct kmem_cache_node *n;
  
-               l3 = cachep->nodelists[node];
-               if (!l3)
+               n = cachep->node[node];
+               if (!n)
                        continue;
  
-               drain_freelist(cachep, l3, l3->free_objects);
+               drain_freelist(cachep, n, n->free_objects);
  
-               if (!list_empty(&l3->slabs_full) ||
-                   !list_empty(&l3->slabs_partial)) {
+               if (!list_empty(&n->slabs_full) ||
+                   !list_empty(&n->slabs_partial)) {
                        ret = -EBUSY;
                        break;
                }
@@@ -1531,12 -1433,12 +1433,12 @@@ static int __meminit slab_memory_callba
        switch (action) {
        case MEM_GOING_ONLINE:
                mutex_lock(&slab_mutex);
-               ret = init_cache_nodelists_node(nid);
+               ret = init_cache_node_node(nid);
                mutex_unlock(&slab_mutex);
                break;
        case MEM_GOING_OFFLINE:
                mutex_lock(&slab_mutex);
-               ret = drain_cache_nodelists_node(nid);
+               ret = drain_cache_node_node(nid);
                mutex_unlock(&slab_mutex);
                break;
        case MEM_ONLINE:
  #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
  
  /*
-  * swap the static kmem_list3 with kmalloced memory
+  * swap the static kmem_cache_node with kmalloced memory
   */
- static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
+ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
                                int nodeid)
  {
-       struct kmem_list3 *ptr;
+       struct kmem_cache_node *ptr;
  
-       ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
+       ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
        BUG_ON(!ptr);
  
-       memcpy(ptr, list, sizeof(struct kmem_list3));
+       memcpy(ptr, list, sizeof(struct kmem_cache_node));
        /*
         * Do not assume that spinlocks can be initialized via memcpy:
         */
        spin_lock_init(&ptr->list_lock);
  
        MAKE_ALL_LISTS(cachep, ptr, nodeid);
-       cachep->nodelists[nodeid] = ptr;
+       cachep->node[nodeid] = ptr;
  }
  
  /*
-  * For setting up all the kmem_list3s for cache whose buffer_size is same as
-  * size of kmem_list3.
+  * For setting up all the kmem_cache_node for cache whose buffer_size is same as
+  * size of kmem_cache_node.
   */
- static void __init set_up_list3s(struct kmem_cache *cachep, int index)
+ static void __init set_up_node(struct kmem_cache *cachep, int index)
  {
        int node;
  
        for_each_online_node(node) {
-               cachep->nodelists[node] = &initkmem_list3[index + node];
-               cachep->nodelists[node]->next_reap = jiffies +
+               cachep->node[node] = &init_kmem_cache_node[index + node];
+               cachep->node[node]->next_reap = jiffies +
                    REAPTIMEOUT_LIST3 +
                    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
        }
  
  /*
   * The memory after the last cpu cache pointer is used for the
-  * the nodelists pointer.
+  * node pointer.
   */
- static void setup_nodelists_pointer(struct kmem_cache *cachep)
+ static void setup_node_pointer(struct kmem_cache *cachep)
  {
-       cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+       cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
  }
  
  /*
   */
  void __init kmem_cache_init(void)
  {
-       struct cache_sizes *sizes;
-       struct cache_names *names;
        int i;
  
        kmem_cache = &kmem_cache_boot;
-       setup_nodelists_pointer(kmem_cache);
+       setup_node_pointer(kmem_cache);
  
        if (num_possible_nodes() == 1)
                use_alien_caches = 0;
  
        for (i = 0; i < NUM_INIT_LISTS; i++)
-               kmem_list3_init(&initkmem_list3[i]);
+               kmem_cache_node_init(&init_kmem_cache_node[i]);
  
-       set_up_list3s(kmem_cache, CACHE_CACHE);
+       set_up_node(kmem_cache, CACHE_CACHE);
  
        /*
         * Fragmentation resistance on low memory - only use bigger
         *    kmem_cache structures of all caches, except kmem_cache itself:
         *    kmem_cache is statically allocated.
         *    Initially an __init data area is used for the head array and the
-        *    kmem_list3 structures, it's replaced with a kmalloc allocated
+        *    kmem_cache_node structures, it's replaced with a kmalloc allocated
         *    array at the end of the bootstrap.
         * 2) Create the first kmalloc cache.
         *    The struct kmem_cache for the new cache is allocated normally.
         *    head arrays.
         * 4) Replace the __init data head arrays for kmem_cache and the first
         *    kmalloc cache with kmalloc allocated arrays.
-        * 5) Replace the __init data for kmem_list3 for kmem_cache and
+        * 5) Replace the __init data for kmem_cache_node for kmem_cache and
         *    the other cache's with kmalloc allocated memory.
         * 6) Resize the head arrays of the kmalloc caches to their final sizes.
         */
         */
        create_boot_cache(kmem_cache, "kmem_cache",
                offsetof(struct kmem_cache, array[nr_cpu_ids]) +
-                                 nr_node_ids * sizeof(struct kmem_list3 *),
+                                 nr_node_ids * sizeof(struct kmem_cache_node *),
                                  SLAB_HWCACHE_ALIGN);
        list_add(&kmem_cache->list, &slab_caches);
  
        /* 2+3) create the kmalloc caches */
-       sizes = malloc_sizes;
-       names = cache_names;
  
        /*
         * Initialize the caches that provide memory for the array cache and the
-        * kmem_list3 structures first.  Without this, further allocations will
+        * kmem_cache_node structures first.  Without this, further allocations will
         * bug.
         */
  
-       sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
-                                       sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+       kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
+                                       kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
  
-       if (INDEX_AC != INDEX_L3)
-               sizes[INDEX_L3].cs_cachep =
-                       create_kmalloc_cache(names[INDEX_L3].name,
-                               sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
+       if (INDEX_AC != INDEX_NODE)
+               kmalloc_caches[INDEX_NODE] =
+                       create_kmalloc_cache("kmalloc-node",
+                               kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
  
        slab_early_init = 0;
  
-       while (sizes->cs_size != ULONG_MAX) {
-               /*
-                * For performance, all the general caches are L1 aligned.
-                * This should be particularly beneficial on SMP boxes, as it
-                * eliminates "false sharing".
-                * Note for systems short on memory removing the alignment will
-                * allow tighter packing of the smaller caches.
-                */
-               if (!sizes->cs_cachep)
-                       sizes->cs_cachep = create_kmalloc_cache(names->name,
-                                       sizes->cs_size, ARCH_KMALLOC_FLAGS);
- #ifdef CONFIG_ZONE_DMA
-               sizes->cs_dmacachep = create_kmalloc_cache(
-                       names->name_dma, sizes->cs_size,
-                       SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
- #endif
-               sizes++;
-               names++;
-       }
        /* 4) Replace the bootstrap head arrays */
        {
                struct array_cache *ptr;
  
                ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
  
-               BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
+               BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
                       != &initarray_generic.cache);
-               memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
+               memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
                       sizeof(struct arraycache_init));
                /*
                 * Do not assume that spinlocks can be initialized via memcpy:
                 */
                spin_lock_init(&ptr->lock);
  
-               malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
-                   ptr;
+               kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
        }
-       /* 5) Replace the bootstrap kmem_list3's */
+       /* 5) Replace the bootstrap kmem_cache_node */
        {
                int nid;
  
                for_each_online_node(nid) {
-                       init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+                       init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
  
-                       init_list(malloc_sizes[INDEX_AC].cs_cachep,
-                                 &initkmem_list3[SIZE_AC + nid], nid);
+                       init_list(kmalloc_caches[INDEX_AC],
+                                 &init_kmem_cache_node[SIZE_AC + nid], nid);
  
-                       if (INDEX_AC != INDEX_L3) {
-                               init_list(malloc_sizes[INDEX_L3].cs_cachep,
-                                         &initkmem_list3[SIZE_L3 + nid], nid);
+                       if (INDEX_AC != INDEX_NODE) {
+                               init_list(kmalloc_caches[INDEX_NODE],
+                                         &init_kmem_cache_node[SIZE_NODE + nid], nid);
                        }
                }
        }
  
-       slab_state = UP;
+       create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
  }
  
  void __init kmem_cache_init_late(void)
  #ifdef CONFIG_NUMA
        /*
         * Register a memory hotplug callback that initializes and frees
-        * nodelists.
+        * node structures.
         */
        hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
  #endif
@@@ -1803,7 -1680,7 +1680,7 @@@ __initcall(cpucache_init)
  static noinline void
  slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
  {
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        struct slab *slabp;
        unsigned long flags;
        int node;
                unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
                unsigned long active_slabs = 0, num_slabs = 0;
  
-               l3 = cachep->nodelists[node];
-               if (!l3)
+               n = cachep->node[node];
+               if (!n)
                        continue;
  
-               spin_lock_irqsave(&l3->list_lock, flags);
-               list_for_each_entry(slabp, &l3->slabs_full, list) {
+               spin_lock_irqsave(&n->list_lock, flags);
+               list_for_each_entry(slabp, &n->slabs_full, list) {
                        active_objs += cachep->num;
                        active_slabs++;
                }
-               list_for_each_entry(slabp, &l3->slabs_partial, list) {
+               list_for_each_entry(slabp, &n->slabs_partial, list) {
                        active_objs += slabp->inuse;
                        active_slabs++;
                }
-               list_for_each_entry(slabp, &l3->slabs_free, list)
+               list_for_each_entry(slabp, &n->slabs_free, list)
                        num_slabs++;
  
-               free_objects += l3->free_objects;
-               spin_unlock_irqrestore(&l3->list_lock, flags);
+               free_objects += n->free_objects;
+               spin_unlock_irqrestore(&n->list_lock, flags);
  
                num_slabs += active_slabs;
                num_objs = num_slabs * cachep->num;
@@@ -2260,7 -2137,7 +2137,7 @@@ static int __init_refok setup_cpu_cache
        if (slab_state == DOWN) {
                /*
                 * Note: Creation of first cache (kmem_cache).
-                * The setup_list3s is taken care
+                * The setup_node is taken care
                 * of by the caller of __kmem_cache_create
                 */
                cachep->array[smp_processor_id()] = &initarray_generic.cache;
                cachep->array[smp_processor_id()] = &initarray_generic.cache;
  
                /*
-                * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
-                * the second cache, then we need to set up all its list3s,
+                * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
+                * the second cache, then we need to set up all its node structures,
                 * otherwise the creation of further caches will BUG().
                 */
-               set_up_list3s(cachep, SIZE_AC);
-               if (INDEX_AC == INDEX_L3)
-                       slab_state = PARTIAL_L3;
+               set_up_node(cachep, SIZE_AC);
+               if (INDEX_AC == INDEX_NODE)
+                       slab_state = PARTIAL_NODE;
                else
                        slab_state = PARTIAL_ARRAYCACHE;
        } else {
                        kmalloc(sizeof(struct arraycache_init), gfp);
  
                if (slab_state == PARTIAL_ARRAYCACHE) {
-                       set_up_list3s(cachep, SIZE_L3);
-                       slab_state = PARTIAL_L3;
+                       set_up_node(cachep, SIZE_NODE);
+                       slab_state = PARTIAL_NODE;
                } else {
                        int node;
                        for_each_online_node(node) {
-                               cachep->nodelists[node] =
-                                   kmalloc_node(sizeof(struct kmem_list3),
+                               cachep->node[node] =
+                                   kmalloc_node(sizeof(struct kmem_cache_node),
                                                gfp, node);
-                               BUG_ON(!cachep->nodelists[node]);
-                               kmem_list3_init(cachep->nodelists[node]);
+                               BUG_ON(!cachep->node[node]);
+                               kmem_cache_node_init(cachep->node[node]);
                        }
                }
        }
-       cachep->nodelists[numa_mem_id()]->next_reap =
+       cachep->node[numa_mem_id()]->next_reap =
                        jiffies + REAPTIMEOUT_LIST3 +
                        ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
  
@@@ -2405,7 -2282,7 +2282,7 @@@ __kmem_cache_create (struct kmem_cache 
        else
                gfp = GFP_NOWAIT;
  
-       setup_nodelists_pointer(cachep);
+       setup_node_pointer(cachep);
  #if DEBUG
  
        /*
                        size += BYTES_PER_WORD;
        }
  #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-       if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
+       if (size >= kmalloc_size(INDEX_NODE + 1)
            && cachep->object_size > cache_line_size()
            && ALIGN(size, cachep->align) < PAGE_SIZE) {
                cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
        cachep->reciprocal_buffer_size = reciprocal_value(size);
  
        if (flags & CFLGS_OFF_SLAB) {
-               cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
+               cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
                /*
                 * This is a possibility for one of the malloc_sizes caches.
                 * But since we go off slab only for object size greater than
@@@ -2545,7 -2422,7 +2422,7 @@@ static void check_spinlock_acquired(str
  {
  #ifdef CONFIG_SMP
        check_irq_off();
-       assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
+       assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock);
  #endif
  }
  
@@@ -2553,7 -2430,7 +2430,7 @@@ static void check_spinlock_acquired_nod
  {
  #ifdef CONFIG_SMP
        check_irq_off();
-       assert_spin_locked(&cachep->nodelists[node]->list_lock);
+       assert_spin_locked(&cachep->node[node]->list_lock);
  #endif
  }
  
  #define check_spinlock_acquired_node(x, y) do { } while(0)
  #endif
  
- static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
                        struct array_cache *ac,
                        int force, int node);
  
@@@ -2576,29 -2453,29 +2453,29 @@@ static void do_drain(void *arg
  
        check_irq_off();
        ac = cpu_cache_get(cachep);
-       spin_lock(&cachep->nodelists[node]->list_lock);
+       spin_lock(&cachep->node[node]->list_lock);
        free_block(cachep, ac->entry, ac->avail, node);
-       spin_unlock(&cachep->nodelists[node]->list_lock);
+       spin_unlock(&cachep->node[node]->list_lock);
        ac->avail = 0;
  }
  
  static void drain_cpu_caches(struct kmem_cache *cachep)
  {
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        int node;
  
        on_each_cpu(do_drain, cachep, 1);
        check_irq_on();
        for_each_online_node(node) {
-               l3 = cachep->nodelists[node];
-               if (l3 && l3->alien)
-                       drain_alien_cache(cachep, l3->alien);
+               n = cachep->node[node];
+               if (n && n->alien)
+                       drain_alien_cache(cachep, n->alien);
        }
  
        for_each_online_node(node) {
-               l3 = cachep->nodelists[node];
-               if (l3)
-                       drain_array(cachep, l3, l3->shared, 1, node);
+               n = cachep->node[node];
+               if (n)
+                       drain_array(cachep, n, n->shared, 1, node);
        }
  }
  
   * Returns the actual number of slabs released.
   */
  static int drain_freelist(struct kmem_cache *cache,
-                       struct kmem_list3 *l3, int tofree)
+                       struct kmem_cache_node *n, int tofree)
  {
        struct list_head *p;
        int nr_freed;
        struct slab *slabp;
  
        nr_freed = 0;
-       while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
+       while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
  
-               spin_lock_irq(&l3->list_lock);
-               p = l3->slabs_free.prev;
-               if (p == &l3->slabs_free) {
-                       spin_unlock_irq(&l3->list_lock);
+               spin_lock_irq(&n->list_lock);
+               p = n->slabs_free.prev;
+               if (p == &n->slabs_free) {
+                       spin_unlock_irq(&n->list_lock);
                        goto out;
                }
  
                 * Safe to drop the lock. The slab is no longer linked
                 * to the cache.
                 */
-               l3->free_objects -= cache->num;
-               spin_unlock_irq(&l3->list_lock);
+               n->free_objects -= cache->num;
+               spin_unlock_irq(&n->list_lock);
                slab_destroy(cache, slabp);
                nr_freed++;
        }
  static int __cache_shrink(struct kmem_cache *cachep)
  {
        int ret = 0, i = 0;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
  
        drain_cpu_caches(cachep);
  
        check_irq_on();
        for_each_online_node(i) {
-               l3 = cachep->nodelists[i];
-               if (!l3)
+               n = cachep->node[i];
+               if (!n)
                        continue;
  
-               drain_freelist(cachep, l3, l3->free_objects);
+               drain_freelist(cachep, n, n->free_objects);
  
-               ret += !list_empty(&l3->slabs_full) ||
-                       !list_empty(&l3->slabs_partial);
+               ret += !list_empty(&n->slabs_full) ||
+                       !list_empty(&n->slabs_partial);
        }
        return (ret ? 1 : 0);
  }
@@@ -2689,7 -2566,7 +2566,7 @@@ EXPORT_SYMBOL(kmem_cache_shrink)
  int __kmem_cache_shutdown(struct kmem_cache *cachep)
  {
        int i;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        int rc = __cache_shrink(cachep);
  
        if (rc)
        for_each_online_cpu(i)
            kfree(cachep->array[i]);
  
-       /* NUMA: free the list3 structures */
+       /* NUMA: free the node structures */
        for_each_online_node(i) {
-               l3 = cachep->nodelists[i];
-               if (l3) {
-                       kfree(l3->shared);
-                       free_alien_cache(l3->alien);
-                       kfree(l3);
+               n = cachep->node[i];
+               if (n) {
+                       kfree(n->shared);
+                       free_alien_cache(n->alien);
+                       kfree(n);
                }
        }
        return 0;
@@@ -2886,7 -2763,7 +2763,7 @@@ static int cache_grow(struct kmem_cach
        struct slab *slabp;
        size_t offset;
        gfp_t local_flags;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
  
        /*
         * Be lazy and only check for valid flags here,  keeping it out of the
        BUG_ON(flags & GFP_SLAB_BUG_MASK);
        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
  
-       /* Take the l3 list lock to change the colour_next on this node */
+       /* Take the node list lock to change the colour_next on this node */
        check_irq_off();
-       l3 = cachep->nodelists[nodeid];
-       spin_lock(&l3->list_lock);
+       n = cachep->node[nodeid];
+       spin_lock(&n->list_lock);
  
        /* Get colour for the slab, and cal the next value. */
-       offset = l3->colour_next;
-       l3->colour_next++;
-       if (l3->colour_next >= cachep->colour)
-               l3->colour_next = 0;
-       spin_unlock(&l3->list_lock);
+       offset = n->colour_next;
+       n->colour_next++;
+       if (n->colour_next >= cachep->colour)
+               n->colour_next = 0;
+       spin_unlock(&n->list_lock);
  
        offset *= cachep->colour_off;
  
        if (local_flags & __GFP_WAIT)
                local_irq_disable();
        check_irq_off();
-       spin_lock(&l3->list_lock);
+       spin_lock(&n->list_lock);
  
        /* Make slab active. */
-       list_add_tail(&slabp->list, &(l3->slabs_free));
+       list_add_tail(&slabp->list, &(n->slabs_free));
        STATS_INC_GROWN(cachep);
-       l3->free_objects += cachep->num;
-       spin_unlock(&l3->list_lock);
+       n->free_objects += cachep->num;
+       spin_unlock(&n->list_lock);
        return 1;
  opps1:
        kmem_freepages(cachep, objp);
@@@ -3076,7 -2953,7 +2953,7 @@@ static void *cache_alloc_refill(struct 
                                                        bool force_refill)
  {
        int batchcount;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        struct array_cache *ac;
        int node;
  
@@@ -3095,14 -2972,14 +2972,14 @@@ retry
                 */
                batchcount = BATCHREFILL_LIMIT;
        }
-       l3 = cachep->nodelists[node];
+       n = cachep->node[node];
  
-       BUG_ON(ac->avail > 0 || !l3);
-       spin_lock(&l3->list_lock);
+       BUG_ON(ac->avail > 0 || !n);
+       spin_lock(&n->list_lock);
  
        /* See if we can refill from the shared array */
-       if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
-               l3->shared->touched = 1;
+       if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
+               n->shared->touched = 1;
                goto alloc_done;
        }
  
                struct list_head *entry;
                struct slab *slabp;
                /* Get slab alloc is to come from. */
-               entry = l3->slabs_partial.next;
-               if (entry == &l3->slabs_partial) {
-                       l3->free_touched = 1;
-                       entry = l3->slabs_free.next;
-                       if (entry == &l3->slabs_free)
+               entry = n->slabs_partial.next;
+               if (entry == &n->slabs_partial) {
+                       n->free_touched = 1;
+                       entry = n->slabs_free.next;
+                       if (entry == &n->slabs_free)
                                goto must_grow;
                }
  
                /* move slabp to correct slabp list: */
                list_del(&slabp->list);
                if (slabp->free == BUFCTL_END)
-                       list_add(&slabp->list, &l3->slabs_full);
+                       list_add(&slabp->list, &n->slabs_full);
                else
-                       list_add(&slabp->list, &l3->slabs_partial);
+                       list_add(&slabp->list, &n->slabs_partial);
        }
  
  must_grow:
-       l3->free_objects -= ac->avail;
+       n->free_objects -= ac->avail;
  alloc_done:
-       spin_unlock(&l3->list_lock);
+       spin_unlock(&n->list_lock);
  
        if (unlikely(!ac->avail)) {
                int x;
@@@ -3317,7 -3194,7 +3194,7 @@@ static void *alternate_node_alloc(struc
  /*
   * Fallback function if there was no memory available and no objects on a
   * certain node and fall back is permitted. First we scan all the
-  * available nodelists for available objects. If that fails then we
+  * available node structures for available objects. If that fails then we
   * perform an allocation without specifying a node. This allows the page
   * allocator to do its reclaim / fallback magic. We then insert the
   * slab into the proper nodelist and then allocate from it.
@@@ -3351,8 -3228,8 +3228,8 @@@ retry
                nid = zone_to_nid(zone);
  
                if (cpuset_zone_allowed_hardwall(zone, flags) &&
-                       cache->nodelists[nid] &&
-                       cache->nodelists[nid]->free_objects) {
+                       cache->node[nid] &&
+                       cache->node[nid]->free_objects) {
                                obj = ____cache_alloc_node(cache,
                                        flags | GFP_THISNODE, nid);
                                if (obj)
@@@ -3408,21 -3285,22 +3285,22 @@@ static void *____cache_alloc_node(struc
  {
        struct list_head *entry;
        struct slab *slabp;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        void *obj;
        int x;
  
-       l3 = cachep->nodelists[nodeid];
-       BUG_ON(!l3);
+       VM_BUG_ON(nodeid > num_online_nodes());
+       n = cachep->node[nodeid];
+       BUG_ON(!n);
  
  retry:
        check_irq_off();
-       spin_lock(&l3->list_lock);
-       entry = l3->slabs_partial.next;
-       if (entry == &l3->slabs_partial) {
-               l3->free_touched = 1;
-               entry = l3->slabs_free.next;
-               if (entry == &l3->slabs_free)
+       spin_lock(&n->list_lock);
+       entry = n->slabs_partial.next;
+       if (entry == &n->slabs_partial) {
+               n->free_touched = 1;
+               entry = n->slabs_free.next;
+               if (entry == &n->slabs_free)
                        goto must_grow;
        }
  
  
        obj = slab_get_obj(cachep, slabp, nodeid);
        check_slabp(cachep, slabp);
-       l3->free_objects--;
+       n->free_objects--;
        /* move slabp to correct slabp list: */
        list_del(&slabp->list);
  
        if (slabp->free == BUFCTL_END)
-               list_add(&slabp->list, &l3->slabs_full);
+               list_add(&slabp->list, &n->slabs_full);
        else
-               list_add(&slabp->list, &l3->slabs_partial);
+               list_add(&slabp->list, &n->slabs_partial);
  
-       spin_unlock(&l3->list_lock);
+       spin_unlock(&n->list_lock);
        goto done;
  
  must_grow:
-       spin_unlock(&l3->list_lock);
+       spin_unlock(&n->list_lock);
        x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
        if (x)
                goto retry;
@@@ -3497,7 -3375,7 +3375,7 @@@ slab_alloc_node(struct kmem_cache *cach
        if (nodeid == NUMA_NO_NODE)
                nodeid = slab_node;
  
-       if (unlikely(!cachep->nodelists[nodeid])) {
+       if (unlikely(!cachep->node[nodeid])) {
                /* Node not bootstrapped yet */
                ptr = fallback_alloc(cachep, flags);
                goto out;
@@@ -3603,7 -3481,7 +3481,7 @@@ static void free_block(struct kmem_cach
                       int node)
  {
        int i;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
  
        for (i = 0; i < nr_objects; i++) {
                void *objp;
                objp = objpp[i];
  
                slabp = virt_to_slab(objp);
-               l3 = cachep->nodelists[node];
+               n = cachep->node[node];
                list_del(&slabp->list);
                check_spinlock_acquired_node(cachep, node);
                check_slabp(cachep, slabp);
                slab_put_obj(cachep, slabp, objp, node);
                STATS_DEC_ACTIVE(cachep);
-               l3->free_objects++;
+               n->free_objects++;
                check_slabp(cachep, slabp);
  
                /* fixup slab chains */
                if (slabp->inuse == 0) {
-                       if (l3->free_objects > l3->free_limit) {
-                               l3->free_objects -= cachep->num;
+                       if (n->free_objects > n->free_limit) {
+                               n->free_objects -= cachep->num;
                                /* No need to drop any previously held
                                 * lock here, even if we have a off-slab slab
                                 * descriptor it is guaranteed to come from
                                 */
                                slab_destroy(cachep, slabp);
                        } else {
-                               list_add(&slabp->list, &l3->slabs_free);
+                               list_add(&slabp->list, &n->slabs_free);
                        }
                } else {
                        /* Unconditionally move a slab to the end of the
                         * partial list on free - maximum time for the
                         * other objects to be freed, too.
                         */
-                       list_add_tail(&slabp->list, &l3->slabs_partial);
+                       list_add_tail(&slabp->list, &n->slabs_partial);
                }
        }
  }
  static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
  {
        int batchcount;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        int node = numa_mem_id();
  
        batchcount = ac->batchcount;
        BUG_ON(!batchcount || batchcount > ac->avail);
  #endif
        check_irq_off();
-       l3 = cachep->nodelists[node];
-       spin_lock(&l3->list_lock);
-       if (l3->shared) {
-               struct array_cache *shared_array = l3->shared;
+       n = cachep->node[node];
+       spin_lock(&n->list_lock);
+       if (n->shared) {
+               struct array_cache *shared_array = n->shared;
                int max = shared_array->limit - shared_array->avail;
                if (max) {
                        if (batchcount > max)
@@@ -3679,8 -3557,8 +3557,8 @@@ free_done
                int i = 0;
                struct list_head *p;
  
-               p = l3->slabs_free.next;
-               while (p != &(l3->slabs_free)) {
+               p = n->slabs_free.next;
+               while (p != &(n->slabs_free)) {
                        struct slab *slabp;
  
                        slabp = list_entry(p, struct slab, list);
                STATS_SET_FREEABLE(cachep, i);
        }
  #endif
-       spin_unlock(&l3->list_lock);
+       spin_unlock(&n->list_lock);
        ac->avail -= batchcount;
        memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
  }
@@@ -3802,7 -3680,7 +3680,7 @@@ __do_kmalloc_node(size_t size, gfp_t fl
  {
        struct kmem_cache *cachep;
  
-       cachep = kmem_find_general_cachep(size, flags);
+       cachep = kmalloc_slab(size, flags);
        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                return cachep;
        return kmem_cache_alloc_node_trace(cachep, flags, node, size);
@@@ -3847,7 -3725,7 +3725,7 @@@ static __always_inline void *__do_kmall
         * Then kmalloc uses the uninlined functions instead of the inline
         * functions.
         */
-       cachep = __find_general_cachep(size, flags);
+       cachep = kmalloc_slab(size, flags);
        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                return cachep;
        ret = slab_alloc(cachep, flags, caller);
@@@ -3936,12 -3814,12 +3814,12 @@@ void kfree(const void *objp
  EXPORT_SYMBOL(kfree);
  
  /*
-  * This initializes kmem_list3 or resizes various caches for all nodes.
+  * This initializes kmem_cache_node or resizes various caches for all nodes.
   */
  static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
  {
        int node;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        struct array_cache *new_shared;
        struct array_cache **new_alien = NULL;
  
                        }
                }
  
-               l3 = cachep->nodelists[node];
-               if (l3) {
-                       struct array_cache *shared = l3->shared;
+               n = cachep->node[node];
+               if (n) {
+                       struct array_cache *shared = n->shared;
  
-                       spin_lock_irq(&l3->list_lock);
+                       spin_lock_irq(&n->list_lock);
  
                        if (shared)
                                free_block(cachep, shared->entry,
                                                shared->avail, node);
  
-                       l3->shared = new_shared;
-                       if (!l3->alien) {
-                               l3->alien = new_alien;
+                       n->shared = new_shared;
+                       if (!n->alien) {
+                               n->alien = new_alien;
                                new_alien = NULL;
                        }
-                       l3->free_limit = (1 + nr_cpus_node(node)) *
+                       n->free_limit = (1 + nr_cpus_node(node)) *
                                        cachep->batchcount + cachep->num;
-                       spin_unlock_irq(&l3->list_lock);
+                       spin_unlock_irq(&n->list_lock);
                        kfree(shared);
                        free_alien_cache(new_alien);
                        continue;
                }
-               l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
-               if (!l3) {
+               n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
+               if (!n) {
                        free_alien_cache(new_alien);
                        kfree(new_shared);
                        goto fail;
                }
  
-               kmem_list3_init(l3);
-               l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+               kmem_cache_node_init(n);
+               n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
                                ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-               l3->shared = new_shared;
-               l3->alien = new_alien;
-               l3->free_limit = (1 + nr_cpus_node(node)) *
+               n->shared = new_shared;
+               n->alien = new_alien;
+               n->free_limit = (1 + nr_cpus_node(node)) *
                                        cachep->batchcount + cachep->num;
-               cachep->nodelists[node] = l3;
+               cachep->node[node] = n;
        }
        return 0;
  
@@@ -4009,13 -3887,13 +3887,13 @@@ fail
                /* Cache is not active yet. Roll back what we did */
                node--;
                while (node >= 0) {
-                       if (cachep->nodelists[node]) {
-                               l3 = cachep->nodelists[node];
+                       if (cachep->node[node]) {
+                               n = cachep->node[node];
  
-                               kfree(l3->shared);
-                               free_alien_cache(l3->alien);
-                               kfree(l3);
-                               cachep->nodelists[node] = NULL;
+                               kfree(n->shared);
+                               free_alien_cache(n->alien);
+                               kfree(n);
+                               cachep->node[node] = NULL;
                        }
                        node--;
                }
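
The per-node loop above follows a grow-or-create pattern: an existing kmem_cache_node is refreshed under its list_lock, a missing one is allocated and initialized, and a failure unwinds only what this call populated (the rollback only runs while the cache is not yet active). A minimal userspace sketch of that shape, with illustrative types and plain calloc standing in for the kernel allocators:

    #include <stdlib.h>

    #define MAX_NODES 4

    /* stand-in for struct kmem_cache_node: only the derived limit is modelled */
    struct node_state {
            int free_limit;
    };

    struct cache_model {
            struct node_state *node[MAX_NODES];
            int batchcount;
            int num;
    };

    static int resize_all_nodes(struct cache_model *c, int cpus_per_node)
    {
            int node;

            for (node = 0; node < MAX_NODES; node++) {
                    struct node_state *n = c->node[node];
                    int limit = (1 + cpus_per_node) * c->batchcount + c->num;

                    if (n) {                        /* existing node: refresh it */
                            n->free_limit = limit;
                            continue;
                    }

                    n = calloc(1, sizeof(*n));      /* new node for this cache */
                    if (!n)
                            goto fail;
                    n->free_limit = limit;
                    c->node[node] = n;
            }
            return 0;

    fail:
            /* as in the real code, this path is only taken while the cache is
             * still being set up, so everything populated so far was ours */
            while (--node >= 0) {
                    free(c->node[node]);
                    c->node[node] = NULL;
            }
            return -1;
    }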
@@@ -4075,9 -3953,9 +3953,9 @@@ static int __do_tune_cpucache(struct km
                struct array_cache *ccold = new->new[i];
                if (!ccold)
                        continue;
-               spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+               spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
                free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
-               spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+               spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
                kfree(ccold);
        }
        kfree(new);
@@@ -4178,11 -4056,11 +4056,11 @@@ skip_setup
  }
  
  /*
-  * Drain an array if it contains any elements taking the l3 lock only if
-  * necessary. Note that the l3 listlock also protects the array_cache
+  * Drain an array if it contains any elements taking the node lock only if
+  * necessary. Note that the node listlock also protects the array_cache
   * if drain_array() is used on the shared array.
   */
- static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
                         struct array_cache *ac, int force, int node)
  {
        int tofree;
        if (ac->touched && !force) {
                ac->touched = 0;
        } else {
-               spin_lock_irq(&l3->list_lock);
+               spin_lock_irq(&n->list_lock);
                if (ac->avail) {
                        tofree = force ? ac->avail : (ac->limit + 4) / 5;
                        if (tofree > ac->avail)
                        memmove(ac->entry, &(ac->entry[tofree]),
                                sizeof(void *) * ac->avail);
                }
-               spin_unlock_irq(&l3->list_lock);
+               spin_unlock_irq(&n->list_lock);
        }
  }
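
drain_array() above is the classic "only take the lock if there is probably work" pattern: a recently touched array is left alone (its touched flag is just cleared), otherwise roughly a fifth of its cached entries are released under the node's list_lock and the survivors are shifted to the front. A rough standalone sketch of that logic, with a pthread mutex standing in for the list_lock and a stub in place of free_block():

    #include <pthread.h>
    #include <string.h>

    struct obj_array {
            int avail;
            int limit;
            int touched;
            void *entry[64];
    };

    static void release_objects(void **objs, int nr) { (void)objs; (void)nr; }

    static void drain_array_model(struct obj_array *ac, pthread_mutex_t *lock,
                                  int force)
    {
            int tofree;

            if (!ac || !ac->avail)
                    return;

            if (ac->touched && !force) {
                    ac->touched = 0;        /* recently used: leave it populated */
                    return;
            }

            pthread_mutex_lock(lock);
            if (ac->avail) {
                    tofree = force ? ac->avail : (ac->limit + 4) / 5;
                    if (tofree > ac->avail)
                            tofree = (ac->avail + 1) / 2;
                    release_objects(ac->entry, tofree);
                    ac->avail -= tofree;
                    memmove(ac->entry, &ac->entry[tofree],
                            sizeof(void *) * ac->avail);
            }
            pthread_mutex_unlock(lock);
    }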
  
  static void cache_reap(struct work_struct *w)
  {
        struct kmem_cache *searchp;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        int node = numa_mem_id();
        struct delayed_work *work = to_delayed_work(w);
  
                check_irq_on();
  
                /*
-                * We only take the l3 lock if absolutely necessary and we
+                * We only take the node lock if absolutely necessary and we
                 * have established with reasonable certainty that
                 * we can do some work if the lock was obtained.
                 */
-               l3 = searchp->nodelists[node];
+               n = searchp->node[node];
  
-               reap_alien(searchp, l3);
+               reap_alien(searchp, n);
  
-               drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
+               drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
  
                /*
                 * These are racy checks but it does not matter
                 * if we skip one check or scan twice.
                 */
-               if (time_after(l3->next_reap, jiffies))
+               if (time_after(n->next_reap, jiffies))
                        goto next;
  
-               l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
+               n->next_reap = jiffies + REAPTIMEOUT_LIST3;
  
-               drain_array(searchp, l3, l3->shared, 0, node);
+               drain_array(searchp, n, n->shared, 0, node);
  
-               if (l3->free_touched)
-                       l3->free_touched = 0;
+               if (n->free_touched)
+                       n->free_touched = 0;
                else {
                        int freed;
  
-                       freed = drain_freelist(searchp, l3, (l3->free_limit +
+                       freed = drain_freelist(searchp, n, (n->free_limit +
                                5 * searchp->num - 1) / (5 * searchp->num));
                        STATS_ADD_REAPED(searchp, freed);
                }
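
The reap loop relies on two cheap heuristics before doing real work: a per-node next_reap deadline, and a free_touched flag that grants a recently used free list one more interval before its slabs are trimmed. A simplified model of that decision logic (time() replacing jiffies, everything else illustrative):

    #include <time.h>

    struct node_model {
            time_t next_reap;       /* earliest time the next reap may run */
            int free_touched;       /* free list used since the last pass  */
            int free_limit;         /* allowed number of free objects      */
            int free_objects;       /* current number of free objects      */
    };

    static void reap_node(struct node_model *n, int interval, int objs_per_slab)
    {
            time_t now = time(NULL);

            if (now < n->next_reap)
                    return;                         /* not due yet */
            n->next_reap = now + interval;

            if (n->free_touched) {
                    n->free_touched = 0;            /* spare it for one interval */
                    return;
            }

            /* trim about a fifth of the allowed free objects, in whole slabs */
            {
                    int slabs = (n->free_limit + 5 * objs_per_slab - 1) /
                                (5 * objs_per_slab);
                    int freed = slabs * objs_per_slab;

                    if (freed > n->free_objects)
                            freed = n->free_objects;
                    n->free_objects -= freed;
            }
    }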
@@@ -4285,25 -4163,25 +4163,25 @@@ void get_slabinfo(struct kmem_cache *ca
        const char *name;
        char *error = NULL;
        int node;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
  
        active_objs = 0;
        num_slabs = 0;
        for_each_online_node(node) {
-               l3 = cachep->nodelists[node];
-               if (!l3)
+               n = cachep->node[node];
+               if (!n)
                        continue;
  
                check_irq_on();
-               spin_lock_irq(&l3->list_lock);
+               spin_lock_irq(&n->list_lock);
  
-               list_for_each_entry(slabp, &l3->slabs_full, list) {
+               list_for_each_entry(slabp, &n->slabs_full, list) {
                        if (slabp->inuse != cachep->num && !error)
                                error = "slabs_full accounting error";
                        active_objs += cachep->num;
                        active_slabs++;
                }
-               list_for_each_entry(slabp, &l3->slabs_partial, list) {
+               list_for_each_entry(slabp, &n->slabs_partial, list) {
                        if (slabp->inuse == cachep->num && !error)
                                error = "slabs_partial inuse accounting error";
                        if (!slabp->inuse && !error)
                        active_objs += slabp->inuse;
                        active_slabs++;
                }
-               list_for_each_entry(slabp, &l3->slabs_free, list) {
+               list_for_each_entry(slabp, &n->slabs_free, list) {
                        if (slabp->inuse && !error)
                                error = "slabs_free/inuse accounting error";
                        num_slabs++;
                }
-               free_objects += l3->free_objects;
-               if (l3->shared)
-                       shared_avail += l3->shared->avail;
+               free_objects += n->free_objects;
+               if (n->shared)
+                       shared_avail += n->shared->avail;
  
-               spin_unlock_irq(&l3->list_lock);
+               spin_unlock_irq(&n->list_lock);
        }
        num_slabs += active_slabs;
        num_objs = num_slabs * cachep->num;
  void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
  {
  #if STATS
-       {                       /* list3 stats */
+       {                       /* node stats */
                unsigned long high = cachep->high_mark;
                unsigned long allocs = cachep->num_allocations;
                unsigned long grown = cachep->grown;
@@@ -4499,9 -4377,9 +4377,9 @@@ static int leaks_show(struct seq_file *
  {
        struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
        struct slab *slabp;
-       struct kmem_list3 *l3;
+       struct kmem_cache_node *n;
        const char *name;
-       unsigned long *n = m->private;
+       unsigned long *x = m->private;
        int node;
        int i;
  
  
        /* OK, we can do it */
  
-       n[1] = 0;
+       x[1] = 0;
  
        for_each_online_node(node) {
-               l3 = cachep->nodelists[node];
-               if (!l3)
+               n = cachep->node[node];
+               if (!n)
                        continue;
  
                check_irq_on();
-               spin_lock_irq(&l3->list_lock);
+               spin_lock_irq(&n->list_lock);
  
-               list_for_each_entry(slabp, &l3->slabs_full, list)
-                       handle_slab(n, cachep, slabp);
-               list_for_each_entry(slabp, &l3->slabs_partial, list)
-                       handle_slab(n, cachep, slabp);
-               spin_unlock_irq(&l3->list_lock);
+               list_for_each_entry(slabp, &n->slabs_full, list)
+                       handle_slab(x, cachep, slabp);
+               list_for_each_entry(slabp, &n->slabs_partial, list)
+                       handle_slab(x, cachep, slabp);
+               spin_unlock_irq(&n->list_lock);
        }
        name = cachep->name;
-       if (n[0] == n[1]) {
+       if (x[0] == x[1]) {
                /* Increase the buffer size */
                mutex_unlock(&slab_mutex);
-               m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+               m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
                if (!m->private) {
                        /* Too bad, we are really out */
-                       m->private = n;
+                       m->private = x;
                        mutex_lock(&slab_mutex);
                        return -ENOMEM;
                }
-               *(unsigned long *)m->private = n[0] * 2;
-               kfree(n);
+               *(unsigned long *)m->private = x[0] * 2;
+               kfree(x);
                mutex_lock(&slab_mutex);
                /* Now make sure this entry will be retried */
                m->count = m->size;
                return 0;
        }
-       for (i = 0; i < n[1]; i++) {
-               seq_printf(m, "%s: %lu ", name, n[2*i+3]);
-               show_symbol(m, n[2*i+2]);
+       for (i = 0; i < x[1]; i++) {
+               seq_printf(m, "%s: %lu ", name, x[2*i+3]);
+               show_symbol(m, x[2*i+2]);
                seq_putc(m, '\n');
        }
  
diff --combined mm/slub.c
+++ b/mm/slub.c
@@@ -562,7 -562,7 +562,7 @@@ static void slab_bug(struct kmem_cache 
        printk(KERN_ERR "----------------------------------------"
                        "-------------------------------------\n\n");
  
 -      add_taint(TAINT_BAD_PAGE);
 +      add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
  }
  
  static void slab_fix(struct kmem_cache *s, char *fmt, ...)
@@@ -1005,7 -1005,7 +1005,7 @@@ static inline void inc_slabs_node(struc
         * dilemma by deferring the increment of the count during
         * bootstrap (see early_kmem_cache_node_alloc).
         */
-       if (n) {
+       if (likely(n)) {
                atomic_long_inc(&n->nr_slabs);
                atomic_long_add(objects, &n->total_objects);
        }
@@@ -1408,7 -1408,7 +1408,7 @@@ static void __free_slab(struct kmem_cac
        __ClearPageSlab(page);
  
        memcg_release_pages(s, order);
 -      reset_page_mapcount(page);
 +      page_mapcount_reset(page);
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
        __free_memcg_kmem_pages(page, order);
@@@ -1493,7 -1493,7 +1493,7 @@@ static inline void remove_partial(struc
   */
  static inline void *acquire_slab(struct kmem_cache *s,
                struct kmem_cache_node *n, struct page *page,
-               int mode)
+               int mode, int *objects)
  {
        void *freelist;
        unsigned long counters;
        freelist = page->freelist;
        counters = page->counters;
        new.counters = counters;
+       *objects = new.objects - new.inuse;
        if (mode) {
                new.inuse = page->objects;
                new.freelist = NULL;
        return freelist;
  }
  
- static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
  static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
  
  /*
@@@ -1539,6 -1540,8 +1540,8 @@@ static void *get_partial_node(struct km
  {
        struct page *page, *page2;
        void *object = NULL;
+       int available = 0;
+       int objects;
  
        /*
         * Racy check. If we mistakenly see no partial slabs then we
        spin_lock(&n->list_lock);
        list_for_each_entry_safe(page, page2, &n->partial, lru) {
                void *t;
-               int available;
  
                if (!pfmemalloc_match(page, flags))
                        continue;
  
-               t = acquire_slab(s, n, page, object == NULL);
+               t = acquire_slab(s, n, page, object == NULL, &objects);
                if (!t)
                        break;
  
+               available += objects;
                if (!object) {
                        c->page = page;
                        stat(s, ALLOC_FROM_PARTIAL);
                        object = t;
-                       available =  page->objects - page->inuse;
                } else {
-                       available = put_cpu_partial(s, page, 0);
+                       put_cpu_partial(s, page, 0);
                        stat(s, CPU_PARTIAL_NODE);
                }
                if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
@@@ -1946,7 -1948,7 +1948,7 @@@ static void unfreeze_partials(struct km
   * If we did not find a slot then simply move all the partials to the
   * per node partial list.
   */
- static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
  {
        struct page *oldpage;
        int pages;
                page->next = oldpage;
  
        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-       return pobjects;
  }
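
With this change put_cpu_partial() no longer reports a count; get_partial_node() instead adds up the free objects reported back by acquire_slab() and stops pulling partial slabs once roughly half of s->cpu_partial is covered. A small model of that accounting, with illustrative structures and budget:

    struct partial_slab {
            int objects;    /* total objects in the slab       */
            int inuse;      /* objects currently allocated     */
    };

    /* claim a slab and report how many free objects it contributes */
    static int acquire_slab_model(struct partial_slab *p)
    {
            return p->objects - p->inuse;
    }

    static int refill_from_partial(struct partial_slab **list, int nr,
                                   int cpu_partial_budget)
    {
            int available = 0;
            int i;

            for (i = 0; i < nr; i++) {
                    available += acquire_slab_model(list[i]);
                    if (available > cpu_partial_budget / 2)
                            break;          /* enough cached locally */
            }
            return available;
    }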
  
  static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@@ -2041,7 -2042,7 +2042,7 @@@ static void flush_all(struct kmem_cach
  static inline int node_match(struct page *page, int node)
  {
  #ifdef CONFIG_NUMA
-       if (node != NUMA_NO_NODE && page_to_nid(page) != node)
+       if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
                return 0;
  #endif
        return 1;
@@@ -2331,13 -2332,18 +2332,18 @@@ static __always_inline void *slab_alloc
  
        s = memcg_kmem_get_cache(s, gfpflags);
  redo:
        /*
         * Must read kmem_cache cpu data via this cpu ptr. Preemption is
         * enabled. We may switch back and forth between cpus while
         * reading from one cpu area. That does not matter as long
         * as we end up on the original cpu again when doing the cmpxchg.
+        *
+        * Preemption is disabled for the retrieval of the tid because that
+        * must occur from the current processor. We cannot allow rescheduling
+        * on a different processor between the determination of the pointer
+        * and the retrieval of the tid.
         */
+       preempt_disable();
        c = __this_cpu_ptr(s->cpu_slab);
  
        /*
         * linked list in between.
         */
        tid = c->tid;
-       barrier();
+       preempt_enable();
  
        object = c->freelist;
        page = c->page;
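
The fast path touched here pairs the per-cpu freelist with a transaction id: both are snapshotted together and replaced in a single compare-and-exchange, so being preempted or migrated between the snapshot and the update simply forces a retry, which is why the tid can now be fetched under preempt_disable() instead of relying on a bare barrier(). A userspace approximation, using a C11 atomic struct in place of this_cpu_cmpxchg_double(); names and layout are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stddef.h>

    struct snapshot {
            void *freelist;         /* head of a list of free objects */
            unsigned long tid;      /* bumped on every successful transaction */
    };

    static _Atomic struct snapshot cpu_slab;

    static void *fast_pop(void)
    {
            struct snapshot old, new;

            do {
                    old = atomic_load(&cpu_slab);
                    if (!old.freelist)
                            return NULL;            /* would be the slow path */
                    new.freelist = *(void **)old.freelist;  /* next free object */
                    new.tid = old.tid + 1;
            } while (!atomic_compare_exchange_weak(&cpu_slab, &old, new));

            return old.freelist;
    }

    int main(void)
    {
            void *objs[3] = { &objs[1], &objs[2], NULL };   /* tiny freelist */
            struct snapshot init = { objs, 0 };

            atomic_store(&cpu_slab, init);
            while (fast_pop())
                    ;                               /* drains all three objects */
            return 0;
    }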
@@@ -2594,10 -2600,11 +2600,11 @@@ redo
         * data is retrieved via this pointer. If we are on the same cpu
         * during the cmpxchg then the free will succeed.
         */
+       preempt_disable();
        c = __this_cpu_ptr(s->cpu_slab);
  
        tid = c->tid;
-       barrier();
+       preempt_enable();
  
        if (likely(page == c->page)) {
                set_freepointer(s, object, c->freelist);
@@@ -2775,7 -2782,7 +2782,7 @@@ init_kmem_cache_node(struct kmem_cache_
  static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
  {
        BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
-                       SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+                       KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
  
        /*
         * Must align to double word boundary for the double cmpxchg
@@@ -2982,7 -2989,7 +2989,7 @@@ static int calculate_sizes(struct kmem_
                s->allocflags |= __GFP_COMP;
  
        if (s->flags & SLAB_CACHE_DMA)
-               s->allocflags |= SLUB_DMA;
+               s->allocflags |= GFP_DMA;
  
        if (s->flags & SLAB_RECLAIM_ACCOUNT)
                s->allocflags |= __GFP_RECLAIMABLE;
@@@ -3174,13 -3181,6 +3181,6 @@@ int __kmem_cache_shutdown(struct kmem_c
   *            Kmalloc subsystem
   *******************************************************************/
  
- struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
- EXPORT_SYMBOL(kmalloc_caches);
- #ifdef CONFIG_ZONE_DMA
- static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
- #endif
  static int __init setup_slub_min_order(char *str)
  {
        get_option(&str, &slub_min_order);
@@@ -3217,73 -3217,15 +3217,15 @@@ static int __init setup_slub_nomerge(ch
  
  __setup("slub_nomerge", setup_slub_nomerge);
  
- /*
-  * Conversion table for small slabs sizes / 8 to the index in the
-  * kmalloc array. This is necessary for slabs < 192 since we have non power
-  * of two cache sizes there. The size of larger slabs can be determined using
-  * fls.
-  */
- static s8 size_index[24] = {
-       3,      /* 8 */
-       4,      /* 16 */
-       5,      /* 24 */
-       5,      /* 32 */
-       6,      /* 40 */
-       6,      /* 48 */
-       6,      /* 56 */
-       6,      /* 64 */
-       1,      /* 72 */
-       1,      /* 80 */
-       1,      /* 88 */
-       1,      /* 96 */
-       7,      /* 104 */
-       7,      /* 112 */
-       7,      /* 120 */
-       7,      /* 128 */
-       2,      /* 136 */
-       2,      /* 144 */
-       2,      /* 152 */
-       2,      /* 160 */
-       2,      /* 168 */
-       2,      /* 176 */
-       2,      /* 184 */
-       2       /* 192 */
- };
- static inline int size_index_elem(size_t bytes)
- {
-       return (bytes - 1) / 8;
- }
- static struct kmem_cache *get_slab(size_t size, gfp_t flags)
- {
-       int index;
-       if (size <= 192) {
-               if (!size)
-                       return ZERO_SIZE_PTR;
-               index = size_index[size_index_elem(size)];
-       } else
-               index = fls(size - 1);
- #ifdef CONFIG_ZONE_DMA
-       if (unlikely((flags & SLUB_DMA)))
-               return kmalloc_dma_caches[index];
- #endif
-       return kmalloc_caches[index];
- }
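
get_slab() and the local size_index[] table go away here because the lookup now lives in common code as kmalloc_slab(). The underlying mapping is unchanged: sizes up to 192 resolve through a small table (to reach the non power-of-two 96 and 192 byte caches), larger sizes through the position of the highest set bit. A standalone sketch of that mapping, with an illustrative fls()-style helper:

    #include <stdio.h>

    static int fls_u32(unsigned int x)      /* highest set bit, 1-based */
    {
            int i = 0;

            while (x) {
                    x >>= 1;
                    i++;
            }
            return i;
    }

    static int size_to_index(unsigned int size)
    {
            if (size == 0)
                    return -1;              /* caller maps this to ZERO_SIZE_PTR */
            if (size <= 192) {
                    /* same values as the table removed above, one per 8 bytes */
                    static const int small[] = { 3, 4, 5, 5, 6, 6, 6, 6,
                                                 1, 1, 1, 1, 7, 7, 7, 7,
                                                 2, 2, 2, 2, 2, 2, 2, 2 };
                    return small[(size - 1) / 8];
            }
            return fls_u32(size - 1);       /* index of the next power of two */
    }

    int main(void)
    {
            /* 96 -> the kmalloc-96 slot, 200 -> kmalloc-256, 4096 -> kmalloc-4096 */
            printf("%d %d %d\n", size_to_index(96), size_to_index(200),
                   size_to_index(4096));
            return 0;
    }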
  void *__kmalloc(size_t size, gfp_t flags)
  {
        struct kmem_cache *s;
        void *ret;
  
-       if (unlikely(size > SLUB_MAX_SIZE))
+       if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
                return kmalloc_large(size, flags);
  
-       s = get_slab(size, flags);
+       s = kmalloc_slab(size, flags);
  
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
@@@ -3316,7 -3258,7 +3258,7 @@@ void *__kmalloc_node(size_t size, gfp_
        struct kmem_cache *s;
        void *ret;
  
-       if (unlikely(size > SLUB_MAX_SIZE)) {
+       if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
                ret = kmalloc_large_node(size, flags, node);
  
                trace_kmalloc_node(_RET_IP_, ret,
                return ret;
        }
  
-       s = get_slab(size, flags);
+       s = kmalloc_slab(size, flags);
  
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
@@@ -3617,6 -3559,12 +3559,12 @@@ static struct kmem_cache * __init boots
  
        memcpy(s, static_cache, kmem_cache->object_size);
  
+       /*
+        * This runs very early, and only the boot processor is supposed to be
+        * up.  Even if that were not the case, IRQs are not up yet, so we
+        * could not send IPIs around anyway.
+        */
+       __flush_cpu_slab(s, smp_processor_id());
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = get_node(s, node);
                struct page *p;
@@@ -3639,8 -3587,6 +3587,6 @@@ void __init kmem_cache_init(void
  {
        static __initdata struct kmem_cache boot_kmem_cache,
                boot_kmem_cache_node;
-       int i;
-       int caches = 2;
  
        if (debug_guardpage_minorder())
                slub_max_order = 0;
        kmem_cache_node = bootstrap(&boot_kmem_cache_node);
  
        /* Now we can use the kmem_cache to allocate kmalloc slabs */
-       /*
-        * Patch up the size_index table if we have strange large alignment
-        * requirements for the kmalloc array. This is only the case for
-        * MIPS it seems. The standard arches will not generate any code here.
-        *
-        * Largest permitted alignment is 256 bytes due to the way we
-        * handle the index determination for the smaller caches.
-        *
-        * Make sure that nothing crazy happens if someone starts tinkering
-        * around with ARCH_KMALLOC_MINALIGN
-        */
-       BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
-               (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
-       for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
-               int elem = size_index_elem(i);
-               if (elem >= ARRAY_SIZE(size_index))
-                       break;
-               size_index[elem] = KMALLOC_SHIFT_LOW;
-       }
-       if (KMALLOC_MIN_SIZE == 64) {
-               /*
-                * The 96 byte size cache is not used if the alignment
-                * is 64 byte.
-                */
-               for (i = 64 + 8; i <= 96; i += 8)
-                       size_index[size_index_elem(i)] = 7;
-       } else if (KMALLOC_MIN_SIZE == 128) {
-               /*
-                * The 192 byte sized cache is not used if the alignment
-                * is 128 byte. Redirect kmalloc to use the 256 byte cache
-                * instead.
-                */
-               for (i = 128 + 8; i <= 192; i += 8)
-                       size_index[size_index_elem(i)] = 8;
-       }
-       /* Caches that are not of the two-to-the-power-of size */
-       if (KMALLOC_MIN_SIZE <= 32) {
-               kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
-               caches++;
-       }
-       if (KMALLOC_MIN_SIZE <= 64) {
-               kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
-               caches++;
-       }
-       for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
-               kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
-               caches++;
-       }
-       slab_state = UP;
-       /* Provide the correct kmalloc names now that the caches are up */
-       if (KMALLOC_MIN_SIZE <= 32) {
-               kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
-               BUG_ON(!kmalloc_caches[1]->name);
-       }
-       if (KMALLOC_MIN_SIZE <= 64) {
-               kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
-               BUG_ON(!kmalloc_caches[2]->name);
-       }
-       for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
-               char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
-               BUG_ON(!s);
-               kmalloc_caches[i]->name = s;
-       }
+       create_kmalloc_caches(0);
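
All of the removed setup above (the power-of-two loop, the 96/192 byte special cases, the later renaming pass) is now done once in common code by create_kmalloc_caches(). A sketch of the overall shape of such a routine; the shift bounds, the cache type, and create_cache() are stand-ins, not the kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    #define SHIFT_LOW   3           /* smallest cache: 8 bytes (illustrative) */
    #define SHIFT_HIGH  13          /* largest cache: 8192 bytes (illustrative) */
    #define MIN_SIZE    (1 << SHIFT_LOW)

    struct cache { char name[32]; unsigned int size; };

    static struct cache *caches[SHIFT_HIGH + 1];

    static struct cache *create_cache(const char *name, unsigned int size)
    {
            struct cache *c = calloc(1, sizeof(*c));

            if (!c)
                    abort();
            snprintf(c->name, sizeof(c->name), "%s", name);
            c->size = size;
            return c;
    }

    static void create_kmalloc_caches_model(void)
    {
            char name[32];
            int i;

            for (i = SHIFT_LOW; i <= SHIFT_HIGH; i++) {
                    snprintf(name, sizeof(name), "kmalloc-%u", 1u << i);
                    caches[i] = create_cache(name, 1u << i);
            }
            /* the non power-of-two sizes live in the otherwise unused slots 1/2 */
            if (MIN_SIZE <= 32)
                    caches[1] = create_cache("kmalloc-96", 96);
            if (MIN_SIZE <= 64)
                    caches[2] = create_cache("kmalloc-192", 192);
    }

    int main(void)
    {
            create_kmalloc_caches_model();
            printf("%s %s\n", caches[1]->name, caches[SHIFT_HIGH]->name);
            return 0;
    }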
  
  #ifdef CONFIG_SMP
        register_cpu_notifier(&slab_notifier);
  #endif
  
- #ifdef CONFIG_ZONE_DMA
-       for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
-               struct kmem_cache *s = kmalloc_caches[i];
-               if (s && s->size) {
-                       char *name = kasprintf(GFP_NOWAIT,
-                                "dma-kmalloc-%d", s->object_size);
-                       BUG_ON(!name);
-                       kmalloc_dma_caches[i] = create_kmalloc_cache(name,
-                               s->object_size, SLAB_CACHE_DMA);
-               }
-       }
- #endif
        printk(KERN_INFO
-               "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+               "SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d,"
                " CPUs=%d, Nodes=%d\n",
-               caches, cache_line_size(),
+               cache_line_size(),
                slub_min_order, slub_max_order, slub_min_objects,
                nr_cpu_ids, nr_node_ids);
  }
@@@ -3930,10 -3789,10 +3789,10 @@@ void *__kmalloc_track_caller(size_t siz
        struct kmem_cache *s;
        void *ret;
  
-       if (unlikely(size > SLUB_MAX_SIZE))
+       if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
                return kmalloc_large(size, gfpflags);
  
-       s = get_slab(size, gfpflags);
+       s = kmalloc_slab(size, gfpflags);
  
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
@@@ -3953,7 -3812,7 +3812,7 @@@ void *__kmalloc_node_track_caller(size_
        struct kmem_cache *s;
        void *ret;
  
-       if (unlikely(size > SLUB_MAX_SIZE)) {
+       if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
                ret = kmalloc_large_node(size, gfpflags, node);
  
                trace_kmalloc_node(caller, ret,
                return ret;
        }
  
-       s = get_slab(size, gfpflags);
+       s = kmalloc_slab(size, gfpflags);
  
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
@@@ -4312,7 -4171,7 +4171,7 @@@ static void resiliency_test(void
  {
        u8 *p;
  
-       BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+       BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
  
        printk(KERN_ERR "SLUB resiliency testing\n");
        printk(KERN_ERR "-----------------------\n");