slub: Separate out kmem_cache_cpu processing from deactivate_slab
diff --git a/mm/slub.c b/mm/slub.c
index a3395c2..2389a01 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1490,12 +1490,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
 }
 
 /*
- * Lock slab, remove from the partial list and put the object into the
- * per cpu freelist.
+ * Remove slab from the partial list, freeze it and
+ * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
  *
- * Must hold list_lock.
+ * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
                struct kmem_cache_node *n, struct page *page,
@@ -1510,22 +1510,23 @@ static inline void *acquire_slab(struct kmem_cache *s,
         * The old freelist is the list of objects for the
         * per cpu allocation list.
         */
-       do {
-               freelist = page->freelist;
-               counters = page->counters;
-               new.counters = counters;
-               if (mode)
-                       new.inuse = page->objects;
+       freelist = page->freelist;
+       counters = page->counters;
+       new.counters = counters;
+       if (mode)
+               new.inuse = page->objects;
 
-               VM_BUG_ON(new.frozen);
-               new.frozen = 1;
+       VM_BUG_ON(new.frozen);
+       new.frozen = 1;
 
-       } while (!__cmpxchg_double_slab(s, page,
+       if (!__cmpxchg_double_slab(s, page,
                        freelist, counters,
                        NULL, new.counters,
-                       "lock and freeze"));
+                       "acquire_slab"))
+               return NULL;
 
        remove_partial(n, page);
+       WARN_ON(!freelist);
        return freelist;
 }
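The retry loop around the cmpxchg is gone: acquire_slab() now makes a single __cmpxchg_double_slab() attempt and returns NULL when another CPU won the race, leaving any retry to the caller, which is already walking the node's partial list under list_lock. A minimal caller sketch under that assumption (illustrative only, the helper name is made up; the real caller is get_partial_node()):

/*
 * Illustrative only: walk the partial list and let acquire_slab() fail
 * gracefully instead of spinning on the cmpxchg inside it.
 */
static void *take_first_partial(struct kmem_cache *s, struct kmem_cache_node *n)
{
	struct page *page, *page2;
	void *object = NULL;

	spin_lock(&n->list_lock);		/* acquire_slab() requires list_lock */
	list_for_each_entry_safe(page, page2, &n->partial, lru) {
		object = acquire_slab(s, n, page, 1);
		if (object)			/* NULL: we raced, try the next page */
			break;
	}
	spin_unlock(&n->list_lock);
	return object;
}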
 
@@ -1559,7 +1561,6 @@ static void *get_partial_node(struct kmem_cache *s,
 
                if (!object) {
                        c->page = page;
-                       c->node = page_to_nid(page);
                        stat(s, ALLOC_FROM_PARTIAL);
                        object = t;
                        available =  page->objects - page->inuse;
@@ -1728,14 +1729,12 @@ void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
+static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
 {
        enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
-       struct page *page = c->page;
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
        int lock = 0;
        enum slab_modes l = M_NONE, m = M_NONE;
-       void *freelist;
        void *nextfree;
        int tail = DEACTIVATE_TO_HEAD;
        struct page new;
@@ -1746,11 +1745,6 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
                tail = DEACTIVATE_TO_TAIL;
        }
 
-       c->tid = next_tid(c->tid);
-       c->page = NULL;
-       freelist = c->freelist;
-       c->freelist = NULL;
-
        /*
         * Stage one: Free all available per cpu objects back
         * to the page freelist while it is still frozen. Leave the
@@ -2008,7 +2002,11 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
        stat(s, CPUSLAB_FLUSH);
-       deactivate_slab(s, c);
+       deactivate_slab(s, c->page, c->freelist);
+
+       c->tid = next_tid(c->tid);
+       c->page = NULL;
+       c->freelist = NULL;
 }
 
 /*
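With the kmem_cache_cpu bookkeeping pulled out, deactivate_slab() now works only on the page and freelist passed to it; forgetting the cpu slab is the caller's responsibility, as flush_slab() above shows. That pattern could be written as a helper along the following lines (hypothetical name, purely to illustrate the split; note that the allocation slow path below clears the fields inline and does not advance the tid there):

/*
 * Hypothetical helper showing the new division of labour:
 * deactivate_slab() unfreezes the page, the caller drops its reference.
 */
static inline void drop_cpu_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	deactivate_slab(s, c->page, c->freelist);

	c->tid = next_tid(c->tid);	/* as in flush_slab(); other call sites skip this */
	c->page = NULL;
	c->freelist = NULL;
}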
@@ -2055,7 +2053,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-       if (node != NUMA_NO_NODE && c->node != node)
+       if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
                return 0;
 #endif
        return 1;
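Because the cached c->node field is being removed, node_match() now derives the cpu slab's NUMA node from the page itself. Expressed as a stand-alone helper, the lookup amounts to no more than this (illustrative only; the patch simply open-codes page_to_nid()):

/* Illustrative only: the node of the current cpu slab is a property of the
 * page, no longer a separately cached kmem_cache_cpu field. */
static inline int cpu_slab_node(struct kmem_cache_cpu *c)
{
	return c->page ? page_to_nid(c->page) : NUMA_NO_NODE;
}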
@@ -2128,9 +2126,15 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
                        int node, struct kmem_cache_cpu **pc)
 {
        void *freelist;
-       struct kmem_cache_cpu *c;
-       struct page *page = new_slab(s, flags, node);
+       struct kmem_cache_cpu *c = *pc;
+       struct page *page;
+
+       freelist = get_partial(s, flags, node, c);
 
+       if (freelist)
+               return freelist;
+
+       page = new_slab(s, flags, node);
        if (page) {
                c = __this_cpu_ptr(s->cpu_slab);
                if (c->page)
@@ -2144,7 +2148,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
                page->freelist = NULL;
 
                stat(s, ALLOC_SLAB);
-               c->node = page_to_nid(page);
                c->page = page;
                *pc = c;
        } else
@@ -2223,7 +2226,9 @@ redo:
 
        if (unlikely(!node_match(c, node))) {
                stat(s, ALLOC_NODE_MISMATCH);
-               deactivate_slab(s, c);
+               deactivate_slab(s, c->page, c->freelist);
+               c->page = NULL;
+               c->freelist = NULL;
                goto new_slab;
        }
 
@@ -2261,26 +2266,19 @@ new_slab:
        if (c->partial) {
                c->page = c->partial;
                c->partial = c->page->next;
-               c->node = page_to_nid(c->page);
                stat(s, CPU_PARTIAL_ALLOC);
                c->freelist = NULL;
                goto redo;
        }
 
-       /* Then do expensive stuff like retrieving pages from the partial lists */
-       freelist = get_partial(s, gfpflags, node, c);
+       freelist = new_slab_objects(s, gfpflags, node, &c);
 
        if (unlikely(!freelist)) {
+               if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+                       slab_out_of_memory(s, gfpflags, node);
 
-               freelist = new_slab_objects(s, gfpflags, node, &c);
-
-               if (unlikely(!freelist)) {
-                       if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
-                               slab_out_of_memory(s, gfpflags, node);
-
-                       local_irq_restore(flags);
-                       return NULL;
-               }
+               local_irq_restore(flags);
+               return NULL;
        }
 
        if (likely(!kmem_cache_debug(s)))
@@ -2290,9 +2288,9 @@ new_slab:
        if (!alloc_debug_processing(s, c->page, freelist, addr))
                goto new_slab;  /* Slab failed checks. Next slab needed */
 
-       c->freelist = get_freepointer(s, freelist);
-       deactivate_slab(s, c);
-       c->node = NUMA_NO_NODE;
+       deactivate_slab(s, c->page, get_freepointer(s, freelist));
+       c->page = NULL;
+       c->freelist = NULL;
        local_irq_restore(flags);
        return freelist;
 }
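Taken together, the last two hunks consolidate the refill logic: __slab_alloc() first reuses a frozen page from the per-cpu partial list, and only then calls new_slab_objects(), which itself prefers get_partial() over allocating a fresh slab. A hedged sketch of the reworked new_slab_objects() in its simplest case (no leftover cpu slab to flush, debug checks and statistics omitted):

/*
 * Sketch of new_slab_objects() after the rework, simplest case only.
 */
static void *new_slab_objects_sketch(struct kmem_cache *s, gfp_t flags,
					int node, struct kmem_cache_cpu **pc)
{
	void *freelist;
	struct page *page;

	/* First choice: take a page from the node's partial lists. */
	freelist = get_partial(s, flags, node, *pc);
	if (freelist)
		return freelist;

	/* Fall back to a brand new slab from the page allocator. */
	page = new_slab(s, flags, node);
	if (!page)
		return NULL;

	/* Hand the whole freelist to the cpu slab; the page keeps none. */
	freelist = page->freelist;
	page->freelist = NULL;
	(*pc)->page = page;
	return freelist;
}

In the debug case __slab_alloc() then hands get_freepointer(s, freelist) straight to deactivate_slab() and clears c->page and c->freelist itself, in line with the new calling convention.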
@@ -4505,30 +4503,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
                for_each_possible_cpu(cpu) {
                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-                       int node = ACCESS_ONCE(c->node);
+                       int node;
                        struct page *page;
 
-                       if (node < 0)
-                               continue;
                        page = ACCESS_ONCE(c->page);
-                       if (page) {
-                               if (flags & SO_TOTAL)
-                                       x = page->objects;
-                               else if (flags & SO_OBJECTS)
-                                       x = page->inuse;
-                               else
-                                       x = 1;
+                       if (!page)
+                               continue;
 
-                               total += x;
-                               nodes[node] += x;
-                       }
-                       page = c->partial;
+                       node = page_to_nid(page);
+                       if (flags & SO_TOTAL)
+                               x = page->objects;
+                       else if (flags & SO_OBJECTS)
+                               x = page->inuse;
+                       else
+                               x = 1;
 
+                       total += x;
+                       nodes[node] += x;
+
+                       page = ACCESS_ONCE(c->partial);
                        if (page) {
                                x = page->pobjects;
                                total += x;
                                nodes[node] += x;
                        }
+
                        per_cpu[node]++;
                }
        }