slub: explicitly document position of inserting slab to partial list
authorShaohua Li <shaohua.li@intel.com>
Wed, 24 Aug 2011 00:57:52 +0000 (08:57 +0800)
committerPekka Enberg <penberg@kernel.org>
Sat, 27 Aug 2011 08:59:00 +0000 (11:59 +0300)
Adding a slab to the head or the tail of the partial list is
performance sensitive. So explicitly use DEACTIVATE_TO_TAIL /
DEACTIVATE_TO_HEAD to document the intent and avoid getting it wrong.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Shaohua Li <shli@kernel.org>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
mm/slub.c

index 7c54fe8..91a120f 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1534,7 +1534,7 @@ static inline void add_partial(struct kmem_cache_node *n,
                                struct page *page, int tail)
 {
        n->nr_partial++;
-       if (tail)
+       if (tail == DEACTIVATE_TO_TAIL)
                list_add_tail(&page->lru, &n->partial);
        else
                list_add(&page->lru, &n->partial);
@@ -1781,13 +1781,13 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
        enum slab_modes l = M_NONE, m = M_NONE;
        void *freelist;
        void *nextfree;
-       int tail = 0;
+       int tail = DEACTIVATE_TO_HEAD;
        struct page new;
        struct page old;
 
        if (page->freelist) {
                stat(s, DEACTIVATE_REMOTE_FREES);
-               tail = 1;
+               tail = DEACTIVATE_TO_TAIL;
        }
 
        c->tid = next_tid(c->tid);
@@ -1893,7 +1893,7 @@ redo:
                if (m == M_PARTIAL) {
 
                        add_partial(n, page, tail);
-                       stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+                       stat(s, tail);
 
                } else if (m == M_FULL) {
 
@@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 */
                if (unlikely(!prior)) {
                        remove_full(s, page);
-                       add_partial(n, page, 1);
+                       add_partial(n, page, DEACTIVATE_TO_TAIL);
                        stat(s, FREE_ADD_PARTIAL);
                }
        }
@@ -2695,7 +2695,7 @@ static void early_kmem_cache_node_alloc(int node)
        init_kmem_cache_node(n, kmem_cache_node);
        inc_slabs_node(kmem_cache_node, node, page->objects);
 
-       add_partial(n, page, 0);
+       add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)