diff --git a/mm/slub.c b/mm/slub.c
index eb5a8f9..cb53aff 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -701,7 +701,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
 		return check_bytes8(start, value, bytes);
 
 	value64 = value | value << 8 | value << 16 | value << 24;
-	value64 = value64 | value64 << 32;
+	value64 = (value64 & 0xffffffff) | value64 << 32;
 	prefix = 8 - ((unsigned long)start) % 8;
 
 	if (prefix) {
@@ -1447,7 +1447,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	set_freepointer(s, last, NULL);
 
 	page->freelist = start;
-	page->inuse = 0;
+	page->inuse = page->objects;
 	page->frozen = 1;
 out:
 	return page;
@@ -1557,7 +1557,8 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * Must hold list_lock.
  */
 static inline int acquire_slab(struct kmem_cache *s,
-		struct kmem_cache_node *n, struct page *page)
+		struct kmem_cache_node *n, struct page *page,
+		struct kmem_cache_cpu *c)
 {
 	void *freelist;
 	unsigned long counters;
@@ -1586,9 +1587,9 @@ static inline int acquire_slab(struct kmem_cache *s,
 
 	if (freelist) {
 		/* Populate the per cpu freelist */
-		this_cpu_write(s->cpu_slab->freelist, freelist);
-		this_cpu_write(s->cpu_slab->page, page);
-		this_cpu_write(s->cpu_slab->node, page_to_nid(page));
+		c->freelist = freelist;
+		c->page = page;
+		c->node = page_to_nid(page);
 		return 1;
 	} else {
 		/*
@@ -1606,7 +1607,7 @@ static inline int acquire_slab(struct kmem_cache *s,
  * Try to allocate a partial slab from a specific node.
  */
 static struct page *get_partial_node(struct kmem_cache *s,
-		struct kmem_cache_node *n)
+		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
 {
 	struct page *page;
 
@@ -1621,7 +1622,7 @@ static struct page *get_partial_node(struct kmem_cache *s,
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (acquire_slab(s, n, page))
+		if (acquire_slab(s, n, page, c))
 			goto out;
 	page = NULL;
 out:
@@ -1632,7 +1633,8 @@ out:
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
+static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+		struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
@@ -1672,7 +1674,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			page = get_partial_node(s, n);
+			page = get_partial_node(s, n, c);
 			if (page) {
 				put_mems_allowed();
 				return page;
@@ -1687,16 +1689,17 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 /*
  * Get a partial page, lock it and return it.
  */
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
+static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+		struct kmem_cache_cpu *c)
 {
 	struct page *page;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-	page = get_partial_node(s, get_node(s, searchnode));
+	page = get_partial_node(s, get_node(s, searchnode), c);
 	if (page || node != NUMA_NO_NODE)
 		return page;
 
-	return get_any_partial(s, flags);
+	return get_any_partial(s, flags, c);
 }
 
 #ifdef CONFIG_PREEMPT
@@ -1765,9 +1768,6 @@ void init_kmem_cache_cpus(struct kmem_cache *s)
 	for_each_possible_cpu(cpu)
 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
 }
-/*
- * Remove the cpu slab
- */
 
 /*
  * Remove the cpu slab
@@ -1854,7 +1854,7 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial < s->min_partial)
+	if (!new.inuse && n->nr_partial > s->min_partial)
 		m = M_FREE;
 	else if (new.freelist) {
 		m = M_PARTIAL;
@@ -2064,9 +2064,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	/* We handle __GFP_ZERO in the caller */
-	gfpflags &= ~__GFP_ZERO;
-
 	page = c->page;
 	if (!page)
 		goto new_slab;
@@ -2119,7 +2116,7 @@ load_freelist:
 	return object;
 
 new_slab:
-	page = get_partial(s, gfpflags, node);
+	page = get_partial(s, gfpflags, node, c);
 	if (page) {
 		stat(s, ALLOC_FROM_PARTIAL);
 		object = c->freelist;
@@ -2142,7 +2139,6 @@ new_slab:
 	 */
 	object = page->freelist;
 	page->freelist = NULL;
-	page->inuse = page->objects;
 
 	stat(s, ALLOC_SLAB);
 	c->node = page_to_nid(page);
@@ -2163,7 +2159,6 @@ debug:
 	c->freelist = get_freepointer(s, object);
 	deactivate_slab(s, c);
-	c->page = NULL;
 	c->node = NUMA_NO_NODE;
 	local_irq_restore(flags);
 	return object;
 }
@@ -2387,11 +2382,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 slab_empty:
 	if (prior) {
 		/*
-		 * Slab still on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	}
+	} else
+		/* Slab must be on the full list */
+		remove_full(s, page);
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
@@ -2683,7 +2680,7 @@ static void early_kmem_cache_node_alloc(int node)
 	n = page->freelist;
 	BUG_ON(!n);
 	page->freelist = get_freepointer(kmem_cache_node, n);
-	page->inuse++;
+	page->inuse = 1;
 	page->frozen = 0;
 	kmem_cache_node->node[node] = n;
 #ifdef CONFIG_SLUB_DEBUG
@@ -2968,13 +2965,13 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
+ * This is called from kmem_cache_close(). We must be the last thread
+ * using the cache and therefore we do not need to lock anymore.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-	unsigned long flags;
 	struct page *page, *h;
 
-	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			remove_partial(n, page);
@@ -2984,7 +2981,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			"Objects remaining on kmem_cache_close()");
 		}
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
 /*
@@ -3018,6 +3014,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
+		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -3026,8 +3023,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	}
-	up_write(&slub_lock);
+	} else
+		up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -3345,23 +3342,23 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		 * list_lock. page->inuse here is the upper limit.
 		 */
 		list_for_each_entry_safe(page, t, &n->partial, lru) {
-			if (!page->inuse) {
-				remove_partial(n, page);
-				discard_slab(s, page);
-			} else {
-				list_move(&page->lru,
-				slabs_by_inuse + page->inuse);
-			}
+			list_move(&page->lru, slabs_by_inuse + page->inuse);
+			if (!page->inuse)
+				n->nr_partial--;
 		}
 
 		/*
 		 * Rebuild the partial list with the slabs filled up most
 		 * first and the least used slabs at the end.
 		 */
-		for (i = objects - 1; i >= 0; i--)
+		for (i = objects - 1; i > 0; i--)
 			list_splice(slabs_by_inuse + i, n->partial.prev);
 
 		spin_unlock_irqrestore(&n->list_lock, flags);
+
+		/* Release empty slabs */
+		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
+			discard_slab(s, page);
 	}
 
 	kfree(slabs_by_inuse);
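
Note on the check_bytes() hunk: "value" is a u8, but the expression
"value | value << 8 | value << 16 | value << 24" is computed in (signed)
int, so for bytes >= 0x80 the intermediate result is negative and widening
it to u64 sign-extends, filling bits 32-63 with ones. ORing "value64 << 32"
on top cannot clear those bits, so the comparison pattern was wrong for
such bytes; masking with 0xffffffff before the shift fixes it. A minimal
stand-alone sketch of the two variants, compilable outside the kernel
(function names are invented for illustration, not taken from mm/slub.c):

#include <stdint.h>
#include <stdio.h>

/* Old behaviour: sign extension pollutes the upper 32 bits. */
static uint64_t spread_byte_old(uint8_t value)
{
	/* Same int-promoted expression as the kernel code. */
	uint64_t value64 = value | value << 8 | value << 16 | value << 24;

	return value64 | value64 << 32;
}

/* New behaviour: mask off the sign-extended bits before shifting up. */
static uint64_t spread_byte_new(uint8_t value)
{
	uint64_t value64 = value | value << 8 | value << 16 | value << 24;

	return (value64 & 0xffffffff) | value64 << 32;
}

int main(void)
{
	/* Prints "old: ffffffffabababab" and "new: abababababababab";
	 * only the second is the intended 8-fold replication of 0xab. */
	printf("old: %016llx\n", (unsigned long long)spread_byte_old(0xab));
	printf("new: %016llx\n", (unsigned long long)spread_byte_new(0xab));
	return 0;
}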