X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=mm%2Fslab.c;h=fc4a7744670003dc7ff37de2ef5bb23bc2c8ba47;hb=8c138bc00925521c4e764269db3a903bd2a51592;hp=e901a36e2520c2b7aa5123ddcd0553415b764397;hpb=f52b69f86e27903d6896ed5fa7cd280fec8de532;p=platform%2Fadaptation%2Frenesas_rcar%2Frenesas_kernel.git diff --git a/mm/slab.c b/mm/slab.c index e901a36..fc4a774 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -424,8 +424,8 @@ static void kmem_list3_init(struct kmem_list3 *parent) * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: * redzone word. * cachep->obj_offset: The real object. - * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] - * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address + * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] + * cachep->size - 1* BYTES_PER_WORD: last caller address * [BYTES_PER_WORD long] */ static int obj_offset(struct kmem_cache *cachep) @@ -433,11 +433,6 @@ static int obj_offset(struct kmem_cache *cachep) return cachep->obj_offset; } -static int obj_size(struct kmem_cache *cachep) -{ - return cachep->obj_size; -} - static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); @@ -449,23 +444,22 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); if (cachep->flags & SLAB_STORE_USER) - return (unsigned long long *)(objp + cachep->buffer_size - + return (unsigned long long *)(objp + cachep->size - sizeof(unsigned long long) - REDZONE_ALIGN); - return (unsigned long long *) (objp + cachep->buffer_size - + return (unsigned long long *) (objp + cachep->size - sizeof(unsigned long long)); } static void **dbg_userword(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_STORE_USER)); - return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); + return (void **)(objp + cachep->size - BYTES_PER_WORD); } #else #define obj_offset(x) 0 -#define obj_size(cachep) (cachep->buffer_size) #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) @@ -475,7 +469,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp) #ifdef CONFIG_TRACING size_t slab_buffer_size(struct kmem_cache *cachep) { - return cachep->buffer_size; + return cachep->size; } EXPORT_SYMBOL(slab_buffer_size); #endif @@ -489,56 +483,37 @@ EXPORT_SYMBOL(slab_buffer_size); static int slab_max_order = SLAB_MAX_ORDER_LO; static bool slab_max_order_set __initdata; -/* - * Functions for storing/retrieving the cachep and or slab from the page - * allocator. These are used to find the slab an obj belongs to. With kfree(), - * these are used to find the cache which an obj belongs to. 
- */ -static inline void page_set_cache(struct page *page, struct kmem_cache *cache) -{ - page->lru.next = (struct list_head *)cache; -} - static inline struct kmem_cache *page_get_cache(struct page *page) { page = compound_head(page); BUG_ON(!PageSlab(page)); - return (struct kmem_cache *)page->lru.next; -} - -static inline void page_set_slab(struct page *page, struct slab *slab) -{ - page->lru.prev = (struct list_head *)slab; -} - -static inline struct slab *page_get_slab(struct page *page) -{ - BUG_ON(!PageSlab(page)); - return (struct slab *)page->lru.prev; + return page->slab_cache; } static inline struct kmem_cache *virt_to_cache(const void *obj) { struct page *page = virt_to_head_page(obj); - return page_get_cache(page); + return page->slab_cache; } static inline struct slab *virt_to_slab(const void *obj) { struct page *page = virt_to_head_page(obj); - return page_get_slab(page); + + VM_BUG_ON(!PageSlab(page)); + return page->slab_page; } static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, unsigned int idx) { - return slab->s_mem + cache->buffer_size * idx; + return slab->s_mem + cache->size * idx; } /* - * We want to avoid an expensive divide : (offset / cache->buffer_size) - * Using the fact that buffer_size is a constant for a particular cache, - * we can replace (offset / cache->buffer_size) by + * We want to avoid an expensive divide : (offset / cache->size) + * Using the fact that size is a constant for a particular cache, + * we can replace (offset / cache->size) by * reciprocal_divide(offset, cache->reciprocal_buffer_size) */ static inline unsigned int obj_to_index(const struct kmem_cache *cache, @@ -584,7 +559,7 @@ static struct kmem_cache cache_cache = { .batchcount = 1, .limit = BOOT_CPUCACHE_ENTRIES, .shared = 1, - .buffer_size = sizeof(struct kmem_cache), + .size = sizeof(struct kmem_cache), .name = "kmem_cache", }; @@ -1153,7 +1128,7 @@ static int init_cache_nodelists_node(int node) struct kmem_list3 *l3; const int memsize = sizeof(struct kmem_list3); - list_for_each_entry(cachep, &cache_chain, next) { + list_for_each_entry(cachep, &cache_chain, list) { /* * Set up the size64 kmemlist for cpu before we can * begin anything. Make sure some other cpu on this @@ -1191,7 +1166,7 @@ static void __cpuinit cpuup_canceled(long cpu) int node = cpu_to_mem(cpu); const struct cpumask *mask = cpumask_of_node(node); - list_for_each_entry(cachep, &cache_chain, next) { + list_for_each_entry(cachep, &cache_chain, list) { struct array_cache *nc; struct array_cache *shared; struct array_cache **alien; @@ -1241,7 +1216,7 @@ free_array_cache: * the respective cache's slabs, now we can go ahead and * shrink each nodelist to its limit. 
*/ - list_for_each_entry(cachep, &cache_chain, next) { + list_for_each_entry(cachep, &cache_chain, list) { l3 = cachep->nodelists[node]; if (!l3) continue; @@ -1270,7 +1245,7 @@ static int __cpuinit cpuup_prepare(long cpu) * Now we can go ahead with allocating the shared arrays and * array caches */ - list_for_each_entry(cachep, &cache_chain, next) { + list_for_each_entry(cachep, &cache_chain, list) { struct array_cache *nc; struct array_cache *shared = NULL; struct array_cache **alien = NULL; @@ -1402,7 +1377,7 @@ static int __meminit drain_cache_nodelists_node(int node) struct kmem_cache *cachep; int ret = 0; - list_for_each_entry(cachep, &cache_chain, next) { + list_for_each_entry(cachep, &cache_chain, list) { struct kmem_list3 *l3; l3 = cachep->nodelists[node]; @@ -1545,7 +1520,7 @@ void __init kmem_cache_init(void) /* 1) create the cache_cache */ INIT_LIST_HEAD(&cache_chain); - list_add(&cache_cache.next, &cache_chain); + list_add(&cache_cache.list, &cache_chain); cache_cache.colour_off = cache_line_size(); cache_cache.array[smp_processor_id()] = &initarray_cache.cache; cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; @@ -1553,18 +1528,16 @@ void __init kmem_cache_init(void) /* * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids */ - cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + + cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + nr_node_ids * sizeof(struct kmem_list3 *); -#if DEBUG - cache_cache.obj_size = cache_cache.buffer_size; -#endif - cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, + cache_cache.object_size = cache_cache.size; + cache_cache.size = ALIGN(cache_cache.size, cache_line_size()); cache_cache.reciprocal_buffer_size = - reciprocal_value(cache_cache.buffer_size); + reciprocal_value(cache_cache.size); for (order = 0; order < MAX_ORDER; order++) { - cache_estimate(order, cache_cache.buffer_size, + cache_estimate(order, cache_cache.size, cache_line_size(), 0, &left_over, &cache_cache.num); if (cache_cache.num) break; @@ -1690,7 +1663,7 @@ void __init kmem_cache_init_late(void) /* 6) resize the head arrays to their final sizes */ mutex_lock(&cache_chain_mutex); - list_for_each_entry(cachep, &cache_chain, next) + list_for_each_entry(cachep, &cache_chain, list) if (enable_cpucache(cachep, GFP_NOWAIT)) BUG(); mutex_unlock(&cache_chain_mutex); @@ -1743,7 +1716,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n", nodeid, gfpflags); printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n", - cachep->name, cachep->buffer_size, cachep->gfporder); + cachep->name, cachep->size, cachep->gfporder); for_each_online_node(node) { unsigned long active_objs = 0, num_objs = 0, free_objects = 0; @@ -1874,7 +1847,7 @@ static void kmem_rcu_free(struct rcu_head *head) static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, unsigned long caller) { - int size = obj_size(cachep); + int size = cachep->object_size; addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; @@ -1906,7 +1879,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) { - int size = obj_size(cachep); + int size = cachep->object_size; addr = &((char *)addr)[obj_offset(cachep)]; memset(addr, val, size); @@ -1966,7 +1939,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) printk("\n"); } realobj = 
(char *)objp + obj_offset(cachep); - size = obj_size(cachep); + size = cachep->object_size; for (i = 0; i < size && lines; i += 16, lines--) { int limit; limit = 16; @@ -1983,7 +1956,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) int lines = 0; realobj = (char *)objp + obj_offset(cachep); - size = obj_size(cachep); + size = cachep->object_size; for (i = 0; i < size; i++) { char exp = POISON_FREE; @@ -2047,10 +2020,10 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab if (cachep->flags & SLAB_POISON) { #ifdef CONFIG_DEBUG_PAGEALLOC - if (cachep->buffer_size % PAGE_SIZE == 0 && + if (cachep->size % PAGE_SIZE == 0 && OFF_SLAB(cachep)) kernel_map_pages(virt_to_page(objp), - cachep->buffer_size / PAGE_SIZE, 1); + cachep->size / PAGE_SIZE, 1); else check_poison_obj(cachep, objp); #else @@ -2300,7 +2273,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, mutex_lock(&cache_chain_mutex); } - list_for_each_entry(pc, &cache_chain, next) { + list_for_each_entry(pc, &cache_chain, list) { char tmp; int res; @@ -2313,7 +2286,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (res) { printk(KERN_ERR "SLAB: cache with size %d has lost its name\n", - pc->buffer_size); + pc->size); continue; } @@ -2418,8 +2391,9 @@ kmem_cache_create (const char *name, size_t size, size_t align, goto oops; cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; + cachep->object_size = size; + cachep->align = align; #if DEBUG - cachep->obj_size = size; /* * Both debugging options require word-alignment which is calculated @@ -2442,7 +2416,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, } #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) if (size >= malloc_sizes[INDEX_L3 + 1].cs_size - && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { + && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); size = PAGE_SIZE; } @@ -2511,7 +2485,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, cachep->gfpflags = 0; if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) cachep->gfpflags |= GFP_DMA; - cachep->buffer_size = size; + cachep->size = size; cachep->reciprocal_buffer_size = reciprocal_value(size); if (flags & CFLGS_OFF_SLAB) { @@ -2545,7 +2519,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, } /* cache setup completed, link it into the list */ - list_add(&cachep->next, &cache_chain); + list_add(&cachep->list, &cache_chain); oops: if (!cachep && (flags & SLAB_PANIC)) panic("kmem_cache_create(): failed to create slab `%s'\n", @@ -2740,10 +2714,10 @@ void kmem_cache_destroy(struct kmem_cache *cachep) /* * the chain is never empty, cache_cache is never destroyed */ - list_del(&cachep->next); + list_del(&cachep->list); if (__cache_shrink(cachep)) { slab_error(cachep, "Can't free all objects"); - list_add(&cachep->next, &cache_chain); + list_add(&cachep->list, &cache_chain); mutex_unlock(&cache_chain_mutex); put_online_cpus(); return; @@ -2840,10 +2814,10 @@ static void cache_init_objs(struct kmem_cache *cachep, slab_error(cachep, "constructor overwrote the" " start of an object"); } - if ((cachep->buffer_size % PAGE_SIZE) == 0 && + if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) kernel_map_pages(virt_to_page(objp), - cachep->buffer_size / PAGE_SIZE, 0); + cachep->size / PAGE_SIZE, 0); #else if (cachep->ctor) cachep->ctor(objp); @@ 
-2918,8 +2892,8 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, nr_pages <<= cache->gfporder; do { - page_set_cache(page, cache); - page_set_slab(page, slab); + page->slab_cache = cache; + page->slab_page = slab; page++; } while (--nr_pages); } @@ -3057,7 +3031,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, kfree_debugcheck(objp); page = virt_to_head_page(objp); - slabp = page_get_slab(page); + slabp = page->slab_page; if (cachep->flags & SLAB_RED_ZONE) { verify_redzone_free(cachep, objp); @@ -3077,10 +3051,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, #endif if (cachep->flags & SLAB_POISON) { #ifdef CONFIG_DEBUG_PAGEALLOC - if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { + if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { store_stackinfo(cachep, objp, (unsigned long)caller); kernel_map_pages(virt_to_page(objp), - cachep->buffer_size / PAGE_SIZE, 0); + cachep->size / PAGE_SIZE, 0); } else { poison_obj(cachep, objp, POISON_FREE); } @@ -3230,9 +3204,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, return objp; if (cachep->flags & SLAB_POISON) { #ifdef CONFIG_DEBUG_PAGEALLOC - if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) + if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) kernel_map_pages(virt_to_page(objp), - cachep->buffer_size / PAGE_SIZE, 1); + cachep->size / PAGE_SIZE, 1); else check_poison_obj(cachep, objp); #else @@ -3261,8 +3235,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, struct slab *slabp; unsigned objnr; - slabp = page_get_slab(virt_to_head_page(objp)); - objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; + slabp = virt_to_head_page(objp)->slab_page; + objnr = (unsigned)(objp - slabp->s_mem) / cachep->size; slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; } #endif @@ -3285,7 +3259,7 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) if (cachep == &cache_cache) return false; - return should_failslab(obj_size(cachep), flags, cachep->flags); + return should_failslab(cachep->object_size, flags, cachep->flags); } static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) @@ -3545,14 +3519,14 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, out: local_irq_restore(save_flags); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); - kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags, + kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, flags); if (likely(ptr)) - kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep)); + kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); if (unlikely((flags & __GFP_ZERO) && ptr)) - memset(ptr, 0, obj_size(cachep)); + memset(ptr, 0, cachep->object_size); return ptr; } @@ -3607,15 +3581,15 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) objp = __do_cache_alloc(cachep, flags); local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); - kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags, + kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, flags); prefetchw(objp); if (likely(objp)) - kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep)); + kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); if (unlikely((flags & __GFP_ZERO) && objp)) - memset(objp, 0, obj_size(cachep)); + memset(objp, 0, cachep->object_size); return objp; } @@ 
-3731,7 +3705,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp, kmemleak_free_recursive(objp, cachep->flags); objp = cache_free_debugcheck(cachep, objp, caller); - kmemcheck_slab_free(cachep, objp, obj_size(cachep)); + kmemcheck_slab_free(cachep, objp, cachep->object_size); /* * Skip calling cache_free_alien() when the platform is not numa. @@ -3766,7 +3740,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0)); trace_kmem_cache_alloc(_RET_IP_, ret, - obj_size(cachep), cachep->buffer_size, flags); + cachep->object_size, cachep->size, flags); return ret; } @@ -3794,7 +3768,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) __builtin_return_address(0)); trace_kmem_cache_alloc_node(_RET_IP_, ret, - obj_size(cachep), cachep->buffer_size, + cachep->object_size, cachep->size, flags, nodeid); return ret; @@ -3876,7 +3850,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, ret = __cache_alloc(cachep, flags, caller); trace_kmalloc((unsigned long) caller, ret, - size, cachep->buffer_size, flags); + size, cachep->size, flags); return ret; } @@ -3916,9 +3890,9 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) unsigned long flags; local_irq_save(flags); - debug_check_no_locks_freed(objp, obj_size(cachep)); + debug_check_no_locks_freed(objp, cachep->size); if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) - debug_check_no_obj_freed(objp, obj_size(cachep)); + debug_check_no_obj_freed(objp, cachep->object_size); __cache_free(cachep, objp, __builtin_return_address(0)); local_irq_restore(flags); @@ -3947,8 +3921,9 @@ void kfree(const void *objp) local_irq_save(flags); kfree_debugcheck(objp); c = virt_to_cache(objp); - debug_check_no_locks_freed(objp, obj_size(c)); - debug_check_no_obj_freed(objp, obj_size(c)); + debug_check_no_locks_freed(objp, c->object_size); + + debug_check_no_obj_freed(objp, c->object_size); __cache_free(c, (void *)objp, __builtin_return_address(0)); local_irq_restore(flags); } @@ -3956,7 +3931,7 @@ EXPORT_SYMBOL(kfree); unsigned int kmem_cache_size(struct kmem_cache *cachep) { - return obj_size(cachep); + return cachep->object_size; } EXPORT_SYMBOL(kmem_cache_size); @@ -4030,7 +4005,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) return 0; fail: - if (!cachep->next.next) { + if (!cachep->list.next) { /* Cache is not active yet. Roll back what we did */ node--; while (node >= 0) { @@ -4124,13 +4099,13 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) * The numbers are guessed, we should auto-tune as described by * Bonwick. */ - if (cachep->buffer_size > 131072) + if (cachep->size > 131072) limit = 1; - else if (cachep->buffer_size > PAGE_SIZE) + else if (cachep->size > PAGE_SIZE) limit = 8; - else if (cachep->buffer_size > 1024) + else if (cachep->size > 1024) limit = 24; - else if (cachep->buffer_size > 256) + else if (cachep->size > 256) limit = 54; else limit = 120; @@ -4145,7 +4120,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) * to a larger limit. Thus disabled by default. */ shared = 0; - if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1) + if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) shared = 8; #if DEBUG @@ -4215,7 +4190,7 @@ static void cache_reap(struct work_struct *w) /* Give up. Setup the next iteration. 
*/ goto out; - list_for_each_entry(searchp, &cache_chain, next) { + list_for_each_entry(searchp, &cache_chain, list) { check_irq_on(); /* @@ -4308,7 +4283,7 @@ static void s_stop(struct seq_file *m, void *p) static int s_show(struct seq_file *m, void *p) { - struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next); + struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); struct slab *slabp; unsigned long active_objs; unsigned long num_objs; @@ -4364,7 +4339,7 @@ static int s_show(struct seq_file *m, void *p) printk(KERN_ERR "slab: cache %s error: %s\n", name, error); seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", - name, active_objs, num_objs, cachep->buffer_size, + name, active_objs, num_objs, cachep->size, cachep->num, (1 << cachep->gfporder)); seq_printf(m, " : tunables %4u %4u %4u", cachep->limit, cachep->batchcount, cachep->shared); @@ -4456,7 +4431,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer, /* Find the cache in the chain of caches. */ mutex_lock(&cache_chain_mutex); res = -EINVAL; - list_for_each_entry(cachep, &cache_chain, next) { + list_for_each_entry(cachep, &cache_chain, list) { if (!strcmp(cachep->name, kbuf)) { if (limit < 1 || batchcount < 1 || batchcount > limit || shared < 0) { @@ -4532,7 +4507,7 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) int i; if (n[0] == n[1]) return; - for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) { + for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) { if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) continue; if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) @@ -4677,6 +4652,6 @@ size_t ksize(const void *objp) if (unlikely(objp == ZERO_SIZE_PTR)) return 0; - return obj_size(virt_to_cache(objp)); + return virt_to_cache(objp)->object_size; } EXPORT_SYMBOL(ksize);
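
The hunks around obj_to_index() keep the comment about avoiding an expensive divide by using reciprocal_divide(offset, cache->reciprocal_buffer_size). As a reference, the following is a minimal userspace sketch of that multiply-and-shift trick, assuming the older 32.32 fixed-point form of reciprocal_value()/reciprocal_divide(); the standalone main() and the constants are illustrative only and are not part of this patch.

/*
 * Sketch of the reciprocal-divide trick referenced by the comment above
 * obj_to_index(): precompute a 32.32 fixed-point reciprocal once per cache,
 * then turn every "offset / cache->size" into a multiply and a shift.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* assumed to mirror the old lib/reciprocal_div.c: ceil(2^32 / k) */
static uint32_t reciprocal_value(uint32_t k)
{
	uint64_t val = (1ULL << 32) + (k - 1);
	return (uint32_t)(val / k);
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t size = 192;				/* stand-in for cache->size */
	uint32_t recip = reciprocal_value(size);	/* cache->reciprocal_buffer_size */
	uint32_t idx;

	for (idx = 0; idx < 1024; idx++) {
		uint32_t offset = idx * size;		/* obj - slab->s_mem */
		assert(reciprocal_divide(offset, recip) == idx);
	}
	printf("multiply-and-shift matches plain division for size %u\n",
	       (unsigned)size);
	return 0;
}

For offsets that are exact multiples of the object size, which is the only case obj_to_index() handles, the multiply-and-shift result matches plain division.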
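
The other recurring change above replaces the page_set_cache()/page_get_cache()/page_set_slab()/page_get_slab() casts through page->lru.next and page->lru.prev with dedicated page->slab_cache and page->slab_page fields. A small standalone sketch of that bookkeeping follows, using stand-in structs rather than the kernel's real struct page; the types and main() are illustrative assumptions only.

#include <stddef.h>
#include <stdio.h>

/* stand-in types only; not the kernel's struct page / struct slab */
struct kmem_cache { const char *name; size_t size; };
struct slab { void *s_mem; };

struct page {
	struct kmem_cache *slab_cache;	/* was (struct kmem_cache *)page->lru.next */
	struct slab *slab_page;		/* was (struct slab *)page->lru.prev */
};

/* same loop shape as the slab_map_pages() hunk, minus the real allocator */
static void slab_map_pages(struct kmem_cache *cache, struct slab *slabp,
			   struct page *page, unsigned int nr_pages)
{
	do {
		page->slab_cache = cache;
		page->slab_page = slabp;
		page++;
	} while (--nr_pages);
}

int main(void)
{
	struct kmem_cache cache = { .name = "demo", .size = 64 };
	struct slab slabp = { .s_mem = NULL };
	struct page pages[4];

	slab_map_pages(&cache, &slabp, pages, 4);
	printf("page[3] belongs to cache %s\n", pages[3].slab_cache->name);
	return 0;
}

Storing typed pointers instead of reusing lru removes the casts from virt_to_cache()/virt_to_slab() and makes the slab back-pointers visible to type checking.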