From: Benjamin LaHaise
Date: Wed, 1 Feb 2006 11:05:30 +0000 (-0800)
Subject: [PATCH] Use 32 bit division in slab_put_obj()
X-Git-Tag: v3.12-rc1~38874
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=9884fd8df195fe48d4e1be2279b419be96127cae;p=kernel%2Fkernel-generic.git

[PATCH] Use 32 bit division in slab_put_obj()

Improve the performance of slab_put_obj().  Without the cast, gcc treats
the ptrdiff_t as a 64 bit signed integer and ends up emitting a full
signed 128 bit divide on EM64T, which is substantially slower than a
32 bit unsigned divide.

I noticed this while looking at the profile of a case where the slab
balance is just on edge and thrashes back and forth freeing a block.

Signed-off-by: Benjamin LaHaise
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/mm/slab.c b/mm/slab.c
index 6f8495e..88082ae 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1398,7 +1398,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 		struct slab *slabp = page_get_slab(virt_to_page(objp));
 		int objnr;
 
-		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
 		if (objnr) {
 			objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
 			realobj = (char *)objp + obj_dbghead(cachep);
@@ -2341,7 +2341,7 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = caller;
 
-	objnr = (objp - slabp->s_mem) / cachep->objsize;
+	objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
 
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
@@ -2699,7 +2699,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 		slabp = page_get_slab(virt_to_page(objp));
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
-		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
 		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
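
For illustration, here is a minimal standalone sketch of the pattern the
patch applies.  The struct definitions and the obj_index() helper below are
simplified stand-ins invented for this example, not the kernel's real
kmem_cache_t and struct slab:

	/*
	 * Simplified stand-ins for the kernel's slab structures
	 * (hypothetical, for illustration only).
	 */
	struct fake_cache { unsigned int objsize; };
	struct fake_slab  { char *s_mem; };	/* base of the object area */

	static int obj_index(struct fake_cache *cachep,
			     struct fake_slab *slabp, char *objp)
	{
		/*
		 * objp - slabp->s_mem has type ptrdiff_t, a 64 bit signed
		 * integer on EM64T, so dividing it directly forces a
		 * full-width signed divide.  An object always lies inside
		 * its slab, so the difference is non-negative and fits in
		 * 32 bits; casting to unsigned makes both operands 32 bit
		 * unsigned and permits the cheaper divide.
		 */
		return (unsigned)(objp - slabp->s_mem) / cachep->objsize;
	}

With gcc on EM64T this typically turns an idivq (which divides a 128 bit
RDX:RAX dividend) into a 32 bit divl, which is the saving the patch is
after.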