/**
* slab_destroy - destroy and release all objects in a slab
* @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
+ * @page: page pointer being destroyed
*
* Destroy all the objs in a slab, and release the mem back to the system.
* Before calling the slab must have been unlinked from the cache. The
* cache-lock is not held/needed.
*/
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
- struct freelist *freelist;
+ void *freelist;
freelist = page->freelist;
slab_destroy_debugcheck(cachep, page);
* kmem_find_general_cachep till the initialization is complete.
* Hence we cannot have freelist_cache same as the original cache.
*/
-static struct freelist *alloc_slabmgmt(struct kmem_cache *cachep,
+static void *alloc_slabmgmt(struct kmem_cache *cachep,
struct page *page, int colour_off,
gfp_t local_flags, int nodeid)
{
- struct freelist *freelist;
+ void *freelist;
void *addr = page_address(page);
if (OFF_SLAB(cachep)) {
* virtual address for kfree, ksize, and slab debugging.
*/
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
- struct freelist *freelist)
+ void *freelist)
{
page->slab_cache = cache;
page->freelist = freelist;
static int cache_grow(struct kmem_cache *cachep,
gfp_t flags, int nodeid, struct page *page)
{
- struct freelist *freelist;
+ void *freelist;
size_t offset;
gfp_t local_flags;
struct kmem_cache_node *n;
VM_BUG_ON(!mutex_is_locked(&slab_mutex));
for_each_memcg_cache_index(i) {
- c = cache_from_memcg(cachep, i);
+ c = cache_from_memcg_idx(cachep, i);
if (c)
/* return value determined by the parent cache only */
__do_tune_cpucache(c, limit, batchcount, shared, gfp);