From 8b2e9b712f6139df9c754af0d67fecc4bbc88545 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Wed, 20 Nov 2013 14:41:47 -0800
Subject: [PATCH] Revert "mm: create a separate slab for page->ptl allocation"

This reverts commit ea1e7ed33708c7a760419ff9ded0a6cb90586a50.

Al points out that while the commit *does* actually create a separate
slab for the page->ptl allocation, that slab is never actually used,
and the code continues to use kmalloc/kfree.

Damien Wyart points out that the original patch did have the conversion
to use kmem_cache_alloc/free, so it got lost somewhere on its way to me.

Revert the half-arsed attempt that didn't do anything.  If we really do
want the special slab (remember: this is all relevant just for debug
builds, so it's not necessarily all that critical) we might as well redo
the patch fully.

Reported-by: Al Viro
Acked-by: Andrew Morton
Cc: Kirill A Shutemov
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h | 9 ---------
 init/main.c        | 2 +-
 mm/memory.c        | 7 -------
 3 files changed, 1 insertion(+), 17 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0548eb201e05..1cedd000cf29 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1318,7 +1318,6 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 
 #if USE_SPLIT_PTE_PTLOCKS
 #if BLOATED_SPINLOCKS
-void __init ptlock_cache_init(void);
 extern bool ptlock_alloc(struct page *page);
 extern void ptlock_free(struct page *page);
 
@@ -1327,7 +1326,6 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 	return page->ptl;
 }
 #else /* BLOATED_SPINLOCKS */
-static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_alloc(struct page *page)
 {
 	return true;
@@ -1380,17 +1378,10 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
 	return &mm->page_table_lock;
 }
-static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_init(struct page *page) { return true; }
 static inline void pte_lock_deinit(struct page *page) {}
 #endif /* USE_SPLIT_PTE_PTLOCKS */
 
-static inline void pgtable_init(void)
-{
-	ptlock_cache_init();
-	pgtable_cache_init();
-}
-
 static inline bool pgtable_page_ctor(struct page *page)
 {
 	inc_zone_page_state(page, NR_PAGETABLE);
diff --git a/init/main.c b/init/main.c
index 01573fdfa186..febc511e078a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -476,7 +476,7 @@ static void __init mm_init(void)
 	mem_init();
 	kmem_cache_init();
 	percpu_init_late();
-	pgtable_init();
+	pgtable_cache_init();
 	vmalloc_init();
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 0409e8f43fa0..5d9025f3b3e1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4272,13 +4272,6 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 #if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
-static struct kmem_cache *page_ptl_cachep;
-void __init ptlock_cache_init(void)
-{
-	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
-			SLAB_PANIC, NULL);
-}
-
 bool ptlock_alloc(struct page *page)
 {
 	spinlock_t *ptl;
-- 
2.34.1
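
For reference, a minimal sketch of what the lost conversion would
presumably have looked like, reconstructed from the reverted hunks above:
ptlock_alloc()/ptlock_free() switch from kmalloc/kfree to
kmem_cache_alloc/kmem_cache_free on the dedicated slab. The cache setup
is taken verbatim from the reverted mm/memory.c hunk; the GFP_KERNEL flag
and the alloc/free bodies are assumptions for illustration, not the
actual redone patch.

#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct page *page)
{
	spinlock_t *ptl;

	/* allocate from the dedicated slab rather than kmalloc() */
	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	page->ptl = ptl;
	return true;
}

void ptlock_free(struct page *page)
{
	/* ...and free back to the slab rather than kfree() */
	kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif /* USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS */

With this shape, ptlock_cache_init() also has to be called during boot
(which is what the reverted pgtable_init() wrapper in include/linux/mm.h
did), otherwise the first ptlock_alloc() would dereference a NULL cache.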