From b462705ac679f6195d1b23a752cda592d9107495 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Sat, 29 Oct 2005 18:16:24 -0700
Subject: [PATCH] [PATCH] mm: arches skip ptlock

Convert those few architectures which are calling pud_alloc, pmd_alloc,
pte_alloc_map on a user mm, not to take the page_table_lock first, nor
drop it after.  Each of these can continue to use pte_alloc_map, no need
to change over to pte_alloc_map_lock, they're neither racy nor swappable.

In the sparc64 io_remap_pfn_range, flush_tlb_range then falls outside of
the page_table_lock: that's okay, on sparc64 it's like flush_tlb_mm, and
that has always been called from outside of page_table_lock in dup_mmap.

Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/mm/mm-armv.c     | 14 --------------
 arch/arm26/mm/memc.c      | 15 ---------------
 arch/sparc/mm/generic.c   |  4 +---
 arch/sparc64/mm/generic.c |  6 ++----
 arch/um/kernel/skas/mmu.c |  3 ---
 5 files changed, 3 insertions(+), 39 deletions(-)

diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 61bc2fa..60f3e03 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -180,11 +180,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
 	if (!vectors_high()) {
 		/*
-		 * This lock is here just to satisfy pmd_alloc and pte_lock
-		 */
-		spin_lock(&mm->page_table_lock);
-
-		/*
 		 * On ARM, first page must always be allocated since it
 		 * contains the machine vectors.
 		 */
@@ -201,23 +196,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		set_pte(new_pte, *init_pte);
 		pte_unmap_nested(init_pte);
 		pte_unmap(new_pte);
-
-		spin_unlock(&mm->page_table_lock);
 	}
 
 	return new_pgd;
 
 no_pte:
-	spin_unlock(&mm->page_table_lock);
 	pmd_free(new_pmd);
-	free_pages((unsigned long)new_pgd, 2);
-	return NULL;
-
 no_pmd:
-	spin_unlock(&mm->page_table_lock);
 	free_pages((unsigned long)new_pgd, 2);
-	return NULL;
-
 no_pgd:
 	return NULL;
 }
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
index d6b008b..34def63 100644
--- a/arch/arm26/mm/memc.c
+++ b/arch/arm26/mm/memc.c
@@ -79,12 +79,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		goto no_pgd;
 
 	/*
-	 * This lock is here just to satisfy pmd_alloc and pte_lock
-	 * FIXME: I bet we could avoid taking it pretty much altogether
-	 */
-	spin_lock(&mm->page_table_lock);
-
-	/*
 	 * On ARM, first page must always be allocated since it contains
 	 * the machine vectors.
 	 */
@@ -113,23 +107,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
 		(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
 
-	spin_unlock(&mm->page_table_lock);
-
 	/* update MEMC tables */
 	cpu_memc_update_all(new_pgd);
 	return new_pgd;
 
 no_pte:
-	spin_unlock(&mm->page_table_lock);
 	pmd_free(new_pmd);
-	free_pgd_slow(new_pgd);
-	return NULL;
-
 no_pmd:
-	spin_unlock(&mm->page_table_lock);
 	free_pgd_slow(new_pgd);
-	return NULL;
-
 no_pgd:
 	return NULL;
 }
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 659c9a7..9604893 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -81,9 +81,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	dir = pgd_offset(mm, from);
 	flush_cache_range(vma, beg, end);
 
-	spin_lock(&mm->page_table_lock);
 	while (from < end) {
-		pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+		pmd_t *pmd = pmd_alloc(mm, dir, from);
 		error = -ENOMEM;
 		if (!pmd)
 			break;
@@ -93,7 +92,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 		from = (from + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	}
-	spin_unlock(&mm->page_table_lock);
 
 	flush_tlb_range(vma, beg, end);
 	return error;
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index afc01ce..112c316 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -135,9 +135,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	dir = pgd_offset(mm, from);
 	flush_cache_range(vma, beg, end);
 
-	spin_lock(&mm->page_table_lock);
 	while (from < end) {
-		pud_t *pud = pud_alloc(current->mm, dir, from);
+		pud_t *pud = pud_alloc(mm, dir, from);
 		error = -ENOMEM;
 		if (!pud)
 			break;
@@ -147,8 +146,7 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 		from = (from + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	}
-	flush_tlb_range(vma, beg, end);
-	spin_unlock(&mm->page_table_lock);
+	flush_tlb_range(vma, beg, end);
 
 	return error;
 }
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 240143b..02cf36e 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -28,7 +28,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	pmd_t *pmd;
 	pte_t *pte;
 
-	spin_lock(&mm->page_table_lock);
 	pgd = pgd_offset(mm, proc);
 	pud = pud_alloc(mm, pgd, proc);
 	if (!pud)
@@ -63,7 +62,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
 	*pte = pte_mkexec(*pte);
 	*pte = pte_wrprotect(*pte);
-	spin_unlock(&mm->page_table_lock);
 	return(0);
 
 out_pmd:
@@ -71,7 +69,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 out_pte:
 	pmd_free(pmd);
 out:
-	spin_unlock(&mm->page_table_lock);
 	return(-ENOMEM);
 }
 
-- 
2.7.4
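
For reference, here is a minimal sketch (not part of the patch) of the two patterns the commit
message contrasts: callers that are neither racy nor swappable may keep using pte_alloc_map on a
user mm without taking mm->page_table_lock, while racy/swappable callers would switch to
pte_alloc_map_lock. The helper names establish_pte_unlocked and establish_pte_locked and their
arguments are invented for illustration; the page-table calls (pgd_offset, pud_alloc, pmd_alloc,
pte_alloc_map, pte_unmap, pte_alloc_map_lock, pte_unmap_unlock, set_pte, set_pte_at) are assumed
to be the interfaces of the 2.6.14/2.6.15-era tree this series targets.

#include <linux/mm.h>
#include <linux/errno.h>

/*
 * Pattern kept by the converted callers above: allocate page tables on a
 * user mm and map the pte without holding mm->page_table_lock.  Safe when
 * the caller is neither racy nor swappable.  Helper name is hypothetical.
 */
static int establish_pte_unlocked(struct mm_struct *mm, unsigned long addr,
				  pte_t entry)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	pte = pte_alloc_map(mm, pmd, addr);	/* no page_table_lock taken */
	if (!pte)
		return -ENOMEM;
	set_pte(pte, entry);
	pte_unmap(pte);
	return 0;
}

/*
 * Variant mentioned for racy/swappable callers: pte_alloc_map_lock()
 * returns with the page-table lock held, pte_unmap_unlock() drops it.
 * Helper name is hypothetical.
 */
static int establish_pte_locked(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, pte_t entry)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	set_pte_at(mm, addr, pte, entry);
	pte_unmap_unlock(pte, ptl);
	return 0;
}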