From: Martin Schwidefsky
Date: Thu, 26 Jul 2012 06:53:06 +0000 (+0200)
Subject: s390/mm: downgrade page table after fork of a 31 bit process
X-Git-Tag: v3.2.27~39
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f22027d0e8398e377a42a62241137b77a35eda97;p=profile%2Fivi%2Fkernel-adaptation-intel-automotive.git

s390/mm: downgrade page table after fork of a 31 bit process

commit 0f6f281b731d20bfe75c13f85d33f3f05b440222 upstream.

The downgrade of the 4 level page table created by init_new_context is
currently done only in start_thread31. If a 31 bit process forks, the
new mm uses a 4 level page table, including the task size of 2<<42 that
goes along with it. This is incorrect, as a 31 bit process can now map
memory beyond 2GB. Define arch_dup_mmap to do the downgrade after fork.

Signed-off-by: Martin Schwidefsky
[bwh: Backported to 3.2: adjust context]
Signed-off-by: Ben Hutchings
---

diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5682f16..20f0e01 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -12,7 +12,6 @@
 #include
 #include
 #include
-#include
 
 static inline int init_new_context(struct task_struct *tsk,
                                    struct mm_struct *mm)
@@ -92,4 +91,17 @@ static inline void activate_mm(struct mm_struct *prev,
         switch_mm(prev, next, current);
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+                                 struct mm_struct *mm)
+{
+#ifdef CONFIG_64BIT
+        if (oldmm->context.asce_limit < mm->context.asce_limit)
+                crst_table_downgrade(mm, oldmm->context.asce_limit);
+#endif
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
 #endif /* __S390_MMU_CONTEXT_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 5f33d37..172550d 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -130,7 +130,9 @@ struct stack_frame {
         regs->psw.mask = psw_user_bits | PSW_MASK_BA;           \
         regs->psw.addr = new_psw | PSW_ADDR_AMODE;              \
         regs->gprs[15] = new_stackp;                            \
+        __tlb_flush_mm(current->mm);                            \
         crst_table_downgrade(current->mm, 1UL << 31);           \
+        update_mm(current->mm, current);                        \
 } while (0)
 
 /* Forward declaration, a strange C thing */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index a0155c0..c70b3d8 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -106,9 +106,15 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
 
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
+        int rc;
+
         if (!is_compat_task() &&
-            len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
-                return crst_table_upgrade(current->mm, 1UL << 53);
+            len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+                rc = crst_table_upgrade(current->mm, 1UL << 53);
+                if (rc)
+                        return rc;
+                update_mm(current->mm, current);
+        }
         return 0;
 }
 
@@ -128,6 +134,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
                 rc = crst_table_upgrade(mm, 1UL << 53);
                 if (rc)
                         return (unsigned long) rc;
+                update_mm(mm, current);
                 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
         }
         return area;
@@ -150,6 +157,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
                 rc = crst_table_upgrade(mm, 1UL << 53);
                 if (rc)
                         return (unsigned long) rc;
+                update_mm(mm, current);
                 area = arch_get_unmapped_area_topdown(filp, addr, len,
                                                       pgoff, flags);
         }
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index f8ceac4..f8e92f8 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -97,7 +97,6 @@ repeat:
                 crst_table_free(mm, table);
         if (mm->context.asce_limit < limit)
                 goto repeat;
-        update_mm(mm, current);
         return 0;
 }
 
@@ -105,9 +104,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
         pgd_t *pgd;
 
-        if (mm->context.asce_limit <= limit)
-                return;
-        __tlb_flush_mm(mm);
         while (mm->context.asce_limit > limit) {
                 pgd = mm->pgd;
                 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -130,7 +126,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
                 mm->task_size = mm->context.asce_limit;
                 crst_table_free(mm, (unsigned long *) pgd);
         }
-        update_mm(mm, current);
 }
 #endif
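For context, the new arch_dup_mmap() hook is picked up by the generic fork
path rather than by any s390-specific code: dup_mmap() in kernel/fork.c
calls arch_dup_mmap(oldmm, mm) once the parent's VMAs have been copied into
the child mm. The sketch below paraphrases that call site; it is not part
of this patch, and copy_vmas() is a hypothetical helper standing in for the
real VMA copy loop.

/*
 * Paraphrased sketch of the generic call site in kernel/fork.c (not part
 * of this patch).  After the parent's VMAs have been copied, the
 * architecture gets a chance to fix up the new mm; with the hunks above,
 * s390's arch_dup_mmap() downgrades the child's 4 level page table when
 * the parent (a 31 bit compat task) runs with a lower asce_limit.
 */
static int dup_mmap_sketch(struct mm_struct *mm, struct mm_struct *oldmm)
{
        int retval;

        retval = copy_vmas(mm, oldmm);  /* hypothetical stand-in for the
                                         * real VMA copy loop */
        if (retval)
                return retval;
        arch_dup_mmap(oldmm, mm);       /* s390: downgrade page table if
                                         * the parent's asce_limit is lower */
        return 0;
}

This is presumably also why __tlb_flush_mm() and update_mm() move out of
crst_table_downgrade() and into the start_thread31 caller: in the fork path
the child mm has never been active on any CPU, so there is nothing to flush
and no ASCE to reload, while the exec path in start_thread31 still needs
both and now performs them explicitly.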