From: David Daney
Date: Tue, 16 Oct 2012 22:48:10 +0000 (+0200)
Subject: MIPS: Control huge tlb support via Kconfig symbol MIPS_HUGE_TLB_SUPPORT
X-Git-Tag: v3.8-rc1~109^2~36
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=aa1762f49c81a14d0453e4f67f922e4f155510a3;p=platform%2Fkernel%2Flinux-exynos.git

MIPS: Control huge tlb support via Kconfig symbol MIPS_HUGE_TLB_SUPPORT

We need Huge TLBs for HUGETLB_PAGE, or the soon to follow
TRANSPARENT_HUGEPAGE.  Collect this information under a single Kconfig
symbol.

Signed-off-by: David Daney
---

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dba9390..397194a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1077,6 +1077,9 @@ config SYS_SUPPORTS_HUGETLBFS
 	depends on CPU_SUPPORTS_HUGEPAGES && 64BIT
 	default y

+config MIPS_HUGE_TLB_SUPPORT
+	def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE
+
 config IRQ_CPU
 	bool

diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index eb74289..881b980 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -240,7 +240,7 @@
 #define PM_HUGE_MASK	PM_64M
 #elif defined(CONFIG_PAGE_SIZE_64KB)
 #define PM_HUGE_MASK	PM_256M
-#elif defined(CONFIG_HUGETLB_PAGE)
+#elif defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
 #error Bad page size configuration for hugetlbfs!
 #endif

diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index c48a7f0..31ab10f 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -33,17 +33,17 @@
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE - 1))

-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 #define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
 #define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT)
 #define HPAGE_MASK	(~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-#else /* !CONFIG_HUGETLB_PAGE */
+#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
 #define HPAGE_SHIFT	({BUILD_BUG(); 0; })
 #define HPAGE_SIZE	({BUILD_BUG(); 0; })
 #define HPAGE_MASK	({BUILD_BUG(); 0; })
 #define HUGETLB_PAGE_ORDER	({BUILD_BUG(); 0; })
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

 #ifndef __ASSEMBLY__

diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index f8b8fb3..1e2642c 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -128,7 +128,7 @@
 #define _PAGE_MODIFIED	(1 << _PAGE_MODIFIED_SHIFT)
 #define _PAGE_FILE	(_PAGE_MODIFIED)

-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 /* huge tlb page */
 #define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_HUGE	(1 << _PAGE_HUGE_SHIFT)
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
index 4461198..65900da 100644
--- a/arch/mips/include/asm/sparsemem.h
+++ b/arch/mips/include/asm/sparsemem.h
@@ -6,7 +6,7 @@
  * SECTION_SIZE_BITS		2^N: how big each section will be
  * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
  */
-#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && defined(CONFIG_PAGE_SIZE_64KB)
 # define SECTION_SIZE_BITS	29
 #else
 # define SECTION_SIZE_BITS	28
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 4b9b935..936165d 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -305,7 +305,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	pudp = pud_offset(pgdp, address);
 	pmdp = pmd_offset(pudp, address);
 	idx = read_c0_index();
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	/* this could be a huge page  */
 	if (pmd_huge(*pmdp)) {
 		unsigned long lo;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index a36b495..98b2b73 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -158,7 +158,7 @@ enum label_id {
 	label_smp_pgtable_change,
 	label_r3000_write_probe_fail,
 	label_large_segbits_fault,
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	label_tlb_huge_update,
 #endif
 };
@@ -177,7 +177,7 @@ UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
 UASM_L_LA(_large_segbits_fault)
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 UASM_L_LA(_tlb_huge_update)
 #endif

@@ -666,7 +666,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 	}
 }

-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT

 static __cpuinit void build_restore_pagemask(u32 **p,
 					     struct uasm_reloc **r,
@@ -792,7 +792,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
 	build_huge_update_entries(p, pte, ptr);
 	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 }
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

 #ifdef CONFIG_64BIT
 /*
@@ -1237,7 +1237,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 	/* Adjust the context during the load latency. */
 	build_adjust_context(p, tmp);

-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
 	/*
 	 * The in the LWX case we don't want to do the load in the
@@ -1246,7 +1246,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 	 */
 	if (use_lwx_insns())
 		uasm_i_nop(p);
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

 	/* build_update_entries */

@@ -1349,7 +1349,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif

-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
 #endif

@@ -1359,7 +1359,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 		uasm_l_leave(&l, p);
 		uasm_i_eret(&p); /* return from trap */
 	}
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	uasm_l_tlb_huge_update(&l, p);
 	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
 	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
@@ -1404,7 +1404,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
 		final_len = p - tlb_handler;
 	} else {
-#if defined(CONFIG_HUGETLB_PAGE)
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 		const enum label_id ls = label_tlb_huge_update;
 #else
 		const enum label_id ls = label_vmalloc;
@@ -1880,7 +1880,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
 #endif

-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	/*
 	 * For huge tlb entries, pmd doesn't contain an address but
 	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
@@ -1996,7 +1996,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	build_make_valid(&p, &r, wr.r1, wr.r2);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	/*
 	 * This is the entry point when build_r4000_tlbchange_handler_head
 	 * spots a huge page.
@@ -2089,7 +2089,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	build_make_write(&p, &r, wr.r1, wr.r2);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	/*
 	 * This is the entry point when
 	 * build_r4000_tlbchange_handler_head spots a huge page.
@@ -2137,7 +2137,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	build_make_write(&p, &r, wr.r1, wr.r2);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	/*
 	 * This is the entry point when
 	 * build_r4000_tlbchange_handler_head spots a huge page.
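
Note (not part of the patch above): because MIPS_HUGE_TLB_SUPPORT is a
def_bool of "HUGETLB_PAGE || TRANSPARENT_HUGEPAGE", it has no prompt and is
set to y whenever either user of huge TLBs is configured. The sketch below
only illustrates how arch code can key off the single symbol; the helper
name mips_huge_tlb_configured() is hypothetical and does not exist in the
kernel.

/*
 * Illustrative sketch only. IS_ENABLED() comes from <linux/kconfig.h>;
 * the helper itself is made up for this note.
 */
#include <linux/kconfig.h>
#include <linux/types.h>

static inline bool mips_huge_tlb_configured(void)
{
	/* True when HUGETLB_PAGE=y or TRANSPARENT_HUGEPAGE=y. */
	return IS_ENABLED(CONFIG_MIPS_HUGE_TLB_SUPPORT);
}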