powerpc: Add hugepage support to 64-bit tablewalk code for FSL_BOOK3E
author	Becky Bruce <beckyb@kernel.crashing.org>
Mon, 10 Oct 2011 10:50:42 +0000 (10:50 +0000)
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>
Wed, 7 Dec 2011 05:26:22 +0000 (16:26 +1100)
Before hugetlb, at each level of the table we test for !0 to
determine if we have a valid table entry.  With hugetlb, this
becomes a signed compare:
        < 0 is a normal entry
        0 is an invalid entry
        > 0 is huge

This works because the hugepage code clears the top bit of the entry
(which is always set for normal, non-huge entries) to mark it as a
hugepage.
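
As a rough sketch of the same classification in C (illustrative only;
the enum, function name, and sample values below are made up and are
not kernel code):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: normal page-table pointers are kernel virtual
 * addresses with the top bit set, so a signed view of the entry is
 * negative; the hugepage code clears that top bit, leaving a
 * positive non-zero value; an empty slot is zero.
 */
enum entry_kind { ENTRY_NORMAL, ENTRY_INVALID, ENTRY_HUGE };

static enum entry_kind classify_entry(uint64_t raw)
{
	int64_t e = (int64_t)raw;	/* signed view, like cmpdi */

	if (e < 0)
		return ENTRY_NORMAL;	/* top bit set: table pointer */
	if (e == 0)
		return ENTRY_INVALID;	/* empty slot */
	return ENTRY_HUGE;		/* top bit cleared: hugepage */
}

int main(void)
{
	printf("%d\n", classify_entry(0xc000000012345000ULL));	/* normal */
	printf("%d\n", classify_entry(0));			/* invalid */
	printf("%d\n", classify_entry(0x4000000012345000ULL));	/* huge */
	return 0;
}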

Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/mm/tlb_low_64e.S

index 71d5d9a..ff672bd 100644 (file)
@@ -136,22 +136,22 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
 #ifndef CONFIG_PPC_64K_PAGES
        rldicl  r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
        clrrdi  r15,r15,3
-       cmpldi  cr0,r14,0
-       beq     tlb_miss_fault_bolted   /* Bad pgd entry */
+       cmpdi   cr0,r14,0
+       bge     tlb_miss_fault_bolted   /* Bad pgd entry or hugepage; bail */
        ldx     r14,r14,r15             /* grab pud entry */
 #endif /* CONFIG_PPC_64K_PAGES */
 
        rldicl  r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
        clrrdi  r15,r15,3
-       cmpldi  cr0,r14,0
-       beq     tlb_miss_fault_bolted
+       cmpdi   cr0,r14,0
+       bge     tlb_miss_fault_bolted
        ldx     r14,r14,r15             /* Grab pmd entry */
 
        rldicl  r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
        clrrdi  r15,r15,3
-       cmpldi  cr0,r14,0
-       beq     tlb_miss_fault_bolted
-       ldx     r14,r14,r15             /* Grab PTE */
+       cmpdi   cr0,r14,0
+       bge     tlb_miss_fault_bolted
+       ldx     r14,r14,r15             /* Grab PTE, normal (!huge) page */
 
        /* Check if required permissions are met */
        andc.   r15,r11,r14