powerpc: Fix fatal SLB miss when restoring PPR
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2bd0b88..bbfb029 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -102,7 +102,8 @@ BEGIN_FW_FTR_SECTION
        /* if from user, see if there are any DTL entries to process */
        ld      r10,PACALPPACAPTR(r13)  /* get ptr to VPA */
        ld      r11,PACA_DTL_RIDX(r13)  /* get log read index */
-       ld      r10,LPPACA_DTLIDX(r10)  /* get log write index */
+       addi    r10,r10,LPPACA_DTLIDX
+       LDX_BE  r10,0,r10               /* get log write index */
        cmpd    cr1,r11,r10
        beq+    cr1,33f
        bl      .accumulate_stolen_time
@@ -522,9 +523,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
         */
        ld      r9,PACA_SLBSHADOWPTR(r13)
        li      r12,0
-       std     r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
-       std     r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
-       std     r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
+       std     r12,SLBSHADOW_STACKESID(r9)     /* Clear ESID */
+       li      r12,SLBSHADOW_STACKVSID
+       STDX_BE r7,r12,r9                       /* Save VSID */
+       li      r12,SLBSHADOW_STACKESID
+       STDX_BE r0,r12,r9                       /* Save ESID */
 
        /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
         * we have 1TB segments, the only CPUs known to have the errata
@@ -575,34 +578,15 @@ BEGIN_FTR_SECTION
        ld      r7,DSCR_DEFAULT@toc(2)
        ld      r0,THREAD_DSCR(r4)
        cmpwi   r6,0
-       li      r8, FSCR_DSCR
        bne     1f
        ld      r0,0(r7)
-       b       3f
 1:
-  BEGIN_FTR_SECTION_NESTED(70)
-       mfspr   r6, SPRN_FSCR
-       or      r6, r6, r8
-       mtspr   SPRN_FSCR, r6
-    BEGIN_FTR_SECTION_NESTED(69)
-       mfspr   r6, SPRN_HFSCR
-       or      r6, r6, r8
-       mtspr   SPRN_HFSCR, r6
-    END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
-       b       4f
-  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
-3:
-  BEGIN_FTR_SECTION_NESTED(70)
-       mfspr   r6, SPRN_FSCR
-       andc    r6, r6, r8
-       mtspr   SPRN_FSCR, r6
-    BEGIN_FTR_SECTION_NESTED(69)
-       mfspr   r6, SPRN_HFSCR
-       andc    r6, r6, r8
-       mtspr   SPRN_HFSCR, r6
-    END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
-  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
-4:     cmpd    r0,r25
+BEGIN_FTR_SECTION_NESTED(70)
+       mfspr   r8, SPRN_FSCR
+       rldimi  r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
+       mtspr   SPRN_FSCR, r8
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+       cmpd    r0,r25
        beq     2f
        mtspr   SPRN_DSCR,r0
 2:
@@ -689,9 +673,7 @@ _GLOBAL(ret_from_except_lite)
 
 resume_kernel:
        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
-       CURRENT_THREAD_INFO(r9, r1)
-       ld      r8,TI_FLAGS(r9)
-       andis.  r8,r8,_TIF_EMULATE_STACK_STORE@h
+       andis.  r8,r4,_TIF_EMULATE_STACK_STORE@h
        beq+    1f
 
        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
@@ -737,9 +719,9 @@ resume_kernel:
 
        /*
         * Here we are preempting the current task. We want to make
-        * sure we are soft-disabled first
+        * sure we are soft-disabled first and reconcile irq state.
         */
-       SOFT_DISABLE_INTS(r3,r4)
+       RECONCILE_IRQ_STATE(r3,r4)
 1:     bl      .preempt_schedule_irq
 
        /* Re-test flags and eventually loop */
@@ -836,6 +818,12 @@ fast_exception_return:
        andi.   r0,r3,MSR_RI
        beq-    unrecov_restore
 
+       /* Load PPR from thread struct before we clear MSR:RI */
+BEGIN_FTR_SECTION
+       ld      r2,PACACURRENT(r13)
+       ld      r2,TASKTHREADPPR(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
        /*
         * Clear RI before restoring r13.  If we are returning to
         * userspace and we take an exception after restoring r13,
@@ -856,8 +844,10 @@ fast_exception_return:
         */
        andi.   r0,r3,MSR_PR
        beq     1f
+BEGIN_FTR_SECTION
+       mtspr   SPRN_PPR,r2     /* Restore PPR */
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ACCOUNT_CPU_USER_EXIT(r2, r4)
-       RESTORE_PPR(r2, r4)
        REST_GPR(13, r1)
 1:
        mtspr   SPRN_SRR1,r3
@@ -1033,7 +1023,7 @@ _GLOBAL(enter_rtas)
        
         li      r9,1
         rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
-       ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
+       ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
        andc    r6,r0,r9
        sync                            /* disable interrupts so SRR0/1 */
        mtmsrd  r0                      /* don't get trashed */
@@ -1048,6 +1038,8 @@ _GLOBAL(enter_rtas)
        b       .       /* prevent speculative execution */
 
 _STATIC(rtas_return_loc)
+       FIXUP_ENDIAN
+
        /* relocation is off at this point */
        GET_PACA(r4)
        clrldi  r4,r4,2                 /* convert to realmode address */
@@ -1119,28 +1111,30 @@ _GLOBAL(enter_prom)
        std     r10,_CCR(r1)
        std     r11,_MSR(r1)
 
-       /* Get the PROM entrypoint */
-       mtlr    r4
+       /* Put PROM address in SRR0 */
+       mtsrr0  r4
 
-       /* Switch MSR to 32 bits mode
+       /* Setup our trampoline return addr in LR */
+       bcl     20,31,$+4
+0:     mflr    r4
+       addi    r4,r4,(1f - 0b)
+       mtlr    r4
+
+       /* Prepare a 32-bit mode big endian MSR
         */
 #ifdef CONFIG_PPC_BOOK3E
        rlwinm  r11,r11,0,1,31
-       mtmsr   r11
+       mtsrr1  r11
+       rfi
 #else /* CONFIG_PPC_BOOK3E */
-        mfmsr   r11
-        li      r12,1
-        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
-        andc    r11,r11,r12
-        li      r12,1
-        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
-        andc    r11,r11,r12
-        mtmsrd  r11
+       LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+       andc    r11,r11,r12
+       mtsrr1  r11
+       rfid
 #endif /* CONFIG_PPC_BOOK3E */
-        isync
 
-       /* Enter PROM here... */
-       blrl
+1:     /* Return from OF */
+       FIXUP_ENDIAN
 
        /* Just make sure that r1 top 32 bits didn't get
         * corrupt by OF