powerpc: Fix fatal SLB miss when restoring PPR
author	Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tue, 5 Nov 2013 05:33:22 +0000 (16:33 +1100)
committer	Jiri Slaby <jslaby@suse.cz>
Wed, 12 Mar 2014 12:25:36 +0000 (13:25 +0100)
commit 0c4888ef1d8a8b82c29075ce7e257ff795af15c7 upstream.

When restoring the PPR value, we incorrectly access the thread structure
at a time when MSR:RI is clear, which means we cannot recover from nested
faults. However, the thread structure isn't covered by the "bolted" SLB
entries, so accessing it can take an SLB miss, which is then fatal.

This fixes it by splitting the code so that the PPR value is loaded into
a GPR before MSR:RI is cleared.
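
In outline, the fix moves the thread structure accesses ahead of the point
where MSR:RI is cleared (a condensed sketch of the hunks below; the real
code wraps these loads in CPU_FTR_HAS_PPR feature sections and the actual
RI-clearing sequence is elided here):

	/* old (removed): thread struct dereferenced after RI is down */
	... MSR:RI cleared ...
	ld	r2,PACACURRENT(r13)	/* can take an SLB miss here...  */
	ld	r4,TASKTHREADPPR(r2)	/* ...which is now unrecoverable */
	mtspr	SPRN_PPR,r4

	/* new: read the PPR value into a GPR while RI is still set */
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
	... MSR:RI cleared ...
	mtspr	SPRN_PPR,r2		/* no memory access needed here */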

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/kernel/entry_64.S

index 599545738af3e2354b137221cf1c07cf68e25440..c2dcfaa51987bc9706f13381d37762f1cabd69b3 100644
@@ -478,13 +478,6 @@ BEGIN_FTR_SECTION_NESTED(945)                                              \
        std     ra,TASKTHREADPPR(rb);                                   \
 END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
 
-#define RESTORE_PPR(ra, rb)                                            \
-BEGIN_FTR_SECTION_NESTED(946)                                          \
-       ld      ra,PACACURRENT(r13);                                    \
-       ld      rb,TASKTHREADPPR(ra);                                   \
-       mtspr   SPRN_PPR,rb;    /* Restore PPR */                       \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
-
 #endif
 
 /*
index c04cdf70d487536614899da24fe88beac50a0f54..7be37170fda7714daddb53b2ddfb9a1b6ea3d674 100644
@@ -820,6 +820,12 @@ fast_exception_return:
        andi.   r0,r3,MSR_RI
        beq-    unrecov_restore
 
+       /* Load PPR from thread struct before we clear MSR:RI */
+BEGIN_FTR_SECTION
+       ld      r2,PACACURRENT(r13)
+       ld      r2,TASKTHREADPPR(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
        /*
         * Clear RI before restoring r13.  If we are returning to
         * userspace and we take an exception after restoring r13,
@@ -840,8 +846,10 @@ fast_exception_return:
         */
        andi.   r0,r3,MSR_PR
        beq     1f
+BEGIN_FTR_SECTION
+       mtspr   SPRN_PPR,r2     /* Restore PPR */
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ACCOUNT_CPU_USER_EXIT(r2, r4)
-       RESTORE_PPR(r2, r4)
        REST_GPR(13, r1)
 1:
        mtspr   SPRN_SRR1,r3