ARM: 9169/1: entry: fix Thumb2 bug in iWMMXt exception handling
author	Ard Biesheuvel <ardb@kernel.org>
	Wed, 15 Dec 2021 08:31:36 +0000 (09:31 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Wed, 29 Dec 2021 11:28:57 +0000 (12:28 +0100)
commit 8536a5ef886005bc443c2da9b842d69fd3d7647f upstream.

The Thumb2 version of the FP exception handling entry code treats the
register holding the CP number (R8) differently, resulting in the iWMMXt
CP number check being incorrect.
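
For illustration, a minimal C model of the mismatch (the (1 << 8)
comparison and register names are taken from the surrounding call_fpe
code, which lies outside the hunks below, so treat them as assumptions):
the ARM path leaves the CP number in R8 shifted left by 8, while the old
Thumb2 path shifted it down to a plain CP number, so a test written
against the shifted-left encoding passes for every coprocessor on Thumb2.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Hypothetical model only: the surrounding call_fpe code is assumed
     * to gate iWMMXt on "CP number <= 1" via "rsbs r7, r8, #(1 << 8)",
     * i.e. carry set when r8 <= 0x100.
     */
    static bool iwmmxt_cp_gate(unsigned int r8)
    {
        return r8 <= (1u << 8);
    }

    int main(void)
    {
        for (unsigned int cpnr = 0; cpnr <= 15; cpnr++) {
            unsigned int r8_arm    = cpnr << 8; /* ARM path: CP number kept shifted left */
            unsigned int r8_thumb2 = cpnr;      /* old Thumb2 path, after "lsr r8, r8, #8" */

            printf("CP%-2u  ARM gate: %d  old Thumb2 gate: %d\n",
                   cpnr, iwmmxt_cp_gate(r8_arm), iwmmxt_cp_gate(r8_thumb2));
        }
        /* ARM passes only CP0/CP1; the old Thumb2 value passes for every CP. */
        return 0;
    }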

Fix this by unifying the ARM and Thumb2 code paths, switching the order
of the additions of the TI_USED_CP offset and the shifted CP index so
that R8 is left holding the shifted CP number in both cases.
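
A rough C sketch of why the reordering is safe (TI_USED_CP and the
thread_info pointer below are stand-in values, not the kernel's): adding
the shifted CP index to the thread_info base first and using TI_USED_CP
as an immediate store offset reaches the same used_cp[] byte as the old
ARM sequence, while avoiding the register-shifted store operand that the
Thumb2 strb encoding does not support.

    #include <assert.h>
    #include <stdint.h>

    #define TI_USED_CP 0x50            /* stand-in value; the real one comes from asm-offsets */

    int main(void)
    {
        uintptr_t r10 = 0x80001000;    /* hypothetical thread_info pointer */

        for (unsigned int cpnr = 0; cpnr <= 15; cpnr++) {
            uintptr_t r8 = (uintptr_t)cpnr << 8;    /* CP number as masked out of the instruction */

            /* old ARM sequence:  add r6, r10, #TI_USED_CP; strb r7, [r6, r8, lsr #8] */
            uintptr_t old_addr = (r10 + TI_USED_CP) + (r8 >> 8);

            /* new sequence:      add r6, r10, r8, lsr #8; strb r7, [r6, #TI_USED_CP] */
            uintptr_t new_addr = (r10 + (r8 >> 8)) + TI_USED_CP;

            assert(old_addr == new_addr);    /* same used_cp[] byte either way, r8 untouched */
        }
        return 0;
    }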

Cc: <stable@vger.kernel.org>
Fixes: b86040a59feb ("Thumb-2: Implementation of the unified start-up and exceptions code")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/kernel/entry-armv.S

index 241b73d..d8e28ab 100644
@@ -597,11 +597,9 @@ call_fpe:
        tstne   r0, #0x04000000                 @ bit 26 set on both ARM and Thumb-2
        reteq   lr
        and     r8, r0, #0x00000f00             @ mask out CP number
- THUMB(        lsr     r8, r8, #8              )
        mov     r7, #1
-       add     r6, r10, #TI_USED_CP
- ARM(  strb    r7, [r6, r8, lsr #8]    )       @ set appropriate used_cp[]
- THUMB(        strb    r7, [r6, r8]            )       @ set appropriate used_cp[]
+       add     r6, r10, r8, lsr #8             @ add used_cp[] array offset first
+       strb    r7, [r6, #TI_USED_CP]           @ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
@@ -610,7 +608,7 @@ call_fpe:
        bcs     iwmmxt_task_enable
 #endif
  ARM(  add     pc, pc, r8, lsr #6      )
- THUMB(        lsl     r8, r8, #2              )
+ THUMB(        lsr     r8, r8, #6              )
  THUMB(        add     pc, r8                  )
        nop
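
The second hunk is a direct consequence of the unification: with R8 now
holding the CP number shifted left by 8 on Thumb2 as well, the old
lsl #2 (written for a plain CP number) would scale the branch-table
offset by 1024 instead of 4, so Thumb2 now mirrors the ARM path's
lsr #6. A small standalone check of that arithmetic (plain C, no kernel
context):

    #include <assert.h>

    int main(void)
    {
        for (unsigned int cpnr = 0; cpnr <= 15; cpnr++) {
            unsigned int r8 = cpnr << 8;       /* r8 holds CP number << 8 in both variants */

            unsigned int arm_off   = r8 >> 6;  /*  ARM(  add pc, pc, r8, lsr #6 ) */
            unsigned int thumb_new = r8 >> 6;  /* THUMB( lsr r8, r8, #6 ) */
            unsigned int thumb_old = r8 << 2;  /* old THUMB( lsl r8, r8, #2 ), assumed a plain CP number */

            assert(arm_off == cpnr * 4);       /* one 4-byte branch-table slot per coprocessor */
            assert(thumb_new == arm_off);
            assert(cpnr == 0 || thumb_old != arm_off);
        }
        return 0;
    }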