Merge branch 'for-next/entry-s-to-c' into for-next/core
[platform/kernel/linux-rpi.git] / arch/arm64/kernel/entry.S
index 84a8227..d440a2a 100644
@@ -578,76 +578,9 @@ ENDPROC(el1_error_invalid)
        .align  6
 el1_sync:
        kernel_entry 1
-       mrs     x1, esr_el1                     // read the syndrome register
-       lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
-       cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
-       b.eq    el1_da
-       cmp     x24, #ESR_ELx_EC_IABT_CUR       // instruction abort in EL1
-       b.eq    el1_ia
-       cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
-       b.eq    el1_undef
-       cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
-       b.eq    el1_pc
-       cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL1
-       b.eq    el1_undef
-       cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
-       b.ge    el1_dbg
-       b       el1_inv
-
-el1_ia:
-       /*
-        * Fall through to the Data abort case
-        */
-el1_da:
-       /*
-        * Data abort handling
-        */
-       mrs     x3, far_el1
-       inherit_daif    pstate=x23, tmp=x2
-       clear_address_tag x0, x3
-       mov     x2, sp                          // struct pt_regs
-       bl      do_mem_abort
-
-       kernel_exit 1
-el1_pc:
-       /*
-        * PC alignment exception handling. We don't handle SP alignment faults,
-        * since we will have hit a recursive exception when trying to push the
-        * initial pt_regs.
-        */
-       mrs     x0, far_el1
-       inherit_daif    pstate=x23, tmp=x2
-       mov     x2, sp
-       bl      do_sp_pc_abort
-       ASM_BUG()
-el1_undef:
-       /*
-        * Undefined instruction
-        */
-       inherit_daif    pstate=x23, tmp=x2
        mov     x0, sp
-       bl      do_undefinstr
+       bl      el1_sync_handler
        kernel_exit 1
-el1_dbg:
-       /*
-        * Debug exception handling
-        */
-       cmp     x24, #ESR_ELx_EC_BRK64          // if BRK64
-       cinc    x24, x24, eq                    // set bit '0'
-       tbz     x24, #0, el1_inv                // EL1 only
-       gic_prio_kentry_setup tmp=x3
-       mrs     x0, far_el1
-       mov     x2, sp                          // struct pt_regs
-       bl      do_debug_exception
-       kernel_exit 1
-el1_inv:
-       // TODO: add support for undefined instructions in kernel mode
-       inherit_daif    pstate=x23, tmp=x2
-       mov     x0, sp
-       mov     x2, x1
-       mov     x1, #BAD_SYNC
-       bl      bad_mode
-       ASM_BUG()
 ENDPROC(el1_sync)
 
        .align  6
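
For reference, the branch table deleted above becomes a C switch on the ESR_EL1 exception class in the handler this series adds (entry-common.c). The sketch below is reconstructed from the removed assembly; the per-class helpers are named only to mirror the old el1_da/el1_pc/el1_undef/el1_dbg/el1_inv labels, each wrapping the do_*() call that the corresponding label made.

asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:		/* was el1_da */
	case ESR_ELx_EC_IABT_CUR:		/* was el1_ia, fell through to el1_da */
		el1_abort(regs, esr);		/* reads far_el1, calls do_mem_abort() */
		break;
	case ESR_ELx_EC_PC_ALIGN:		/* was el1_pc */
		el1_pc(regs, esr);		/* reads far_el1, calls do_sp_pc_abort() */
		break;
	case ESR_ELx_EC_SYS64:			/* configurable trap */
	case ESR_ELx_EC_UNKNOWN:		/* was el1_undef */
		el1_undef(regs);		/* calls do_undefinstr() */
		break;
	case ESR_ELx_EC_BREAKPT_CUR:		/* was the b.ge el1_dbg range */
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);		/* calls do_debug_exception() */
		break;
	default:				/* was el1_inv */
		el1_inv(regs, esr);		/* calls bad_mode(regs, BAD_SYNC, esr) */
	}
}
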
@@ -680,7 +613,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        orr     x24, x24, x0
 alternative_else_nop_endif
        cbnz    x24, 1f                         // preempt count != 0 || NMI return path
-       bl      preempt_schedule_irq            // irq en/disable is done inside
+       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
 1:
 #endif
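
A note on the call above: the generic preempt_schedule_irq() is replaced by an arm64-specific wrapper. A minimal sketch, assuming the wrapper only gates the generic call behind an arch-specific condition; arm64_irq_preemption_safe() is a hypothetical stand-in for that check, not the real predicate.

/*
 * Hedged sketch: defer to the generic scheduler entry point only once an
 * arm64-specific condition holds. arm64_irq_preemption_safe() is a
 * hypothetical placeholder for that condition.
 */
asmlinkage void __sched arm64_preempt_schedule_irq(void)
{
	lockdep_assert_irqs_disabled();

	if (arm64_irq_preemption_safe())	/* hypothetical predicate */
		preempt_schedule_irq();
}
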
 
@@ -714,70 +647,18 @@ ENDPROC(el1_irq)
        .align  6
 el0_sync:
        kernel_entry 0
-       mrs     x25, esr_el1                    // read the syndrome register
-       lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
-       cmp     x24, #ESR_ELx_EC_SVC64          // SVC in 64-bit state
-       b.eq    el0_svc
-       cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
-       b.eq    el0_da
-       cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
-       b.eq    el0_ia
-       cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
-       b.eq    el0_fpsimd_acc
-       cmp     x24, #ESR_ELx_EC_SVE            // SVE access
-       b.eq    el0_sve_acc
-       cmp     x24, #ESR_ELx_EC_FP_EXC64       // FP/ASIMD exception
-       b.eq    el0_fpsimd_exc
-       cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
-       ccmp    x24, #ESR_ELx_EC_WFx, #4, ne
-       b.eq    el0_sys
-       cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
-       b.eq    el0_sp
-       cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
-       b.eq    el0_pc
-       cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
-       b.eq    el0_undef
-       cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
-       b.ge    el0_dbg
-       b       el0_inv
+       mov     x0, sp
+       bl      el0_sync_handler
+       b       ret_to_user
 
 #ifdef CONFIG_COMPAT
        .align  6
 el0_sync_compat:
        kernel_entry 0, 32
-       mrs     x25, esr_el1                    // read the syndrome register
-       lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
-       cmp     x24, #ESR_ELx_EC_SVC32          // SVC in 32-bit state
-       b.eq    el0_svc_compat
-       cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
-       b.eq    el0_da
-       cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
-       b.eq    el0_ia
-       cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
-       b.eq    el0_fpsimd_acc
-       cmp     x24, #ESR_ELx_EC_FP_EXC32       // FP/ASIMD exception
-       b.eq    el0_fpsimd_exc
-       cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
-       b.eq    el0_pc
-       cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
-       b.eq    el0_undef
-       cmp     x24, #ESR_ELx_EC_CP15_32        // CP15 MRC/MCR trap
-       b.eq    el0_cp15
-       cmp     x24, #ESR_ELx_EC_CP15_64        // CP15 MRRC/MCRR trap
-       b.eq    el0_cp15
-       cmp     x24, #ESR_ELx_EC_CP14_MR        // CP14 MRC/MCR trap
-       b.eq    el0_undef
-       cmp     x24, #ESR_ELx_EC_CP14_LS        // CP14 LDC/STC trap
-       b.eq    el0_undef
-       cmp     x24, #ESR_ELx_EC_CP14_64        // CP14 MRRC/MCRR trap
-       b.eq    el0_undef
-       cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
-       b.ge    el0_dbg
-       b       el0_inv
-el0_svc_compat:
        mov     x0, sp
-       bl      el0_svc_compat_handler
+       bl      el0_sync_compat_handler
        b       ret_to_user
+ENDPROC(el0_sync)
 
        .align  6
 el0_irq_compat:
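
The EL0 dispatch removed in this hunk moves to C in the same way. The sketch below is reconstructed from the deleted branch table; the per-class helpers mirror the old el0_* labels. el0_sync_compat_handler has the same shape, switching on the AArch32 classes instead (SVC32, the CP15/CP14 traps, FP_EXC32, and so on).

asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:			/* was el0_svc */
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:		/* was el0_da */
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:		/* was el0_ia */
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:		/* was el0_fpsimd_acc */
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:			/* was el0_sve_acc */
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:		/* was el0_fpsimd_exc */
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:			/* was el0_sys */
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:		/* was el0_sp */
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:		/* was el0_pc */
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:		/* was el0_undef */
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:		/* was the b.ge el0_dbg range */
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	default:				/* was el0_inv -> bad_el0_sync() */
		el0_inv(regs, esr);
	}
}
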
@@ -787,140 +668,8 @@ el0_irq_compat:
 el0_error_compat:
        kernel_entry 0, 32
        b       el0_error_naked
-
-el0_cp15:
-       /*
-        * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
-        */
-       ct_user_exit_irqoff
-       enable_daif
-       mov     x0, x25
-       mov     x1, sp
-       bl      do_cp15instr
-       b       ret_to_user
 #endif
 
-el0_da:
-       /*
-        * Data abort handling
-        */
-       mrs     x26, far_el1
-       ct_user_exit_irqoff
-       enable_daif
-       clear_address_tag x0, x26
-       mov     x1, x25
-       mov     x2, sp
-       bl      do_mem_abort
-       b       ret_to_user
-el0_ia:
-       /*
-        * Instruction abort handling
-        */
-       mrs     x26, far_el1
-       gic_prio_kentry_setup tmp=x0
-       ct_user_exit_irqoff
-       enable_da_f
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_off
-#endif
-       mov     x0, x26
-       mov     x1, x25
-       mov     x2, sp
-       bl      do_el0_ia_bp_hardening
-       b       ret_to_user
-el0_fpsimd_acc:
-       /*
-        * Floating Point or Advanced SIMD access
-        */
-       ct_user_exit_irqoff
-       enable_daif
-       mov     x0, x25
-       mov     x1, sp
-       bl      do_fpsimd_acc
-       b       ret_to_user
-el0_sve_acc:
-       /*
-        * Scalable Vector Extension access
-        */
-       ct_user_exit_irqoff
-       enable_daif
-       mov     x0, x25
-       mov     x1, sp
-       bl      do_sve_acc
-       b       ret_to_user
-el0_fpsimd_exc:
-       /*
-        * Floating Point, Advanced SIMD or SVE exception
-        */
-       ct_user_exit_irqoff
-       enable_daif
-       mov     x0, x25
-       mov     x1, sp
-       bl      do_fpsimd_exc
-       b       ret_to_user
-el0_sp:
-       ldr     x26, [sp, #S_SP]
-       b       el0_sp_pc
-el0_pc:
-       mrs     x26, far_el1
-el0_sp_pc:
-       /*
-        * Stack or PC alignment exception handling
-        */
-       gic_prio_kentry_setup tmp=x0
-       ct_user_exit_irqoff
-       enable_da_f
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_off
-#endif
-       mov     x0, x26
-       mov     x1, x25
-       mov     x2, sp
-       bl      do_sp_pc_abort
-       b       ret_to_user
-el0_undef:
-       /*
-        * Undefined instruction
-        */
-       ct_user_exit_irqoff
-       enable_daif
-       mov     x0, sp
-       bl      do_undefinstr
-       b       ret_to_user
-el0_sys:
-       /*
-        * System instructions, for trapped cache maintenance instructions
-        */
-       ct_user_exit_irqoff
-       enable_daif
-       mov     x0, x25
-       mov     x1, sp
-       bl      do_sysinstr
-       b       ret_to_user
-el0_dbg:
-       /*
-        * Debug exception handling
-        */
-       tbnz    x24, #0, el0_inv                // EL0 only
-       mrs     x24, far_el1
-       gic_prio_kentry_setup tmp=x3
-       ct_user_exit_irqoff
-       mov     x0, x24
-       mov     x1, x25
-       mov     x2, sp
-       bl      do_debug_exception
-       enable_da_f
-       b       ret_to_user
-el0_inv:
-       ct_user_exit_irqoff
-       enable_daif
-       mov     x0, sp
-       mov     x1, #BAD_SYNC
-       mov     x2, x25
-       bl      bad_el0_sync
-       b       ret_to_user
-ENDPROC(el0_sync)
-
        .align  6
 el0_irq:
        kernel_entry 0
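
Each removed el0_* block maps onto a small C helper. As an example, a sketch of the data-abort case, mirroring the deleted el0_da sequence above and assuming the usual C equivalents of the entry macros (user_exit_irqoff() for ct_user_exit_irqoff, local_daif_restore() for enable_daif, untagged_addr() for clear_address_tag):

static void notrace el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);	/* was mrs x26, far_el1 */

	user_exit_irqoff();				/* was ct_user_exit_irqoff */
	local_daif_restore(DAIF_PROCCTX);		/* was enable_daif */
	far = untagged_addr(far);			/* was clear_address_tag */
	do_mem_abort(far, esr, regs);			/* same x0/x1/x2 arguments */
}
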
@@ -998,17 +747,6 @@ finish_ret_to_user:
        kernel_exit 0
 ENDPROC(ret_to_user)
 
-/*
- * SVC handler.
- */
-       .align  6
-el0_svc:
-       gic_prio_kentry_setup tmp=x1
-       mov     x0, sp
-       bl      el0_svc_handler
-       b       ret_to_user
-ENDPROC(el0_svc)
-
        .popsection                             // .entry.text
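
The el0_svc stub removed above survives as the SVC64 case of el0_sync_handler(). A sketch, assuming the priority setup done by gic_prio_kentry_setup moves to C behind the existing priority-masking capability check:

static void notrace el0_svc(struct pt_regs *regs)
{
	/* was gic_prio_kentry_setup tmp=x1 */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	el0_svc_handler(regs);		/* unchanged C syscall entry */
}
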
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
@@ -1070,7 +808,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 #else
        ldr     x30, =vectors
 #endif
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
        prfm    plil1strm, [x30, #(1b - tramp_vectors)]
+alternative_else_nop_endif
        msr     vbar_el1, x30
        add     x30, x30, #(1b - tramp_vectors)
        isb