/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/system_info.h>
#include <asm/uaccess-asm.h>

#include "entry-header.S"
#include <asm/probes.h>
	.macro	irq_handler, from_user:req
	mov	r1, sp
	ldr_this_cpu r2, irq_stack_ptr, r2, r3
	@
	@ If we took the interrupt while running in the kernel, we may already
	@ be using the IRQ stack, so revert to the original value in that case.
	@
	subs	r3, r2, r1		@ SP above bottom of IRQ stack?
	rsbscs	r3, r3, #THREAD_SIZE	@ ... and below the top?
#ifdef CONFIG_VMAP_STACK
	ldr_va	r3, high_memory, cc	@ End of the linear region
	cmpcc	r3, r1			@ Stack pointer was below it?
#endif
	bcc	0f			@ If not, switch to the IRQ stack
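	@ Note: the subs/rsbscs pair above is a branchless unsigned range
	@ check: the carry flag is still set afterwards only when
	@ 0 <= irq_stack_ptr - SP <= THREAD_SIZE, i.e. when SP already
	@ points into this CPU's IRQ stack, in which case we stay on it;
	@ bcc takes the stack-switch path otherwise.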
	bl	generic_handle_arch_irq
0:
	mov_l	r0, generic_handle_arch_irq
	@
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
	@
	ldr_va	ip, processor, offset=PROCESSOR_PABT_FUNC

	@
	@ Call the processor-specific abort handler:
	@
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
	ldr_va	ip, processor, offset=PROCESSOR_DABT_FUNC
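	@ (PROCESSOR_PABT_FUNC and PROCESSOR_DABT_FUNC are asm-offsets into
	@ struct processor, so the two ldr_va lines above fetch whichever
	@ per-CPU abort callback the CPU glue selected at build time; see
	@ asm/glue-pf.h and asm/glue-df.h.)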
	.section .entry.text, "ax", %progbits

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception
@		   (re-entrant version of handlers)
@
common_invalid:
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
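/*
 * SPFIX() emits its argument only on EABI v5+ builds, where the ABI
 * requires the stack to stay 64-bit aligned across C calls.  svc_entry
 * below uses it to apply a conditional 4-byte SP adjustment (and to undo
 * it when computing the saved SP) whenever exception entry would
 * otherwise leave SP only 4-byte aligned.
 */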
	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole)
 THUMB(	add	sp, r1			)	@ get SP in a GPR without
 THUMB(	sub	r1, sp, r1		)	@ using a temp register

 UNWIND(.save	{r0 - pc}		)
	do_overflow_check (SVC_REGS_SIZE + \stack_hole)

#ifdef CONFIG_THUMB2_KERNEL
	tst	r1, #4			@ test stack pointer alignment
	sub	r1, sp, r1		@ restore original R1
	sub	sp, r1			@ restore original SP
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subne	sp, sp, #4	)

 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)	@ No STMIB in Thumb-2

	add	r7, sp, #S_SP		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole)
 SPFIX(	addne	r2, r2, #4	)
	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
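	@ For reference, the frame being assembled here is just the
	@ eighteen-word uregs[] array of struct pt_regs from asm/ptrace.h:
	@ r0-r12, sp, lr, pc, cpsr and orig_r0, in that order, with the
	@ S_xx offsets (S_SP, S_PSR, ...) generated from it by asm-offsets.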
	uaccess_entry tsk, r0, r1, r2, \uaccess

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm

 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
	irq_handler from_user=0

#ifdef CONFIG_PREEMPTION
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
#ifdef CONFIG_PREEMPTION
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif
	@
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction and we have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	@
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif

	mov	r1, #4				@ PC correction to apply
 THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
 THUMB(	movne	r1, #2			)	@ if so, fix up PC correction
	mov	r0, sp				@ struct pt_regs *regs

	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception

	svc_exit r5				@ return from exception

	mov	r0, sp				@ struct pt_regs *regs
/*
 * Abort mode handlers
 */

	@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
	@ and reuses the same macros. However in abort mode we must also
	@ save/restore lr_abt and spsr_abt to make nested aborts safe.

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr			@ Save lr_abt
	mrs	r2, spsr		@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	add	r0, sp, #8		@ struct pt_regs *regs

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1			@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2		@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE
 * should be a multiple of 8 as well.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr_va	r8, cr_alignment)

	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@ Enable the alignment trap while in kernel mode
 ATRAP(	mcrne	p15, 0, r8, c1, c0, 0)

	reload_current r7, r8

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endm
	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
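	@ (The quick test is a compare of the interrupted pc_usr, held in
	@ r4, against the top of user space: anything at or above it can
	@ only be executing inside the kuser helper page, so we take the
	@ out-of-line fixup path below.)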
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm
	irq_handler from_user=1

	b	ret_to_user_from_irq

	@
	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	badr	r9, ret_from_exception

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	badr	lr, __und_usr_fault_32
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
	ldr_va	r5, cpu_architecture
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
						@ regs->ARM_pc
	orr	r0, r0, r5, lsl #16
	badr	lr, __und_usr_fault_32
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)) */
/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .text.fixup, "ax"
	.align	2
4:	str	r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
	@ Fall-through from Thumb-2 __und_usr

call_fpe:
	get_thread_info r10			@ get current thread
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
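	@ Computed branch into the table that follows: r8 holds the CP
	@ number shifted left by 8, so "r8, lsr #6" is CP# * 4, the byte
	@ offset of one 4-byte branch slot per coprocessor.  In ARM state
	@ the PC reads as the address of the add plus 8, which lands
	@ exactly on entry 0 of the table.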
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsr	r8, r8, #6		)

	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)

	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)

	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)
do_fpe:
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr_va	pc, fp_enter, tmp=r4		@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */
__und_usr_fault_16_pan:
	uaccess_disable ip

	badr	lr, ret_from_exception
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
ENDPROC(ret_from_exception)

	mov	r0, sp				@ struct pt_regs *regs

	restore_user_regs fast = 0, offset = 0
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)

	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
	ldr	r8, =__stack_chk_guard
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
	ldr	r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
	.else
	ldr	r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
	.endif
#endif
	mov	r7, r2				@ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
	str	r9, [r8]
#endif
#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
	set_current r7, r8
	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
#else
	ldmia	r4, {r4 - sl, fp, ip, lr}	@ Load all regs saved previously
#ifdef CONFIG_VMAP_STACK
	@
	@ Do a dummy read from the new stack while running from the old one so
	@ that we can rely on do_translation_fault() to fix up any stale PMD
	@ entries covering the vmalloc region.
	@
	ldr	r2, [ip]
#endif

	@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
	@ effectuates the task switch, as that is what causes the observable
	@ values of current and current_thread_info to change. When
	@ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
	@ current_thread_info) is done explicitly, and the update of SP just
	@ switches us to another stack, with few other side effects. In order
	@ to prevent this distinction from causing any inconsistencies, let's
	@ keep the 'set_current' call as close as we can to the update of SP.
#ifdef CONFIG_VMAP_STACK
	@
	@ We've just detected an overflow. We need to load the address of this
	@ CPU's overflow stack into the stack pointer register. We have only one
	@ scratch register so let's use a sequence of ADDs including one
	@ involving the PC, and decorate them with PC-relative group
	@ relocations. As these are ARM only, switch to ARM mode first.
	@
	@ We enter here with IP clobbered and its value stashed on the mode
	@ stack.
	@
	ldr_this_cpu_armv6 ip, overflow_stack_ptr

	str	sp, [ip, #-4]!			@ Preserve original SP value
	mov	sp, ip				@ Switch to overflow stack
	pop	{ip}				@ Original SP in IP

#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	mov	ip, ip				@ mov expected by unwinder
	push	{fp, ip, lr, pc}		@ GCC flavor frame record
#else
	str	ip, [sp, #-8]!			@ store original SP
	push	{fpreg, lr}			@ Clang flavor frame record
#endif
UNWIND( ldr	ip, [r0, #4]	)		@ load exception LR
UNWIND( str	ip, [sp, #12]	)		@ store in the frame record
	ldr	ip, [r0, #12]			@ reload IP
	@ Store the original GPRs to the new stack.
	svc_entry uaccess=0, overflow_check=0

UNWIND( .save	{sp, pc}	)
UNWIND( .save	{fpreg, lr}	)
UNWIND( .setfp	fpreg, sp	)

	ldr	fpreg, [sp, #S_SP]		@ Add our frame record
						@ to the linked list

#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	ldr	r1, [fp, #4]			@ reload SP at entry
#endif
	str	r1, [sp, #S_SP]			@ store in pt_regs

	@ Stash the regs for handle_bad_stack
/*
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
 */
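/*
 * For example (a minimal userspace sketch in the style of the examples
 * in Documentation/arm/kernel_user_helpers.rst): each helper is invoked
 * by casting its fixed address in the vector page to a function pointer,
 * e.g. for __kuser_get_tls at 0xffff0fe0:
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *read_tls(void)
 *	{
 *		return __kuser_get_tls();	// returns the TLS pointer
 *	}
 */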
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
#ifdef CONFIG_KUSER_HELPERS

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if not, retry
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr
#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
2:	stmiaeq	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
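	@ Note: the constant computed below folds to the *user space*
	@ address of label 1b.  The helper executes from the vector page
	@ copy at 0xffff0f60, not from its link-time address in the kernel
	@ image, so the saved pc_usr must be compared against (and rewound
	@ to) the vector page alias.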
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr

#else
#warning "NPTL on non MMU needs fixing"
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr
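	@ Calling convention (see kernel_user_helpers.rst): r0 = oldval,
	@ r1 = newval, r2 = ptr.  The rsbs above returns 0 with the C flag
	@ set when *ptr was updated, and a non-zero value with C clear when
	@ the comparison failed, so callers can branch on either.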
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr

#else
#warning "NPTL on non MMU needs fixing"
#endif
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

	kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
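	@ Userspace reads this word at 0xffff0ffc to learn how many 32-byte
	@ helper slots the running kernel provides (hence the shift by 5),
	@ and should check it before calling any helper newer than the ones
	@ it was compiled against.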
	.globl	__kuser_helper_end
__kuser_helper_end:

#endif
/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
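/*
 * In outline, each stub generated by vector_stub below saves r0, the
 * parent PC and the parent CPSR into the per-mode scratch area pointed
 * to by SP, rewrites the mode bits of the saved CPSR to SVC_MODE (the
 * "eor ... \mode ^ SVC_MODE" sequence), and then uses the low four mode
 * bits of the parent CPSR (USR=0, FIQ=1, IRQ=2, SVC=3, ...) to index
 * the branch table that follows each stub, entering the real handler
 * in SVC mode via "movs pc, lr".
 */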
	.macro	vector_stub, name, mode, correction=0
	.align	5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
vector_bhb_bpiall_\name:
	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
	@ isb not needed due to "movs pc, lr" in the vector stub
	@ which gives a "context synchronisation".
#endif

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@ Save r0, lr_<exception> (parent PC)
	stmia	sp, {r0, lr}		@ save r0, lr

	@ Save spsr_<exception> (parent CPSR)
2:	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
	.align	5
vector_bhb_loop8_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@ Save r0, lr_<exception> (parent PC)
	stmia	sp, {r0, lr}

	@ bhb workaround
	mov	r0, #8
3:	W(b)	. + 4
	subs	r0, r0, #1
	bne	3b
	dsb	nsh
	@ isb not needed due to "movs pc, lr" in the vector stub
	@ which gives a "context synchronisation".
	b	2b
ENDPROC(vector_bhb_loop8_\name)
#endif

	@ handler addresses follow this label
1:
	.endm
	.section .stubs, "ax", %progbits
	@ These need to remain at the start of the section so that
	@ they are in range of the 'SWI' entries in the vector tables
.L__vector_swi:
	.word	vector_swi
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.L__vector_bhb_loop8_swi:
	.word	vector_bhb_loop8_swi
.L__vector_bhb_bpiall_swi:
	.word	vector_bhb_bpiall_swi
#endif

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn
/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQ to act like an NMI on x86
 * systems.  This must be the last vector stub, so let's place it in its own
 * subsection.
 */
	.subsection 2
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.section .vectors, "ax", %progbits
	W(b)	vector_rst
	W(b)	vector_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi	)
	W(ldr)	pc, .
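	@ The two .reloc directives above decorate the "W(ldr) pc, ." so
	@ that, instead of loading from itself, it is fixed up at link time
	@ to load vector_swi's address from the .L__vector_swi literal at
	@ the start of the .stubs section, which the installed copy of the
	@ vectors can still reach one page away.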
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
	.section .vectors.bhb.loop8, "ax", %progbits
	W(b)	vector_rst
	W(b)	vector_bhb_loop8_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
	W(ldr)	pc, .
	W(b)	vector_bhb_loop8_pabt
	W(b)	vector_bhb_loop8_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_loop8_irq
	W(b)	vector_bhb_loop8_fiq

	.section .vectors.bhb.bpiall, "ax", %progbits
	W(b)	vector_rst
	W(b)	vector_bhb_bpiall_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi	)
	W(ldr)	pc, .
	W(b)	vector_bhb_bpiall_pabt
	W(b)	vector_bhb_bpiall_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_bpiall_irq
	W(b)	vector_bhb_bpiall_fiq
#endif