// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 */
#include <linux/types.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>
#include <linux/export.h>
#include <linux/perf_event.h>

#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/traps.h>
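
/*
 * The code below relies on fmrx()/fmxr() and on the FPEXC/FPSCR/FPSID bit
 * definitions; in the kernel tree these come from the VFP headers included
 * here (assumed to match the rest of arch/arm/vfp).
 */
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"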
static bool have_vfp __ro_after_init;

/*
 * Used in startup: set to non-zero if VFP checks fail.
 * After startup, holds the VFP architecture.
 */
static unsigned int VFP_arch;

#ifdef CONFIG_CPU_FEROCEON
extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
#endif
/*
 * The pointer to the vfpstate structure of the thread which currently
 * owns the context held in the VFP hardware, or NULL if the hardware
 * context is invalid.
 *
 * For UP, this is sufficient to tell which thread owns the VFP context.
 * However, for SMP, we also need to check the CPU number stored in the
 * saved state to catch migrations.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];
/*
 * Is 'thread's most up-to-date state stored in this CPU's hardware?
 * Must be called from non-preemptible context.
 */
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
        if (thread->vfpstate.hard.cpu != cpu)
                return false;
        return vfp_current_hw_state[cpu] == &thread->vfpstate;
/*
 * Force a reload of the VFP context from the thread structure.  We do
 * this by ensuring that access to the VFP hardware is disabled, and by
 * clearing vfp_current_hw_state.  Must be called from non-preemptible context.
 */
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
        if (vfp_state_in_hw(cpu, thread)) {
                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
                vfp_current_hw_state[cpu] = NULL;
        }
        thread->vfpstate.hard.cpu = NR_CPUS;
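        /*
         * The assignment above parks ->hard.cpu on an impossible CPU number,
         * so vfp_state_in_hw() now fails on every CPU and the next VFP use
         * by this thread must reload the state from the thread structure.
         */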
/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
        union vfp_state *vfp = &thread->vfpstate;

        /*
         * Disable VFP to ensure we initialize it first.  We must ensure
         * that the modification of vfp_current_hw_state[] and hardware
         * disable are done for the same CPU and without preemption.
         *
         * Do this first to ensure that preemption won't overwrite our
         * state saving should access to the VFP be enabled at this point.
         */
        if (vfp_current_hw_state[cpu] == vfp)
                vfp_current_hw_state[cpu] = NULL;
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

        memset(vfp, 0, sizeof(union vfp_state));

        vfp->hard.fpexc = FPEXC_EN;
        vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
        vfp->hard.cpu = NR_CPUS;
static void vfp_thread_exit(struct thread_info *thread)
        /* release case: Per-thread VFP cleanup. */
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu = get_cpu();

        if (vfp_current_hw_state[cpu] == vfp)
                vfp_current_hw_state[cpu] = NULL;
static void vfp_thread_copy(struct thread_info *thread)
        struct thread_info *parent = current_thread_info();

        vfp_sync_hwstate(parent);
        thread->vfpstate = parent->vfpstate;
        thread->vfpstate.hard.cpu = NR_CPUS;
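        /*
         * The structure copy above would leave the child tagged with the
         * parent's CPU; storing NR_CPUS instead stops the child from
         * claiming the parent's live hardware state and forces a clean
         * reload on the child's first VFP use.
         */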
/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
        struct thread_info *thread = v;

        case THREAD_NOTIFY_SWITCH:
                /*
                 * On SMP, if VFP is enabled, save the old state in
                 * case the thread migrates to a different CPU. The
                 * restoring is done lazily.
                 */
                if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
                        vfp_save_state(vfp_current_hw_state[cpu], fpexc);

                /*
                 * Always disable VFP so we can lazily save/restore the
                 * old state.
                 */
                fmxr(FPEXC, fpexc & ~FPEXC_EN);

        case THREAD_NOTIFY_FLUSH:
                vfp_thread_flush(thread);

        case THREAD_NOTIFY_EXIT:
                vfp_thread_exit(thread);

        case THREAD_NOTIFY_COPY:
                vfp_thread_copy(thread);

static struct notifier_block vfp_notifier_block = {
        .notifier_call  = vfp_notifier,
};
/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
        /*
         * This is the same as NWFPE, because it's not clear what
         * this is used for.
         */
        current->thread.error_code = 0;
        current->thread.trap_no = 6;

        send_sig_fault(SIGFPE, sicode,
                       (void __user *)(instruction_pointer(regs) - 4),
                       current);
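        /*
         * The fault address is instruction_pointer(regs) - 4: by the time
         * the signal is queued the saved PC has already been advanced past
         * the 4-byte VFP instruction (so the task resumes after it), so we
         * back up one word to report the trapping instruction itself.
         */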
static void vfp_panic(char *reason, u32 inst)
        pr_err("VFP: Error: %s\n", reason);
        pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
               fmrx(FPEXC), fmrx(FPSCR), inst);
        for (i = 0; i < 32; i += 2)
                pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
                       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
        pr_debug("VFP: raising exceptions %08x\n", exceptions);

        if (exceptions == VFP_EXCEPTION_ERROR) {
                vfp_panic("unhandled bounce", inst);
                vfp_raise_sigfpe(FPE_FLTINV, regs);

        /*
         * If any of the status flags are set, update the FPSCR.
         * Comparison instructions always return at least one of
         * these flags set.
         */
        if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
                fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
#define RAISE(stat,en,sig)                              \
        if (exceptions & stat && fpscr & en)            \
                si_code = sig;

        /*
         * These are arranged in priority order, least to highest.
         */
        RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
        RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
        RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
        RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
        RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
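        /*
         * Each matching RAISE() overwrites si_code, so the last test that
         * fires wins: listing the conditions from least to most important
         * means, e.g., an invalid-operation (IOC) trap is reported in
         * preference to an inexact (IXC) one when both bits are set.
         */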
        vfp_raise_sigfpe(si_code, regs);

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
        u32 exceptions = VFP_EXCEPTION_ERROR;

        pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

        if (INST_CPRTDO(inst)) {
                if (!INST_CPRT(inst)) {
                        if (vfp_single(inst)) {
                                exceptions = vfp_single_cpdo(inst, fpscr);
                        } else {
                                exceptions = vfp_double_cpdo(inst, fpscr);
                        }

                /*
                 * A CPRT instruction can not appear in FPINST2, nor
                 * can it cause an exception.  Therefore, we do not
                 * have to emulate it.
                 */

        /*
         * A CPDT instruction can not appear in FPINST2, nor can
         * it cause an exception.  Therefore, we do not have to
         * emulate it.
         */

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
        return exceptions & ~VFP_NAN_FLAG;
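        /*
         * The mask in the return above strips VFP_NAN_FLAG, an internal
         * marker used by the emulation helpers (noting that a NaN was
         * involved) rather than a user-visible FPSCR exception bit, so it
         * never reaches vfp_raise_exceptions().
         */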
/*
 * Package up a bounce condition.
 */
static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
        u32 fpscr, orig_fpscr, fpsid, exceptions;

        pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

        /*
         * At this point, FPEXC can have the following configuration:
         *
         *  EX DEX IXE
         *  0   1   x   - synchronous exception
         *  1   x   0   - asynchronous exception
         *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
         *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
         *                implementation), undefined otherwise
         *
         * Clear various bits and enable access to the VFP so we can
         * emulate the bounced instruction.
         */
        fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));
        orig_fpscr = fpscr = fmrx(FPSCR);

        /*
         * Check for the special VFP subarch 1 and FPSCR.IXE bit case
         */
        if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
            && (fpscr & FPSCR_IXE)) {
                /*
                 * Synchronous exception, emulate the trigger instruction
                 */

        if (fpexc & FPEXC_EX) {
                /*
                 * Asynchronous exception. The instruction is read from FPINST
                 * and the interrupted instruction has to be restarted.
                 */
                trigger = fmrx(FPINST);
        } else if (!(fpexc & FPEXC_DEX)) {
                /*
                 * Illegal combination of bits. It can be caused by an
                 * unallocated VFP instruction but with FPSCR.IXE set and not
                 * on VFP subarch 1.
                 */
                vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
        /*
         * Modify fpscr to indicate the number of iterations remaining.
         * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
         * whether FPEXC.VECITR or FPSCR.LEN is used.
         */
        if (fpexc & (FPEXC_EX | FPEXC_VV)) {
                len = fpexc + (1 << FPEXC_LENGTH_BIT);

                fpscr &= ~FPSCR_LENGTH_MASK;
                fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
        }
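        /*
         * The two statements above copy the remaining vector length out of
         * FPEXC (adjusted for the differing field offsets) into FPSCR.LEN,
         * so the emulated instruction below repeats for the correct number
         * of short-vector iterations.
         */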
        /*
         * Handle the first FP instruction.  We used to take note of the
         * FPEXC bounce reason, but this appears to be unreliable.
         * Emulate the bounced instruction instead.
         */
        exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
        vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

        /*
         * If there isn't a second FP instruction, exit now. Note that
         * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
         */
        if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))

        /*
         * The barrier() here prevents fpinst2 being read
         * before the condition above.
         */
        trigger = fmrx(FPINST2);

        exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
        vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
static void vfp_enable(void *unused)
        BUG_ON(preemptible());
        access = get_copro_access();

        /*
         * Enable full access to VFP (cp10 and cp11)
         */
        set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
/*
 * Called by platforms on which we want to disable VFP because it may not be
 * present on all CPUs within an SMP complex.  Needs to be called prior to
 * vfp_init().
 */
void __init vfp_disable(void)
        pr_debug("%s: should be called prior to vfp_init\n", __func__);
#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
        struct thread_info *ti = current_thread_info();
        u32 fpexc = fmrx(FPEXC);

        /* if vfp is on, then save state for resumption */
        if (fpexc & FPEXC_EN) {
                pr_debug("%s: saving vfp state\n", __func__);
                vfp_save_state(&ti->vfpstate, fpexc);

                /* disable, just in case */
                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        } else if (vfp_current_hw_state[ti->cpu]) {
                fmxr(FPEXC, fpexc | FPEXC_EN);
                vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
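                /*
                 * The branch above covers the lazily-disabled case: the VFP
                 * unit is off, yet this CPU's registers still hold the most
                 * recent state of some thread, so that state is written back
                 * before the power-down can clobber it.
                 */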

        /* clear any information we had about the last context state */
        vfp_current_hw_state[ti->cpu] = NULL;
static void vfp_pm_resume(void)
        /* ensure we have access to the vfp */
        vfp_enable(NULL);

        /* and disable it to ensure the next usage restores the state */
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
                               void *v)
        case CPU_PM_ENTER_FAILED:

static struct notifier_block vfp_cpu_pm_notifier_block = {
        .notifier_call = vfp_cpu_pm_notifier,
};

static void vfp_pm_init(void)
        cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
/*
 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
 * with the hardware state.
 */
void vfp_sync_hwstate(struct thread_info *thread)
        unsigned int cpu = get_cpu();

        if (vfp_state_in_hw(cpu, thread)) {
                u32 fpexc = fmrx(FPEXC);

                /*
                 * Save the last VFP state on this CPU.
                 */
                fmxr(FPEXC, fpexc | FPEXC_EN);
                vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
        unsigned int cpu = get_cpu();

        vfp_force_reload(cpu, thread);
/*
 * Save the current VFP state into the provided structures and prepare
 * for entry into a new function (signal handler).
 */
int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
                                    struct user_vfp_exc *ufp_exc)
        struct thread_info *thread = current_thread_info();
        struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;

        /* Ensure that the saved hwstate is up-to-date. */
        vfp_sync_hwstate(thread);

        /*
         * Copy the floating point registers. There can be unused
         * registers; see asm/hwcap.h for details.
         */
        memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));

        /*
         * Copy the status and control register.
         */
        ufp->fpscr = hwstate->fpscr;

        /*
         * Copy the exception registers.
         */
        ufp_exc->fpexc = hwstate->fpexc;
        ufp_exc->fpinst = hwstate->fpinst;
        ufp_exc->fpinst2 = hwstate->fpinst2;

        /* Ensure that VFP is disabled. */
        vfp_flush_hwstate(thread);

        /*
         * As per the PCS, clear the length and stride bits for function
         * entry.
         */
        hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
        struct thread_info *thread = current_thread_info();
        struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;

        /* Disable VFP to avoid corrupting the new thread state. */
        vfp_flush_hwstate(thread);

        /*
         * Copy the floating point registers. There can be unused
         * registers; see asm/hwcap.h for details.
         */
        memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));

        /*
         * Copy the status and control register.
         */
        hwstate->fpscr = ufp->fpscr;

        /*
         * Sanitise and restore the exception registers.
         */
        fpexc = ufp_exc->fpexc;

        /* Ensure the VFP is enabled. */
        fpexc |= FPEXC_EN;

        /* Ensure FPINST2 is invalid and the exception flag is cleared. */
        fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
        hwstate->fpexc = fpexc;

        hwstate->fpinst = ufp_exc->fpinst;
        hwstate->fpinst2 = ufp_exc->fpinst2;
/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware.  The callbacks below are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_dying_cpu(unsigned int cpu)
        vfp_current_hw_state[cpu] = NULL;
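        /*
         * Dropping the owner pointer above marks this (about to be powered
         * off) CPU's register file as holding nothing of value; any thread
         * whose state lived here will reload it from memory the next time
         * it touches the VFP.
         */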

static int vfp_starting_cpu(unsigned int unused)

/*
 * vfp_support_entry - Handle VFP exception from user mode
 *
 * @regs:    pt_regs structure holding the register state at exception entry
 * @trigger: The opcode of the instruction that triggered the exception
 *
 * Returns 0 if the exception was handled, or an error code otherwise.
 */
asmlinkage int vfp_support_entry(struct pt_regs *regs, u32 trigger)
        struct thread_info *ti = current_thread_info();

        if (unlikely(!have_vfp))

        /*
         * If the VFP unit was not enabled yet, we have to check whether the
         * VFP state in the CPU's registers is the most recent VFP state
         * associated with the process. On UP systems, we don't save the VFP
         * state eagerly on a context switch, so we may need to save the
         * VFP state to memory first, as it may belong to another process.
         */
        if (!(fpexc & FPEXC_EN)) {
                /*
                 * Enable the VFP unit but mask the FP exception flag for the
                 * time being, so we can access all the registers.
                 */
                fmxr(FPEXC, fpexc & ~FPEXC_EX);

                /*
                 * Check whether or not the VFP state in the CPU's registers is
                 * the most recent VFP state associated with this task. On SMP,
                 * migration may result in multiple CPUs holding VFP states
                 * that belong to the same task, but only the most recent one
                 * is still valid.
                 */
                if (!vfp_state_in_hw(ti->cpu, ti)) {
                        if (!IS_ENABLED(CONFIG_SMP) &&
                            vfp_current_hw_state[ti->cpu] != NULL) {
                                /*
                                 * This CPU is currently holding the most
                                 * recent VFP state associated with another
                                 * task, and we must save that to memory first.
                                 */
                                vfp_save_state(vfp_current_hw_state[ti->cpu],
                                               fpexc);
                        }

                        /*
                         * We can now proceed with loading the task's VFP state
                         * from memory into the CPU registers.
                         */
                        fpexc = vfp_load_state(&ti->vfpstate);
                        vfp_current_hw_state[ti->cpu] = &ti->vfpstate;

                        /*
                         * Record that this CPU is now the one holding the most
                         * recent VFP state of the task.
                         */
                        ti->vfpstate.hard.cpu = ti->cpu;
                if (fpexc & FPEXC_EX)
                        /*
                         * Might as well handle the pending exception before
                         * retrying; branch out before setting an FPEXC that
                         * stops us reading stuff.
                         */

                /*
                 * No FP exception is pending: just enable the VFP and
                 * replay the instruction that trapped.
                 */

        /* Check for synchronous or asynchronous exceptions */
        if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
                u32 fpscr = fmrx(FPSCR);

                /*
                 * On some implementations of the VFP subarch 1,
                 * setting FPSCR.IXE causes all the CDP instructions to
                 * be bounced synchronously without setting the
                 * FPEXC.EX bit.
                 */
                if (!(fpscr & FPSCR_IXE)) {
                        if (!(fpscr & FPSCR_LENGTH_MASK)) {
                                pr_debug("not VFP\n");

bounce:
        VFP_bounce(trigger, fpexc, regs);
#ifdef CONFIG_KERNEL_MODE_NEON

static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
        /*
         * If we reach this point, a floating point exception has been raised
         * while running in kernel mode. If the NEON/VFP unit was enabled at the
         * time, it means a VFP instruction has been issued that requires
         * software assistance to complete, something which is not currently
         * supported in kernel mode.
         * If the NEON/VFP unit was disabled, and the location pointed to below
         * is properly preceded by a call to kernel_neon_begin(), something has
         * caused the task to be scheduled out and back in again. In this case,
         * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
         * be helpful in localizing the problem.
         */
        if (fmrx(FPEXC) & FPEXC_EN)
                pr_crit("BUG: unsupported FP instruction in kernel mode\n");
        else
                pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
        pr_crit("FPEXC == 0x%08x\n", fmrx(FPEXC));
static struct undef_hook vfp_kmode_exception_hook[] = {{
        .instr_mask     = 0xfe000000,
        .instr_val      = 0xf2000000,
        .cpsr_mask      = MODE_MASK | PSR_T_BIT,
        .cpsr_val       = SVC_MODE,
        .fn             = vfp_kmode_exception,
}, {
        .instr_mask     = 0xff100000,
        .instr_val      = 0xf4000000,
        .cpsr_mask      = MODE_MASK | PSR_T_BIT,
        .cpsr_val       = SVC_MODE,
        .fn             = vfp_kmode_exception,
}, {
        .instr_mask     = 0xef000000,
        .instr_val      = 0xef000000,
        .cpsr_mask      = MODE_MASK | PSR_T_BIT,
        .cpsr_val       = SVC_MODE | PSR_T_BIT,
        .fn             = vfp_kmode_exception,
}, {
        .instr_mask     = 0xff100000,
        .instr_val      = 0xf9000000,
        .cpsr_mask      = MODE_MASK | PSR_T_BIT,
        .cpsr_val       = SVC_MODE | PSR_T_BIT,
        .fn             = vfp_kmode_exception,
}, {
        .instr_mask     = 0x0c000e00,
        .instr_val      = 0x0c000a00,
        .cpsr_mask      = MODE_MASK,
        .cpsr_val       = SVC_MODE,
        .fn             = vfp_kmode_exception,
}};
static int __init vfp_kmode_exception_hook_init(void)
        for (i = 0; i < ARRAY_SIZE(vfp_kmode_exception_hook); i++)
                register_undef_hook(&vfp_kmode_exception_hook[i]);

subsys_initcall(vfp_kmode_exception_hook_init);
/*
 * Kernel-side NEON support functions
 */
void kernel_neon_begin(void)
        struct thread_info *thread = current_thread_info();

        /*
         * Kernel mode NEON is only allowed outside of hardirq context with
         * preemption and softirq processing disabled. This will make sure that
         * the kernel mode NEON register contents never need to be preserved.
         */
        BUG_ON(in_hardirq());
        cpu = __smp_processor_id();

        fpexc = fmrx(FPEXC) | FPEXC_EN;

        /*
         * Save the userland NEON/VFP state. Under UP,
         * the owner could be a task other than 'current'.
         */
        if (vfp_state_in_hw(cpu, thread))
                vfp_save_state(&thread->vfpstate, fpexc);
        else if (vfp_current_hw_state[cpu] != NULL)
                vfp_save_state(vfp_current_hw_state[cpu], fpexc);

        vfp_current_hw_state[cpu] = NULL;
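        /*
         * Clearing the owner pointer above is deliberate: the NEON/VFP
         * registers are about to be clobbered by kernel-mode NEON, so no
         * thread may treat the hardware copy of its state as valid any
         * longer; the next user-space VFP use reloads it from memory.
         */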
EXPORT_SYMBOL(kernel_neon_begin);

void kernel_neon_end(void)
        /* Disable the NEON/VFP unit. */
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

EXPORT_SYMBOL(kernel_neon_end);

#endif /* CONFIG_KERNEL_MODE_NEON */
static int __init vfp_detect(struct pt_regs *regs, unsigned int instr)
        VFP_arch = UINT_MAX;    /* mark as not present */

static struct undef_hook vfp_detect_hook __initdata = {
        .instr_mask     = 0x0c000e00,
        .instr_val      = 0x0c000a00,
        .cpsr_mask      = MODE_MASK,
        .cpsr_val       = SVC_MODE,
        .fn             = vfp_detect,
};
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
        unsigned int cpu_arch = cpu_architecture();

        /*
         * Enable the access to the VFP on all online CPUs so the
         * following test on FPSID will succeed.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6)
                on_each_cpu(vfp_enable, NULL, 1);

        /*
         * First check that there is a VFP that we can use.
         * The handler is already set up to just log calls, so
         * we just need to read the VFPSID register.
         */
        register_undef_hook(&vfp_detect_hook);
        vfpsid = fmrx(FPSID);
        unregister_undef_hook(&vfp_detect_hook);

        pr_info("VFP support v0.3: ");
        if (VFP_arch) {
                pr_cont("not present\n");
        /* Extract the architecture on CPUID scheme */
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                VFP_arch = vfpsid & FPSID_CPUID_ARCH_MASK;
                VFP_arch >>= FPSID_ARCH_BIT;

                /*
                 * Check for the presence of the Advanced SIMD
                 * load/store instructions, integer and single
                 * precision floating point operations. Only check
                 * for NEON if the hardware has the MVFR registers.
                 */
                if (IS_ENABLED(CONFIG_NEON) &&
                    (fmrx(MVFR1) & 0x000fff00) == 0x00011100)
                        elf_hwcap |= HWCAP_NEON;
                if (IS_ENABLED(CONFIG_VFPv3)) {
                        u32 mvfr0 = fmrx(MVFR0);

                        if (((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) == 0x2 ||
                            ((mvfr0 & MVFR0_SP_MASK) >> MVFR0_SP_BIT) == 0x2) {
                                elf_hwcap |= HWCAP_VFPv3;

                                /*
                                 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
                                 * this configuration only have 16 x 64bit
                                 * registers.
                                 */
                                if ((mvfr0 & MVFR0_A_SIMD_MASK) == 1)
                                        elf_hwcap |= HWCAP_VFPv3D16;
                                else
                                        elf_hwcap |= HWCAP_VFPD32;
                        }

                        if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
                                elf_hwcap |= HWCAP_VFPv4;
                        if (((fmrx(MVFR1) & MVFR1_ASIMDHP_MASK) >> MVFR1_ASIMDHP_BIT) == 0x2)
                                elf_hwcap |= HWCAP_ASIMDHP;
                        if (((fmrx(MVFR1) & MVFR1_FPHP_MASK) >> MVFR1_FPHP_BIT) == 0x3)
                                elf_hwcap |= HWCAP_FPHP;
                }
                /*
                 * Check for the presence of Advanced SIMD Dot Product
                 * instructions.
                 */
                isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
                if (cpuid_feature_extract_field(isar6, 4) == 0x1)
                        elf_hwcap |= HWCAP_ASIMDDP;

                /*
                 * Check for the presence of Advanced SIMD Floating point
                 * half-precision multiplication instructions.
                 */
                if (cpuid_feature_extract_field(isar6, 8) == 0x1)
                        elf_hwcap |= HWCAP_ASIMDFHM;

                /*
                 * Check for the presence of Advanced SIMD Bfloat16
                 * floating point instructions.
                 */
                if (cpuid_feature_extract_field(isar6, 20) == 0x1)
                        elf_hwcap |= HWCAP_ASIMDBF16;

                /*
                 * Check for the presence of Advanced SIMD and floating point
                 * Int8 matrix multiplication instructions.
                 */
                if (cpuid_feature_extract_field(isar6, 24) == 0x1)
                        elf_hwcap |= HWCAP_I8MM;
        /* Extract the architecture version on pre-cpuid scheme */
        } else {
                if (vfpsid & FPSID_NODOUBLE) {
                        pr_cont("no double precision support\n");

                VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
        cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
                                  "arm/vfp:starting", vfp_starting_cpu,
                                  vfp_dying_cpu);

        thread_register_notifier(&vfp_notifier_block);

        /*
         * We detected VFP, and the support code is
         * in place; report VFP support to userspace.
         */
        elf_hwcap |= HWCAP_VFP;
        pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
                (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
                (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
                (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
                (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
                (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

core_initcall(vfp_init);