// SPDX-License-Identifier: GPL-2.0-only
/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/swab.h>

#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)
/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task that was most recently
 * loaded onto the CPU, or NULL if kernel mode NEON has been performed after
 * that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task. If the task is behaving as a VMM, then this will be managed by
 * KVM which will clear it to indicate that the vcpu FPSIMD state is currently
 * loaded on the CPU, allowing the state to be saved if an FPSIMD-aware
 * softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and
 * flag the register state as invalid.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
 * run but prevent them from using FPSIMD.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_cpu field
 *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
 *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   reloaded from memory;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
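/*
 * Illustrative sketch (not a real kernel helper): the "still in sync"
 * test described above boils down to the following pair of comparisons,
 * performed with preemption disabled:
 *
 *	bool in_sync =
 *		__this_cpu_read(fpsimd_last_state.st) ==
 *			&current->thread.uw.fpsimd_state &&
 *		current->thread.fpsimd_cpu == smp_processor_id();
 *
 * fpsimd_thread_switch() below performs exactly these comparisons when
 * deciding whether to set TIF_FOREIGN_FPSTATE for the incoming task.
 */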
struct fpsimd_last_state_struct {
	struct user_fpsimd_state *st;
	void *sve_state;
	unsigned int sve_vl;
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
__ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
#ifdef CONFIG_ARM64_SVE
	[ARM64_VEC_SVE] = {
		.type			= ARM64_VEC_SVE,
		.name			= "SVE",
		.min_vl			= SVE_VL_MIN,
		.max_vl			= SVE_VL_MIN,
		.max_virtualisable_vl	= SVE_VL_MIN,
	},
#endif
};

static unsigned int vec_vl_inherit_flag(enum vec_type type)
{
	switch (type) {
	case ARM64_VEC_SVE:
		return TIF_SVE_VL_INHERIT;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
struct vl_config {
	int __default_vl;		/* Default VL for tasks */
};

static struct vl_config vl_config[ARM64_VEC_MAX];

static inline int get_default_vl(enum vec_type type)
{
	return READ_ONCE(vl_config[type].__default_vl);
}

#ifdef CONFIG_ARM64_SVE

static inline int get_sve_default_vl(void)
{
	return get_default_vl(ARM64_VEC_SVE);
}

static inline void set_default_vl(enum vec_type type, int val)
{
	WRITE_ONCE(vl_config[type].__default_vl, val);
}

static inline void set_sve_default_vl(int val)
{
	set_default_vl(ARM64_VEC_SVE, val);
}
static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declaration for code that will be optimised out: */
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */
DEFINE_PER_CPU(bool, fpsimd_context_busy);
EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);

static void fpsimd_bind_task_to_cpu(void);

static void __get_cpu_fpsimd_context(void)
{
	bool busy = __this_cpu_xchg(fpsimd_context_busy, true);

	WARN_ON(busy);
}
/*
 * Claim ownership of the CPU FPSIMD context for use by the calling context.
 *
 * The caller may freely manipulate the FPSIMD context metadata until
 * put_cpu_fpsimd_context() is called.
 *
 * The double-underscore version must only be called if you know the task
 * can't be preempted.
 */
static void get_cpu_fpsimd_context(void)
{
	local_bh_disable();
	__get_cpu_fpsimd_context();
}
static void __put_cpu_fpsimd_context(void)
{
	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);

	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
}

/*
 * Release the CPU FPSIMD context.
 *
 * Must be called from a context in which get_cpu_fpsimd_context() was
 * previously called, with no call to put_cpu_fpsimd_context() in the
 * meantime.
 */
static void put_cpu_fpsimd_context(void)
{
	__put_cpu_fpsimd_context();
	local_bh_enable();
}

static bool have_cpu_fpsimd_context(void)
{
	return !preemptible() && __this_cpu_read(fpsimd_context_busy);
}
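/*
 * Typical usage (illustrative sketch): code that manipulates a task's
 * fpsimd_state or TIF_FOREIGN_FPSTATE brackets the access like this:
 *
 *	get_cpu_fpsimd_context();
 *	fpsimd_save();			// write back any live state
 *	fpsimd_flush_task_state(current);
 *	put_cpu_fpsimd_context();
 *
 * Softirqs may still run while the context is claimed, but
 * may_use_simd() reports false to them because fpsimd_context_busy is
 * set, so they cannot touch the FPSIMD registers in the meantime.
 */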
/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type)
{
	return task->thread.vl[type];
}

void task_set_vl(struct task_struct *task, enum vec_type type,
		 unsigned int vl)
{
	task->thread.vl[type] = vl;
}

unsigned int task_get_vl_onexec(const struct task_struct *task,
				enum vec_type type)
{
	return task->thread.vl_onexec[type];
}

void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
			unsigned int vl)
{
	task->thread.vl_onexec[type] = vl;
}
/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked. If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view. For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *    irrespective of whether TIF_SVE is clear or set, since these are
 *    not vector length dependent.
 */
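/*
 * Illustrative sketch: ptrace or signal code that wants the FPSIMD view
 * of a task regardless of which format is currently live can do:
 *
 *	if (test_tsk_thread_flag(task, TIF_SVE))
 *		sve_to_fpsimd(task);	// distil Vn out of Zn
 *	// task->thread.uw.fpsimd_state is now up to date
 *
 * which is precisely what sve_sync_to_fpsimd() below implements.
 */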
/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!have_cpu_fpsimd_context());

	if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) {
		sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr, true);
	} else {
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
	}
}
/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers.
 */
static void fpsimd_save(void)
{
	struct fpsimd_last_state_struct const *last =
		this_cpu_ptr(&fpsimd_last_state);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!have_cpu_fpsimd_context());

	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		return;

	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
	    test_thread_flag(TIF_SVE)) {
		if (WARN_ON(sve_get_vl() != last->sve_vl)) {
			/*
			 * Can't save the user regs, so current would
			 * re-enter user with corrupt state.
			 * There's no way to recover, so kill it:
			 */
			force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
			return;
		}

		sve_save_state((char *)last->sve_state +
					sve_ffr_offset(last->sve_vl),
			       &last->st->fpsr, true);
	} else {
		fpsimd_save_state(last->st);
	}
}
/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(enum vec_type type,
						 unsigned int vl)
{
	struct vl_info *info = &vl_info[type];
	int bit;
	int max_vl = info->max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = info->min_vl;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = info->min_vl;

	if (vl > max_vl)
		vl = max_vl;

	bit = find_next_bit(info->vq_map, SVE_VQ_MAX,
			    __vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(__bit_to_vq(bit));
}
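/*
 * Worked example (illustrative): with supported VQs {1, 2, 4} and a
 * request for vl = 48 (vq = 3), __vq_to_bit() maps larger VQs to lower
 * bit positions, so the find_next_bit() search starting from the bit
 * for vq = 3 lands on the bit for vq = 2 and the function returns
 * vl = 32: the largest supported length not exceeding the request.
 */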
#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)

static int vec_proc_do_default_vl(struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	struct vl_info *info = table->extra1;
	enum vec_type type = info->type;
	int ret;
	int vl = get_default_vl(type);
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = info->max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	set_default_vl(type, find_supported_vector_length(type, vl));
	return 0;
}

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= vec_proc_do_default_vl,
		.extra1		= &vl_info[ARM64_VEC_SVE],
	},
	{ }
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
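/*
 * Worked example (illustrative): for vq = 2 (VL = 32 bytes), the Zn
 * slots are 32 bytes apart, so ZREG(sst, 2, 1) points 32 bytes past
 * ZREG(sst, 2, 0). __fpsimd_to_sve() below fills only the low 16 bytes
 * of each slot (the Vn part shared with FPSIMD); zeroing the remainder
 * is the caller's job (see sve_sync_from_fpsimd_zeropad()).
 */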
#ifdef CONFIG_CPU_BIG_ENDIAN
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif

#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)

static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
			    unsigned int vq)
{
	unsigned int i;
	__uint128_t *p;

	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t *)ZREG(sst, vq, i);
		*p = arm64_cpu_to_le128(fst->vregs[i]);
	}
}
/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task_get_sve_vl(task));
	__fpsimd_to_sve(sst, fst, vq);
}
/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t const *p;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task_get_sve_vl(task));
	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t const *)ZREG(sst, vq, i);
		fst->vregs[i] = arm64_le128_to_cpu(*p);
	}
}
#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
static size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task_get_sve_vl(task)));
}
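/*
 * Worked example (illustrative): for a 512-bit vector length, vq = 4,
 * so the buffer must hold 32 Z-registers of 64 bytes each, 16
 * P-registers of 8 bytes each, plus FFR: a little over 2KB in total.
 */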
/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data. The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);
}
/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!test_tsk_thread_flag(task, TIF_SVE))
		return;

	vq = sve_vq_from_vl(task_get_sve_vl(task));

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
	__fpsimd_to_sve(sst, fst, vq);
}
int vec_set_vector_length(struct task_struct *task, enum vec_type type,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic code
	 * can work with. A flag may be assigned in the future to
	 * allow setting of larger vector lengths without confusing
	 * older software.
	 */
	if (vl > VL_ARCH_MAX)
		vl = VL_ARCH_MAX;

	vl = find_supported_vector_length(type, vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task_set_vl_onexec(task, type, vl);
	else
		/* Reset VL to system default on next exec: */
		task_set_vl_onexec(task, type, 0);

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task_get_vl(task, type))
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * regular FPSIMD thread. Since the vector length can only be changed
	 * with a syscall we can't be in streaming mode while reconfiguring.
	 */
	if (task == current) {
		get_cpu_fpsimd_context();

		fpsimd_save();
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);

	if (task == current)
		put_cpu_fpsimd_context();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task_set_vl(task, type, vl);

out:
	update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}
/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields.
 * SVE and SME use the same bits for _ONEXEC and _INHERIT.
 *
 * flags are as for vec_set_vector_length().
 */
static int vec_prctl_status(enum vec_type type, unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = task_get_vl_onexec(current, type);
	else
		ret = task_get_vl(current, type);

	if (test_thread_flag(vec_vl_inherit_flag(type)))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}
/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;

	ret = vec_set_vector_length(current, ARM64_VEC_SVE, vl, flags);
	if (ret)
		return ret;

	return vec_prctl_status(ARM64_VEC_SVE, flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;

	return vec_prctl_status(ARM64_VEC_SVE, 0);
}
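/*
 * Userspace usage sketch (illustrative, not kernel code):
 *
 *	int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *	if (ret >= 0)
 *		vl = ret & PR_SVE_VL_LEN_MASK;	// VL actually granted
 *
 * The requested length is rounded by find_supported_vector_length(),
 * so callers must inspect the return value rather than assume the
 * request was honoured exactly.
 */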
static void vec_probe_vqs(struct vl_info *info,
			  DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;

	bitmap_zero(map, SVE_VQ_MAX);

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_vl(info->type, vq - 1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(__vq_to_bit(vq), map);
	}
}
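/*
 * Example (illustrative): on a CPU supporting only VLs of 16, 32 and 64
 * bytes, the first iteration writes the LEN field for vq = SVE_VQ_MAX
 * but reads back VL 64, so the bit for vq = 4 is set and the loop
 * resumes from vq = 3; writing an unsupported length constrains to the
 * next smaller supported one, which is why intervening lengths can be
 * skipped.
 */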
/*
 * Initialise the set of known supported VQs for the boot CPU.
 * This is called during kernel boot, before secondary CPUs are brought up.
 */
void __init vec_init_vq_map(enum vec_type type)
{
	struct vl_info *info = &vl_info[type];
	vec_probe_vqs(info, info->vq_map);
	bitmap_copy(info->vq_partial_map, info->vq_map, SVE_VQ_MAX);
}
/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 * This function is called during the bring-up of early secondary CPUs only.
 */
void vec_update_vq_map(enum vec_type type)
{
	struct vl_info *info = &vl_info[type];
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);

	vec_probe_vqs(info, tmp_map);
	bitmap_and(info->vq_map, info->vq_map, tmp_map, SVE_VQ_MAX);
	bitmap_or(info->vq_partial_map, info->vq_partial_map, tmp_map,
		  SVE_VQ_MAX);
}
/*
 * Check whether the current CPU supports all VQs in the committed set.
 * This function is called during the bring-up of late secondary CPUs only.
 */
int vec_verify_vq_map(enum vec_type type)
{
	struct vl_info *info = &vl_info[type];
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	vec_probe_vqs(info, tmp_map);

	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	if (bitmap_intersects(tmp_map, info->vq_map, SVE_VQ_MAX)) {
		pr_warn("%s: cpu%d: Required vector length(s) missing\n",
			info->name, smp_processor_id());
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return 0;

	/*
	 * For KVM, it is necessary to ensure that this CPU doesn't
	 * support any vector length that guests may have probed as
	 * unsupported.
	 */

	/* Recover the set of supported VQs: */
	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	/* Find VQs supported that are not globally supported: */
	bitmap_andnot(tmp_map, tmp_map, info->vq_map, SVE_VQ_MAX);

	/* Find the lowest such VQ, if any: */
	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		return 0; /* no mismatches */

	/*
	 * Mismatches above sve_max_virtualisable_vl are fine, since
	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
	 */
	if (sve_vl_from_vq(__bit_to_vq(b)) <= info->max_virtualisable_vl) {
		pr_warn("%s: cpu%d: Unsupported vector length(s) present\n",
			info->name, smp_processor_id());
		return -EINVAL;
	}

	return 0;
}
static void __init sve_efi_setup(void)
{
	struct vl_info *info = &vl_info[ARM64_VEC_SVE];

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(info->max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(info->max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}
/*
 * Enable SVE for EL1.
 * Intended for use by the cpufeatures code during CPU boot.
 */
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}
/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1; /* set LEN field to maximum effective value */

	return zcr;
}
void __init sve_setup(void)
{
	struct vl_info *info = &vl_info[ARM64_VEC_SVE];
	u64 zcr;
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map)))
		set_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map);

	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	info->max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);

	/*
	 * Sanity-check that the max VL we determined through CPU features
	 * corresponds properly to sve_vq_map. If not, do our best:
	 */
	if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SVE,
								 info->max_vl)))
		info->max_vl = find_supported_vector_length(ARM64_VEC_SVE,
							    info->max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	set_sve_default_vl(find_supported_vector_length(ARM64_VEC_SVE, 64));

	bitmap_andnot(tmp_map, info->vq_partial_map, info->vq_map,
		      SVE_VQ_MAX);

	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		/* No non-virtualisable VLs found */
		info->max_virtualisable_vl = SVE_VQ_MAX;
	else if (WARN_ON(b == SVE_VQ_MAX - 1))
		/* No virtualisable VLs? This is architecturally forbidden. */
		info->max_virtualisable_vl = SVE_VQ_MIN;
	else /* b + 1 < SVE_VQ_MAX */
		info->max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));

	if (info->max_virtualisable_vl > info->max_vl)
		info->max_virtualisable_vl = info->max_vl;

	pr_info("%s: maximum available vector length %u bytes per vector\n",
		info->name, info->max_vl);
	pr_info("%s: default vector length %u bytes per vector\n",
		info->name, get_sve_default_vl());

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl() < sve_max_vl())
		pr_warn("%s: unvirtualisable vector lengths present\n",
			info->name);

	sve_efi_setup();
}
/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */
/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and the access trap is
 * disabled.
 *
 * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	sve_alloc(current);
	if (!current->thread.sve_state) {
		force_sig(SIGKILL);
		return;
	}

	get_cpu_fpsimd_context();

	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	/*
	 * Convert the FPSIMD state to SVE, zeroing all the state that
	 * is not shared with FPSIMD. If (as is likely) the current
	 * state is live in the registers then do this there and
	 * update our metadata for the current task including
	 * disabling the trap, otherwise update our in-memory copy.
	 */
	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		unsigned long vq_minus_one =
			sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_set_vq(vq_minus_one);
		sve_flush_live(true, vq_minus_one);
		fpsimd_bind_task_to_cpu();
	} else {
		fpsimd_to_sve(current);
	}

	put_cpu_fpsimd_context();
}
/*
 * Trapped FP/ASIMD access.
 */
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}
/*
 * Raise a SIGFPE for the current process.
 */
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	unsigned int si_code = FPE_FLTUNK;

	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	send_sig_fault(SIGFPE, si_code,
		       (void __user *)instruction_pointer(regs),
		       current);
}
void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	__get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/*
	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
	 * state. For kernel threads, FPSIMD registers are never loaded
	 * and wrong_task and wrong_cpu will always be true.
	 */
	wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
		     &next->thread.uw.fpsimd_state;
	wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
			       wrong_task || wrong_cpu);

	__put_cpu_fpsimd_context();
}
static void fpsimd_flush_thread_vl(enum vec_type type)
{
	int vl, supported_vl;

	/*
	 * Reset the task vector length as required. This is where we
	 * ensure that all user tasks have a valid vector length
	 * configured: no kernel task can become a user task without
	 * an exec and hence a call to this function. By the time the
	 * first call to this function is made, all early hardware
	 * probing is complete, so __sve_default_vl should be valid.
	 * If a bug causes this to go wrong, we make some noise and
	 * try to fudge thread.sve_vl to a safe value here.
	 */
	vl = task_get_vl_onexec(current, type);
	if (!vl)
		vl = get_default_vl(type);

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = vl_info[type].min_vl;

	supported_vl = find_supported_vector_length(type, vl);
	if (WARN_ON(supported_vl != vl))
		vl = supported_vl;

	task_set_vl(current, type, vl);

	/*
	 * If the task is not set to inherit, ensure that the vector
	 * length will be reset by a subsequent exec:
	 */
	if (!test_thread_flag(vec_vl_inherit_flag(type)))
		task_set_vl_onexec(current, type, 0);
}
void fpsimd_flush_thread(void)
{
	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();

	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);
		fpsimd_flush_thread_vl(ARM64_VEC_SVE);
	}

	put_cpu_fpsimd_context();
}
/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'.
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();
	fpsimd_save();
	put_cpu_fpsimd_context();
}
/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}
/*
 * Associate current's FPSIMD context with this cpu.
 * The caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 */
static void fpsimd_bind_task_to_cpu(void)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	last->st = &current->thread.uw.fpsimd_state;
	last->sve_state = current->thread.sve_state;
	last->sve_vl = task_get_sve_vl(current);
	current->thread.fpsimd_cpu = smp_processor_id();

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}
void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
			      unsigned int sve_vl)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!in_softirq() && !irqs_disabled());

	last->st = st;
	last->sve_state = sve_state;
	last->sve_vl = sve_vl;
}
/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'. This is called when we are preparing to return to
 * userspace to ensure that userspace sees a good register state.
 */
void fpsimd_restore_current_state(void)
{
	/*
	 * For the tasks that were created before we detected the absence of
	 * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via
	 * fpsimd_thread_switch(), e.g. init. This could then be inherited by
	 * the child processes. If we later detect that the system doesn't
	 * support FP/SIMD, we must clear the flag for all the tasks to
	 * indicate that the FPSTATE is clean (as we can't have one) to avoid
	 * looping for ever in do_notify_resume().
	 */
	if (!system_supports_fpsimd()) {
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		return;
	}

	get_cpu_fpsimd_context();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}

	put_cpu_fpsimd_context();
}
/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'. This is used by the signal code to restore the
 * register state when returning from a signal handler in FPSIMD only cases;
 * any SVE context will be discarded.
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	get_cpu_fpsimd_context();

	current->thread.uw.fpsimd_state = *state;
	if (test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();
	fpsimd_bind_task_to_cpu();

	clear_thread_flag(TIF_FOREIGN_FPSTATE);

	put_cpu_fpsimd_context();
}
/*
 * Invalidate live CPU copies of task t's FPSIMD state.
 *
 * This function may be called with preemption enabled. The barrier()
 * ensures that the assignment to fpsimd_cpu is visible to any
 * preemption/softirq that could race with set_tsk_thread_flag(), so
 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
 *
 * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
 * subsequent code.
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	t->thread.fpsimd_cpu = NR_CPUS;
	/*
	 * If we don't support fpsimd, bail out after we have
	 * reset the fpsimd_cpu for this task and clear the
	 * FPSTATE.
	 */
	if (!system_supports_fpsimd())
		return;
	barrier();
	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);

	barrier();
}
/*
 * Invalidate any task's FPSIMD state that is present on this cpu.
 * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
 * before calling this function.
 */
static void fpsimd_flush_cpu_state(void)
{
	WARN_ON(!system_supports_fpsimd());
	__this_cpu_write(fpsimd_last_state.st, NULL);
	set_thread_flag(TIF_FOREIGN_FPSTATE);
}
/*
 * Save the FPSIMD state to memory and invalidate cpu view.
 * This function must be called with preemption disabled.
 */
void fpsimd_save_and_flush_cpu_state(void)
{
	if (!system_supports_fpsimd())
		return;
	WARN_ON(preemptible());
	__get_cpu_fpsimd_context();
	fpsimd_save();
	fpsimd_flush_cpu_state();
	__put_cpu_fpsimd_context();
}
#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();
}
EXPORT_SYMBOL(kernel_neon_begin);
/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	if (!system_supports_fpsimd())
		return;

	put_cpu_fpsimd_context();
}
EXPORT_SYMBOL(kernel_neon_end);
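/*
 * Usage sketch (illustrative): a typical kernel-mode NEON user does:
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		// ... NEON/FPSIMD instructions ...
 *		kernel_neon_end();
 *	} else {
 *		// ... scalar fallback ...
 *	}
 *
 * kernel_neon_begin() saves any live task state and claims the CPU
 * FPSIMD context, so softirq-side FPSIMD use is held off until
 * kernel_neon_end() releases it.
 */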
#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */
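/*
 * Call-site sketch (illustrative): the EFI runtime call wrappers
 * bracket each service call roughly as follows (the exact plumbing
 * lives in the arch EFI code, not here):
 *
 *	__efi_fpsimd_begin();
 *	// ... invoke the EFI runtime service ...
 *	__efi_fpsimd_end();
 *
 * Unlike kernel_neon_begin(), this pair must also work from contexts
 * where may_use_simd() is false, hence the dedicated EFI save area.
 */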
/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);

			__this_cpu_write(efi_sve_state_used, true);

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       true);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}
/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);

			sve_set_vq(sve_vq_from_vl(sve_get_vl()) - 1);
			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       true);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */
#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		fpsimd_save_and_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}
#else
static inline void fpsimd_hotplug_init(void) { }
#endif
/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (cpu_have_named_feature(FP)) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!cpu_have_named_feature(ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return sve_sysctl_init();
}
core_initcall(fpsimd_init);