// SPDX-License-Identifier: GPL-2.0-only
/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pkeys.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>

#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/xcr.h>

#include <asm/tlbflush.h>
#include <asm/prctl.h>

#define for_each_extended_xfeature(bit, mask)				\
	(bit) = FIRST_EXTENDED_XFEATURE;				\
	for_each_set_bit_from(bit, (unsigned long *)&(mask), 8 * sizeof(mask))
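/*
 * Illustrative usage (not part of this file's logic): walk the extended
 * features in an arbitrary mask, skipping the legacy FP/SSE bits:
 *
 *	unsigned int bit;
 *	u64 mask = BIT_ULL(XFEATURE_YMM) | BIT_ULL(XFEATURE_PKRU);
 *
 *	for_each_extended_xfeature(bit, mask)
 *		pr_info("xfeature %u is set\n", bit);
 *
 * This visits bit 2 (YMM) and bit 9 (PKRU) only.
 */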
/*
 * Although we spell it out in here, the Processor Trace
 * xfeature is completely unused. We use other mechanisms
 * to save/restore PT state in Linux.
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers",
	"SSE registers",
	"AVX registers",
	"MPX bounds registers",
	"MPX CSR",
	"AVX-512 opmask",
	"AVX-512 Hi256",
	"AVX-512 ZMM_Hi256",
	"Processor Trace (unused)",
	"Protection Keys User registers",
	"PASID state",
	"unknown xstate feature",
	"unknown xstate feature",
	"unknown xstate feature",
	"unknown xstate feature",
	"unknown xstate feature",
	"unknown xstate feature",
	"AMX Tile config",
	"AMX Tile data",
	"unknown xstate feature",
};
static unsigned short xsave_cpuid_features[] __initdata = {
	[XFEATURE_FP]				= X86_FEATURE_FPU,
	[XFEATURE_SSE]				= X86_FEATURE_XMM,
	[XFEATURE_YMM]				= X86_FEATURE_AVX,
	[XFEATURE_BNDREGS]			= X86_FEATURE_MPX,
	[XFEATURE_BNDCSR]			= X86_FEATURE_MPX,
	[XFEATURE_OPMASK]			= X86_FEATURE_AVX512F,
	[XFEATURE_ZMM_Hi256]			= X86_FEATURE_AVX512F,
	[XFEATURE_Hi16_ZMM]			= X86_FEATURE_AVX512F,
	[XFEATURE_PT_UNIMPLEMENTED_SO_FAR]	= X86_FEATURE_INTEL_PT,
	[XFEATURE_PKRU]				= X86_FEATURE_PKU,
	[XFEATURE_PASID]			= X86_FEATURE_ENQCMD,
	[XFEATURE_XTILE_CFG]			= X86_FEATURE_AMX_TILE,
	[XFEATURE_XTILE_DATA]			= X86_FEATURE_AMX_TILE,
};
static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
	{ [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
	{ [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_flags[XFEATURE_MAX] __ro_after_init;

#define XSTATE_FLAG_SUPERVISOR	BIT(0)
#define XSTATE_FLAG_ALIGNED64	BIT(1)
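/*
 * These flags mirror CPUID.(EAX=0xD,ECX=n):ECX bits 0 and 1, which
 * setup_xstate_cache() stores verbatim in xstate_flags[]: bit 0 marks a
 * supervisor (IA32_XSS managed) component, bit 1 marks a component that
 * is aligned to a 64-byte boundary in the compacted format.
 */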
/*
 * Return whether the system supports a given xfeature.
 *
 * Also return the name of the (most advanced) feature that the caller requested:
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
	u64 xfeatures_missing = xfeatures_needed & ~fpu_kernel_cfg.max_features;

	if (unlikely(feature_name)) {
		long xfeature_idx, max_idx;
		u64 xfeatures_print;
		/*
		 * We use fls64() here to be able to print the most advanced
		 * feature that was requested but is missing. So if a driver
		 * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we print the
		 * missing AVX feature - this is the most informative message
		 * to users:
		 */
		if (xfeatures_missing)
			xfeatures_print = xfeatures_missing;
		else
			xfeatures_print = xfeatures_needed;

		xfeature_idx = fls64(xfeatures_print)-1;
		max_idx = ARRAY_SIZE(xfeature_names)-1;
		xfeature_idx = min(xfeature_idx, max_idx);

		*feature_name = xfeature_names[xfeature_idx];
	}

	if (xfeatures_missing)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
static bool xfeature_is_aligned64(int xfeature_nr)
{
	return xstate_flags[xfeature_nr] & XSTATE_FLAG_ALIGNED64;
}

static bool xfeature_is_supervisor(int xfeature_nr)
{
	return xstate_flags[xfeature_nr] & XSTATE_FLAG_SUPERVISOR;
}
static unsigned int xfeature_get_offset(u64 xcomp_bv, int xfeature)
{
	unsigned int offs, i;

	/*
	 * Non-compacted format and legacy features use the cached fixed
	 * offsets.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_XSAVES) || xfeature <= XFEATURE_SSE)
		return xstate_offsets[xfeature];

	/*
	 * Compacted format offsets depend on the actual content of the
	 * compacted xsave area which is determined by the xcomp_bv header
	 * field.
	 */
	offs = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	for_each_extended_xfeature(i, xcomp_bv) {
		if (xfeature_is_aligned64(i))
			offs = ALIGN(offs, 64);
		if (i == xfeature)
			break;
		offs += xstate_sizes[i];
	}
	return offs;
}
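/*
 * Worked example (illustrative; real offsets depend on the CPU): with
 * xcomp_bv = YMM | PKRU, the compacted extended area starts at
 * FXSAVE_SIZE + XSAVE_HDR_SIZE = 512 + 64 = 576. YMM (256 bytes) is
 * placed at 576, so PKRU lands at 576 + 256 = 832, assuming PKRU does
 * not carry the 64-byte alignment flag on the given CPU.
 */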
/*
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
 */
void fpu__init_cpu_xstate(void)
{
	if (!boot_cpu_has(X86_FEATURE_XSAVE) || !fpu_kernel_cfg.max_features)
		return;

	cr4_set_bits(X86_CR4_OSXSAVE);

	/*
	 * Must happen after CR4 setup and before xsetbv() to allow KVM
	 * lazy passthrough. Write independent of the dynamic state static
	 * key as that does not work on the boot CPU. This also ensures
	 * that any stale state is wiped out from XFD.
	 */
	if (cpu_feature_enabled(X86_FEATURE_XFD))
		wrmsrl(MSR_IA32_XFD, init_fpstate.xfd);

	/*
	 * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
	 * managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user
	 * states can be set here.
	 */
	xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features);

	/*
	 * MSR_IA32_XSS sets supervisor states managed by XSAVES.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
				     xfeatures_mask_independent());
	}
}
static bool xfeature_enabled(enum xfeature xfeature)
{
	return fpu_kernel_cfg.max_features & BIT_ULL(xfeature);
}
/*
 * Record the offsets and sizes of various xstates contained
 * in the XSAVE state memory layout.
 */
static void __init setup_xstate_cache(void)
{
	u32 eax, ebx, ecx, edx, i;
	/* start at the beginning of the "extended state" */
	unsigned int last_good_offset = offsetof(struct xregs_state,
						 extended_state_area);
	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_offsets[XFEATURE_FP]	= 0;
	xstate_sizes[XFEATURE_FP]	= offsetof(struct fxregs_state,
						   xmm_space);

	xstate_offsets[XFEATURE_SSE]	= xstate_sizes[XFEATURE_FP];
	xstate_sizes[XFEATURE_SSE]	= sizeof_field(struct fxregs_state,
						       xmm_space);

	for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);

		xstate_sizes[i] = eax;
		xstate_flags[i] = ecx;

		/*
		 * If an xfeature is supervisor state, the offset in EBX is
		 * invalid; leave it at -1.
		 */
		if (xfeature_is_supervisor(i))
			continue;

		xstate_offsets[i] = ebx;

		/*
		 * In our xstate size checks, we assume that the highest-numbered
		 * xstate feature has the highest offset in the buffer. Ensure
		 * it does.
		 */
		WARN_ONCE(last_good_offset > xstate_offsets[i],
			  "x86/fpu: misordered xstate at %d\n", last_good_offset);

		last_good_offset = xstate_offsets[i];
	}
}
static void __init print_xstate_feature(u64 xstate_mask)
{
	const char *feature_name;

	if (cpu_has_xfeatures(xstate_mask, &feature_name))
		pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name);
}
/*
 * Print out all the supported xstate features:
 */
static void __init print_xstate_features(void)
{
	print_xstate_feature(XFEATURE_MASK_FP);
	print_xstate_feature(XFEATURE_MASK_SSE);
	print_xstate_feature(XFEATURE_MASK_YMM);
	print_xstate_feature(XFEATURE_MASK_BNDREGS);
	print_xstate_feature(XFEATURE_MASK_BNDCSR);
	print_xstate_feature(XFEATURE_MASK_OPMASK);
	print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
	print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
	print_xstate_feature(XFEATURE_MASK_PKRU);
	print_xstate_feature(XFEATURE_MASK_PASID);
	print_xstate_feature(XFEATURE_MASK_XTILE_CFG);
	print_xstate_feature(XFEATURE_MASK_XTILE_DATA);
}
/*
 * This check is important because it is easy to get XSTATE_*
 * confused with XSTATE_BIT_*.
 */
#define CHECK_XFEATURE(nr) do {			\
	WARN_ON(nr < FIRST_EXTENDED_XFEATURE);	\
	WARN_ON(nr >= XFEATURE_MAX);		\
} while (0)
/*
 * Print out xstate component offsets and sizes
 */
static void __init print_xstate_offset_size(void)
{
	int i;

	for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
		pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
			i, xfeature_get_offset(fpu_kernel_cfg.max_features, i),
			i, xstate_sizes[i]);
	}
}
/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static __init void os_xrstor_booting(struct xregs_state *xstate)
{
	u64 mask = fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSTATE;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/*
	 * We should never fault when copying from a kernel buffer, and the FPU
	 * state we set at boot time should be valid.
	 */
	WARN_ON_FPU(err);
}
/*
 * All supported features have either init state all zeros or are
 * handled in setup_init_fpu() individually. This is an explicit
 * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
 * newly added supported features at build time and make people
 * actually look at the init state for the new feature.
 */
#define XFEATURES_INIT_FPSTATE_HANDLED		\
	(XFEATURE_MASK_FP |			\
	 XFEATURE_MASK_SSE |			\
	 XFEATURE_MASK_YMM |			\
	 XFEATURE_MASK_OPMASK |			\
	 XFEATURE_MASK_ZMM_Hi256 |		\
	 XFEATURE_MASK_Hi16_ZMM |		\
	 XFEATURE_MASK_PKRU |			\
	 XFEATURE_MASK_BNDREGS |		\
	 XFEATURE_MASK_BNDCSR |			\
	 XFEATURE_MASK_PASID |			\
	 XFEATURE_MASK_XTILE)
/*
 * Set up the xstate image representing the init state
 */
static void __init setup_init_fpu_buf(void)
{
	BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
		      XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
		     XFEATURES_INIT_FPSTATE_HANDLED);

	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return;

	print_xstate_features();

	xstate_init_xcomp_bv(&init_fpstate.regs.xsave, fpu_kernel_cfg.max_features);

	/*
	 * Init all the features state with header.xfeatures being 0x0
	 */
	os_xrstor_booting(&init_fpstate.regs.xsave);

	/*
	 * All components are now in init state. Read the state back so
	 * that init_fpstate contains all non-zero init state. This only
	 * works with XSAVE, but not with XSAVEOPT and XSAVES because
	 * those use the init optimization which skips writing data for
	 * components in init state.
	 *
	 * XSAVE could be used, but that would require reshuffling the
	 * data when XSAVES is available because XSAVES uses xstate
	 * compaction. But doing so is a pointless exercise because most
	 * components have an all zeros init state except for the legacy
	 * ones (FP and SSE). Those can be saved with FXSAVE into the
	 * legacy area. Adding new features requires ensuring that the
	 * init state is all zeroes, or adding the necessary handling
	 * here.
	 */
	fxsave(&init_fpstate.regs.fxsave);
}
static int xfeature_uncompacted_offset(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	/*
	 * Only XSAVES supports supervisor states and it uses compacted
	 * format. Checking a supervisor state's uncompacted offset is
	 * an error.
	 */
	if (XFEATURE_MASK_SUPERVISOR_ALL & BIT_ULL(xfeature_nr)) {
		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
		return -1;
	}

	CHECK_XFEATURE(xfeature_nr);
	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return ebx;
}

int xfeature_size(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	CHECK_XFEATURE(xfeature_nr);
	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return eax;
}
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
static int validate_user_xstate_header(const struct xstate_header *hdr,
				       struct fpstate *fpstate)
{
	/* No unknown or supervisor features may be set */
	if (hdr->xfeatures & ~fpstate->user_xfeatures)
		return -EINVAL;

	/* Userspace must use the uncompacted format */
	if (hdr->xcomp_bv)
		return -EINVAL;

	/*
	 * If 'reserved' is shrunken to add a new field, make sure to validate
	 * that new field here!
	 */
	BUILD_BUG_ON(sizeof(hdr->reserved) != 48);

	/* No reserved bits may be set */
	if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	return 0;
}
static void __init __xstate_dump_leaves(void)
{
	int i;
	u32 eax, ebx, ecx, edx;
	static int should_dump = 1;

	if (!should_dump)
		return;
	should_dump = 0;
	/*
	 * Dump out a few leaves past the ones that we support
	 * just in case there are some goodies up there
	 */
	for (i = 0; i < XFEATURE_MAX + 10; i++) {
		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
		pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
			XSTATE_CPUID, i, eax, ebx, ecx, edx);
	}
}
#define XSTATE_WARN_ON(x) do {							\
	if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) {	\
		__xstate_dump_leaves();						\
	}									\
} while (0)

#define XCHECK_SZ(sz, nr, nr_macro, __struct) do {			\
	if ((nr == nr_macro) &&						\
	    WARN_ONCE(sz != sizeof(__struct),				\
		      "%s: struct is %zu bytes, cpu state %d bytes\n",	\
		      __stringify(nr_macro), sizeof(__struct), sz)) {	\
		__xstate_dump_leaves();					\
	}								\
} while (0)
/**
 * check_xtile_data_against_struct - Check tile data state size.
 *
 * Calculate the state size by multiplying the single tile size which is
 * recorded in a C struct, and the number of tiles that the CPU reports.
 * Compare the provided size with the calculation.
 *
 * @size:	The tile data state size
 *
 * Returns:	0 on success, -EINVAL on mismatch.
 */
static int __init check_xtile_data_against_struct(int size)
{
	u32 max_palid, palid, state_size;
	u32 eax, ebx, ecx, edx;
	u16 max_tile;

	/*
	 * Check the maximum palette id:
	 *   eax: the highest numbered palette subleaf.
	 */
	cpuid_count(TILE_CPUID, 0, &max_palid, &ebx, &ecx, &edx);

	/*
	 * Cross-check each tile size and find the maximum number of
	 * supported tiles.
	 */
	for (palid = 1, max_tile = 0; palid <= max_palid; palid++) {
		u16 tile_size, max;

		/*
		 * Check the tile size info:
		 *   eax[31:16]: bytes per tile
		 *   ebx[31:16]: the max names (or max number of tiles)
		 */
		cpuid_count(TILE_CPUID, palid, &eax, &ebx, &ecx, &edx);
		tile_size = eax >> 16;
		max = ebx >> 16;

		if (tile_size != sizeof(struct xtile_data)) {
			pr_err("%s: struct is %zu bytes, cpu xtile %d bytes\n",
			       __stringify(XFEATURE_XTILE_DATA),
			       sizeof(struct xtile_data), tile_size);
			__xstate_dump_leaves();
			return -EINVAL;
		}

		if (max > max_tile)
			max_tile = max;
	}

	state_size = sizeof(struct xtile_data) * max_tile;
	if (size != state_size) {
		pr_err("%s: calculated size is %u bytes, cpu state %d bytes\n",
		       __stringify(XFEATURE_XTILE_DATA), state_size, size);
		__xstate_dump_leaves();
		return -EINVAL;
	}
	return 0;
}
/*
 * We have a C struct for each 'xstate'. We need to ensure
 * that our software representation matches what the CPU
 * tells us about the state's size.
 */
static bool __init check_xstate_against_struct(int nr)
{
	/*
	 * Ask the CPU for the size of the state.
	 */
	int sz = xfeature_size(nr);
	/*
	 * Match each CPU state with the corresponding software
	 * structure.
	 */
	XCHECK_SZ(sz, nr, XFEATURE_YMM,       struct ymmh_struct);
	XCHECK_SZ(sz, nr, XFEATURE_BNDREGS,   struct mpx_bndreg_state);
	XCHECK_SZ(sz, nr, XFEATURE_BNDCSR,    struct mpx_bndcsr_state);
	XCHECK_SZ(sz, nr, XFEATURE_OPMASK,    struct avx_512_opmask_state);
	XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
	XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM,  struct avx_512_hi16_state);
	XCHECK_SZ(sz, nr, XFEATURE_PKRU,      struct pkru_state);
	XCHECK_SZ(sz, nr, XFEATURE_PASID,     struct ia32_pasid_state);
	XCHECK_SZ(sz, nr, XFEATURE_XTILE_CFG, struct xtile_cfg);

	/* The tile data size varies between implementations. */
	if (nr == XFEATURE_XTILE_DATA)
		check_xtile_data_against_struct(sz);

	/*
	 * Make *SURE* to add any feature numbers in below if
	 * there are "holes" in the xsave state component
	 * numbers.
	 */
	if ((nr < XFEATURE_YMM) ||
	    (nr >= XFEATURE_MAX) ||
	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
	    ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_RSRVD_COMP_16))) {
		WARN_ONCE(1, "no structure for xstate: %d\n", nr);
		XSTATE_WARN_ON(1);
		return false;
	}
	return true;
}
static unsigned int xstate_calculate_size(u64 xfeatures, bool compacted)
{
	unsigned int size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	int i;

	for_each_extended_xfeature(i, xfeatures) {
		/* Align from the end of the previous feature */
		if (xfeature_is_aligned64(i))
			size = ALIGN(size, 64);
		/*
		 * In compacted format the enabled features are packed,
		 * i.e. disabled features do not occupy space.
		 *
		 * In non-compacted format the offsets are fixed and
		 * disabled states still occupy space in the memory buffer.
		 */
		if (!compacted)
			size = xfeature_uncompacted_offset(i);
		/*
		 * Add the feature size even for non-compacted format
		 * to make the end result correct
		 */
		size += xfeature_size(i);
	}
	return size;
}
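/*
 * Size example (illustrative, using offsets typical of current AMX
 * hardware): with FP, SSE, YMM and the AMX states enabled, the
 * compacted size is 576 + 256 (YMM) + 64 (XTILE_CFG) + 8192
 * (XTILE_DATA, 64-byte aligned) = 9088 bytes, while the non-compacted
 * size is dictated by the fixed XTILE_DATA offset:
 * 2816 + 8192 = 11008 bytes.
 */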
/*
 * This essentially double-checks what the cpu told us about
 * how large the XSAVE buffer needs to be. We are recalculating
 * it to be safe.
 *
 * Independent XSAVE features allocate their own buffers and are not
 * covered by these checks. Only the size of the buffer for task->fpu
 * is checked here.
 */
static bool __init paranoid_xstate_size_valid(unsigned int kernel_size)
{
	bool compacted = cpu_feature_enabled(X86_FEATURE_XSAVES);
	unsigned int size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	int i;

	for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
		if (!check_xstate_against_struct(i))
			return false;
		/*
		 * Supervisor state components can be managed only by
		 * XSAVES.
		 */
		if (!compacted && xfeature_is_supervisor(i)) {
			XSTATE_WARN_ON(1);
			return false;
		}
	}
	size = xstate_calculate_size(fpu_kernel_cfg.max_features, compacted);
	XSTATE_WARN_ON(size != kernel_size);
	return size == kernel_size;
}
/*
 * Get total size of enabled xstates in XCR0 | IA32_XSS.
 *
 * Note the SDM's wording here. "sub-function 0" only enumerates
 * the size of the *user* states. If we use it to size a buffer
 * that we use 'XSAVES' on, we could potentially overflow the
 * buffer because 'XSAVES' saves system states too.
 */
static unsigned int __init get_xsaves_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	/*
	 * - CPUID function 0DH, sub-function 1:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVES instruction for an XSAVE area
	 *    containing all the state components
	 *    corresponding to bits currently set in
	 *    XCR0 | IA32_XSS.
	 */
	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
	return ebx;
}
/*
 * Get the total size of the enabled xstates without the independent supervisor
 * features.
 */
static unsigned int __init get_xsaves_size_no_independent(void)
{
	u64 mask = xfeatures_mask_independent();
	unsigned int size;

	if (!mask)
		return get_xsaves_size();

	/* Disable independent features. */
	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());

	/*
	 * Ask the hardware what size is required of the buffer.
	 * This is the size required for the task->fpu buffer.
	 */
	size = get_xsaves_size();

	/* Re-enable independent features so XSAVES will work on them again. */
	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);

	return size;
}
static unsigned int __init get_xsave_size_user(void)
{
	unsigned int eax, ebx, ecx, edx;
	/*
	 * - CPUID function 0DH, sub-function 0:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVE instruction for an XSAVE area
	 *    containing all the *user* state components
	 *    corresponding to bits currently set in XCR0.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	return ebx;
}
/*
 * Will the runtime-enumerated 'xstate_size' fit in the init
 * task's statically-allocated buffer?
 */
static bool __init is_supported_xstate_size(unsigned int test_xstate_size)
{
	if (test_xstate_size <= sizeof(init_fpstate.regs))
		return true;

	pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
		sizeof(init_fpstate.regs), test_xstate_size);
	return false;
}
static int __init init_xstate_size(void)
{
	/* Recompute the context size for enabled features: */
	unsigned int user_size, kernel_size, kernel_default_size;
	bool compacted = cpu_feature_enabled(X86_FEATURE_XSAVES);

	/* Uncompacted user space size */
	user_size = get_xsave_size_user();

	/*
	 * XSAVES kernel size includes supervisor states and
	 * uses compacted format when available.
	 *
	 * XSAVE does not support supervisor states so
	 * kernel and user size is identical.
	 */
	if (compacted)
		kernel_size = get_xsaves_size_no_independent();
	else
		kernel_size = user_size;

	kernel_default_size =
		xstate_calculate_size(fpu_kernel_cfg.default_features, compacted);

	/* Ensure we have the space to store all default enabled features. */
	if (!is_supported_xstate_size(kernel_default_size))
		return -EINVAL;

	if (!paranoid_xstate_size_valid(kernel_size))
		return -EINVAL;

	fpu_kernel_cfg.max_size = kernel_size;
	fpu_user_cfg.max_size = user_size;

	fpu_kernel_cfg.default_size = kernel_default_size;
	fpu_user_cfg.default_size =
		xstate_calculate_size(fpu_user_cfg.default_features, false);

	return 0;
}
/*
 * We enabled the XSAVE hardware, but something went wrong and
 * we cannot use it. Disable it.
 */
static void __init fpu__init_disable_system_xstate(unsigned int legacy_size)
{
	fpu_kernel_cfg.max_features = 0;
	cr4_clear_bits(X86_CR4_OSXSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);

	/* Restore the legacy size. */
	fpu_kernel_cfg.max_size = legacy_size;
	fpu_kernel_cfg.default_size = legacy_size;
	fpu_user_cfg.max_size = legacy_size;
	fpu_user_cfg.default_size = legacy_size;

	/*
	 * Prevent enabling the static branch which enables writes to the
	 * XFD MSR.
	 */
	init_fpstate.xfd = 0;

	fpstate_reset(&current->thread.fpu);
}
/*
 * Enable and initialize the xsave feature.
 * Called once per system bootup.
 */
void __init fpu__init_system_xstate(unsigned int legacy_size)
{
	unsigned int eax, ebx, ecx, edx;
	u64 xfeatures;
	int err;
	int i;

	if (!boot_cpu_has(X86_FEATURE_FPU)) {
		pr_info("x86/fpu: No FPU detected\n");
		return;
	}

	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
		pr_info("x86/fpu: x87 FPU will use %s\n",
			boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
		return;
	}

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		WARN_ON_FPU(1);
		return;
	}

	/*
	 * Find user xstates supported by the processor.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	fpu_kernel_cfg.max_features = eax + ((u64)edx << 32);

	/*
	 * Find supervisor xstates supported by the processor.
	 */
	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
	fpu_kernel_cfg.max_features |= ecx + ((u64)edx << 32);

	if ((fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
		/*
		 * This indicates that something really unexpected happened
		 * with the enumeration. Disable XSAVE and try to continue
		 * booting without it. This is too early to BUG().
		 */
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n",
		       fpu_kernel_cfg.max_features);
		goto out_disable;
	}

	/*
	 * Clear XSAVE features that are disabled in the normal CPUID.
	 */
	for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
		unsigned short cid = xsave_cpuid_features[i];

		/* Careful: X86_FEATURE_FPU is 0! */
		if ((i != XFEATURE_FP && !cid) || !boot_cpu_has(cid))
			fpu_kernel_cfg.max_features &= ~BIT_ULL(i);
	}

	if (!cpu_feature_enabled(X86_FEATURE_XFD))
		fpu_kernel_cfg.max_features &= ~XFEATURE_MASK_USER_DYNAMIC;

	fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED |
				       XFEATURE_MASK_SUPERVISOR_SUPPORTED;

	fpu_user_cfg.max_features = fpu_kernel_cfg.max_features;
	fpu_user_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED;

	/* Clean out dynamic features from default */
	fpu_kernel_cfg.default_features = fpu_kernel_cfg.max_features;
	fpu_kernel_cfg.default_features &= ~XFEATURE_MASK_USER_DYNAMIC;

	fpu_user_cfg.default_features = fpu_user_cfg.max_features;
	fpu_user_cfg.default_features &= ~XFEATURE_MASK_USER_DYNAMIC;

	/* Store it for paranoia check at the end */
	xfeatures = fpu_kernel_cfg.max_features;

	/*
	 * Initialize the default XFD state in init_fpstate and enable the
	 * dynamic sizing mechanism if dynamic states are available. The
	 * static key cannot be enabled here because this runs before
	 * jump_label_init(). This is delayed to an initcall.
	 */
	init_fpstate.xfd = fpu_user_cfg.max_features & XFEATURE_MASK_USER_DYNAMIC;

	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();

	/* Cache size, offset and flags for initialization */
	setup_xstate_cache();

	err = init_xstate_size();
	if (err)
		goto out_disable;

	/* Reset the state for the current task */
	fpstate_reset(&current->thread.fpu);

	/*
	 * Update info used for ptrace frames; use standard-format size and no
	 * supervisor xstates:
	 */
	update_regset_xstate_info(fpu_user_cfg.max_size,
				  fpu_user_cfg.max_features);

	setup_init_fpu_buf();

	/*
	 * Paranoia check whether something in the setup modified the
	 * xfeatures mask.
	 */
	if (xfeatures != fpu_kernel_cfg.max_features) {
		pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n",
		       xfeatures, fpu_kernel_cfg.max_features);
		goto out_disable;
	}

	print_xstate_offset_size();
	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
		fpu_kernel_cfg.max_features,
		fpu_kernel_cfg.max_size,
		boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
	return;

out_disable:
	/* something went wrong, try to boot without any XSAVE support */
	fpu__init_disable_system_xstate(legacy_size);
}
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVE))
		xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features);

	/*
	 * Restore IA32_XSS. The same CPUID bit enumerates support
	 * of XSAVES and MSR_IA32_XSS.
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
				     xfeatures_mask_independent());
	}

	if (fpu_state_size_dynamic())
		wrmsrl(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd);
}
/*
 * Given an xstate feature nr, calculate where in the xsave
 * buffer the state is. Callers should ensure that the buffer
 * is valid.
 */
static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	u64 xcomp_bv = xsave->header.xcomp_bv;

	if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
		return NULL;

	if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
		if (WARN_ON_ONCE(!(xcomp_bv & BIT_ULL(xfeature_nr))))
			return NULL;
	}

	return (void *)xsave + xfeature_get_offset(xcomp_bv, xfeature_nr);
}
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Note that if there is no data for the field in the xsave buffer
 * this will return NULL.
 *
 * Inputs:
 *	xstate: the thread's storage area for all FPU data
 *	xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
 *	XFEATURE_SSE, etc...)
 * Output:
 *	address of the state in the xsave area, or NULL if the
 *	field is not present in the xsave buffer.
 */
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	/*
	 * Do we even *have* xsave state?
	 */
	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return NULL;

	/*
	 * We should not ever be requesting features that we
	 * have not enabled.
	 */
	if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
		return NULL;

	/*
	 * This assumes the last 'xsave*' instruction to
	 * have requested that 'xfeature_nr' be saved.
	 * If it did not, we might be seeing an old value
	 * of the field in the buffer.
	 *
	 * This can happen because the last 'xsave' did not
	 * request that this feature be saved (unlikely)
	 * or because the "init optimization" caused it
	 * to not be saved.
	 */
	if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
		return NULL;

	return __raw_xsave_addr(xsave, xfeature_nr);
}
#ifdef CONFIG_ARCH_HAS_PKEYS

/*
 * This will go out and modify the PKRU register to set the access
 * rights for @pkey to @init_val.
 */
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
			      unsigned long init_val)
{
	u32 old_pkru, new_pkru_bits = 0;
	int pkey_shift;

	/*
	 * This check implies XSAVE support. OSPKE only gets
	 * set if we enable XSAVE and we enable PKU in XCR0.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return -EINVAL;

	/*
	 * This code should only be called with valid 'pkey'
	 * values originating from in-kernel users. Complain
	 * if a bad value is observed.
	 */
	if (WARN_ON_ONCE(pkey >= arch_max_pkey()))
		return -EINVAL;

	/* Set the bits we need in PKRU: */
	if (init_val & PKEY_DISABLE_ACCESS)
		new_pkru_bits |= PKRU_AD_BIT;
	if (init_val & PKEY_DISABLE_WRITE)
		new_pkru_bits |= PKRU_WD_BIT;

	/* Shift the bits in to the correct place in PKRU for pkey: */
	pkey_shift = pkey * PKRU_BITS_PER_PKEY;
	new_pkru_bits <<= pkey_shift;
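	/*
	 * Worked example (illustrative): for pkey = 2 and
	 * init_val = PKEY_DISABLE_WRITE, pkey_shift = 2 * 2 = 4 and
	 * new_pkru_bits = PKRU_WD_BIT << 4, i.e. PKRU bit 5 - exactly
	 * the write-disable bit for key 2.
	 */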
	/* Get old PKRU and mask off any old bits in place: */
	old_pkru = read_pkru();
	old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);

	/* Write old part along with new part: */
	write_pkru(old_pkru | new_pkru_bits);

	return 0;
}
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
			 void *init_xstate, unsigned int size)
{
	membuf_write(to, from_xstate ? xstate : init_xstate, size);
}
/**
 * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
 * @to:		membuf descriptor
 * @fpstate:	The fpstate buffer from which to copy
 * @pkru_val:	The PKRU value to store in the PKRU component
 * @copy_mode:	The requested copy mode
 *
 * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
 * format, i.e. from the kernel internal hardware dependent storage format
 * to the requested @copy_mode. UABI XSTATE is always uncompacted!
 *
 * It supports partial copy but @to.pos always starts from zero.
 */
void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
			       u32 pkru_val, enum xstate_copy_mode copy_mode)
{
	const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
	struct xregs_state *xinit = &init_fpstate.regs.xsave;
	struct xregs_state *xsave = &fpstate->regs.xsave;
	struct xstate_header header;
	unsigned int zerofrom;
	u64 mask;
	int i;

	memset(&header, 0, sizeof(header));
	header.xfeatures = xsave->header.xfeatures;

	/* Mask out the feature bits depending on copy mode */
	switch (copy_mode) {
	case XSTATE_COPY_FP:
		header.xfeatures &= XFEATURE_MASK_FP;
		break;

	case XSTATE_COPY_FX:
		header.xfeatures &= XFEATURE_MASK_FP | XFEATURE_MASK_SSE;
		break;

	case XSTATE_COPY_XSAVE:
		header.xfeatures &= fpstate->user_xfeatures;
		break;
	}

	/* Copy FP state up to MXCSR */
	copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
		     &xinit->i387, off_mxcsr);

	/* Copy MXCSR when SSE or YMM are set in the feature mask */
	copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
		     &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
		     MXCSR_AND_FLAGS_SIZE);

	/* Copy the remaining FP state */
	copy_feature(header.xfeatures & XFEATURE_MASK_FP,
		     &to, &xsave->i387.st_space, &xinit->i387.st_space,
		     sizeof(xsave->i387.st_space));

	/* Copy the SSE state - shared with YMM, but independently managed */
	copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
		     &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
		     sizeof(xsave->i387.xmm_space));

	if (copy_mode != XSTATE_COPY_XSAVE)
		goto out;

	/* Zero the padding area */
	membuf_zero(&to, sizeof(xsave->i387.padding));

	/* Copy xsave->i387.sw_reserved */
	membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));

	/* Copy the user space relevant state of @xsave->header */
	membuf_write(&to, &header, sizeof(header));

	zerofrom = offsetof(struct xregs_state, extended_state_area);

	/*
	 * The ptrace buffer is in non-compacted XSAVE format. In
	 * non-compacted format disabled features still occupy state space,
	 * but there is no state to copy from in the compacted
	 * init_fpstate. The gap tracking will zero these states.
	 */
	mask = fpstate->user_xfeatures;

	for_each_extended_xfeature(i, mask) {
		/*
		 * If there was a feature or alignment gap, zero the space
		 * in the destination buffer.
		 */
		if (zerofrom < xstate_offsets[i])
			membuf_zero(&to, xstate_offsets[i] - zerofrom);

		if (i == XFEATURE_PKRU) {
			struct pkru_state pkru = {0};
			/*
			 * PKRU is not necessarily up to date in the
			 * XSAVE buffer. Use the provided value.
			 */
			pkru.pkru = pkru_val;
			membuf_write(&to, &pkru, sizeof(pkru));
		} else {
			copy_feature(header.xfeatures & BIT_ULL(i), &to,
				     __raw_xsave_addr(xsave, i),
				     __raw_xsave_addr(xinit, i),
				     xstate_sizes[i]);
		}
		/*
		 * Keep track of the last copied state in the non-compacted
		 * target buffer for gap zeroing.
		 */
		zerofrom = xstate_offsets[i] + xstate_sizes[i];
	}

out:
	if (to.left)
		membuf_zero(&to, to.left);
}
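/*
 * Gap-zeroing example (illustrative; non-compacted offsets from a
 * typical Intel part): if the MPX features are disabled, YMM ends at
 * offset 576 + 256 = 832 while the AVX-512 opmask component starts at
 * 1088, so membuf_zero() fills the 256-byte hole covering the unused
 * BNDREGS/BNDCSR region before the opmask data is copied.
 */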
/**
 * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
 * @to:		membuf descriptor
 * @tsk:	The task from which to copy the saved xstate
 * @copy_mode:	The requested copy mode
 *
 * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
 * format, i.e. from the kernel internal hardware dependent storage format
 * to the requested @copy_mode. UABI XSTATE is always uncompacted!
 *
 * It supports partial copy but @to.pos always starts from zero.
 */
void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
			     enum xstate_copy_mode copy_mode)
{
	__copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
				  tsk->thread.pkru, copy_mode);
}
static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
			    const void *kbuf, const void __user *ubuf)
{
	if (kbuf) {
		memcpy(dst, kbuf + offset, size);
	} else {
		if (copy_from_user(dst, ubuf + offset, size))
			return -EFAULT;
	}
	return 0;
}
static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
			       const void __user *ubuf)
{
	struct xregs_state *xsave = &fpstate->regs.xsave;
	unsigned int offset, size;
	struct xstate_header hdr;
	u64 mask;
	int i;

	offset = offsetof(struct xregs_state, header);
	if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, ubuf))
		return -EFAULT;

	if (validate_user_xstate_header(&hdr, fpstate))
		return -EINVAL;

	/* Validate MXCSR when any of the related features is in use */
	mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;
	if (hdr.xfeatures & mask) {
		u32 mxcsr[2];

		offset = offsetof(struct fxregs_state, mxcsr);
		if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf))
			return -EFAULT;

		/* Reserved bits in MXCSR must be zero. */
		if (mxcsr[0] & ~mxcsr_feature_mask)
			return -EINVAL;

		/* SSE and YMM require MXCSR even when FP is not in use. */
		if (!(hdr.xfeatures & XFEATURE_MASK_FP)) {
			xsave->i387.mxcsr = mxcsr[0];
			xsave->i387.mxcsr_mask = mxcsr[1];
		}
	}

	for (i = 0; i < XFEATURE_MAX; i++) {
		u64 mask = ((u64)1 << i);

		if (hdr.xfeatures & mask) {
			void *dst = __raw_xsave_addr(xsave, i);

			offset = xstate_offsets[i];
			size = xstate_sizes[i];

			if (copy_from_buffer(dst, offset, size, kbuf, ubuf))
				return -EFAULT;
		}
	}

	/*
	 * The state that came in from userspace was user-state only.
	 * Mask all the user states out of 'xfeatures':
	 */
	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;

	/*
	 * Add back in the features that came in from userspace:
	 */
	xsave->header.xfeatures |= hdr.xfeatures;

	return 0;
}
/*
 * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
 * format and copy to the target thread. Used by ptrace and KVM.
 */
int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf)
{
	return copy_uabi_to_xstate(fpstate, kbuf, NULL);
}

/*
 * Convert from a sigreturn standard-format user-space buffer to kernel
 * XSAVE[S] format and copy to the target thread. This is called from the
 * sigreturn() and rt_sigreturn() system calls.
 */
int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate,
				      const void __user *ubuf)
{
	return copy_uabi_to_xstate(fpstate, NULL, ubuf);
}
static bool validate_independent_components(u64 mask)
{
	u64 xchk;

	if (WARN_ON_FPU(!cpu_feature_enabled(X86_FEATURE_XSAVES)))
		return false;

	xchk = ~xfeatures_mask_independent();

	if (WARN_ON_ONCE(!mask || mask & xchk))
		return false;

	return true;
}
/**
 * xsaves - Save selected components to a kernel xstate buffer
 * @xstate:	Pointer to the buffer
 * @mask:	Feature mask to select the components to save
 *
 * The @xstate buffer must be 64 byte aligned and correctly initialized as
 * XSAVES does not write the full xstate header. Before first use the
 * buffer should be zeroed otherwise a consecutive XRSTORS from that buffer
 * will #GP.
 *
 * The feature mask must be a subset of the independent features.
 */
void xsaves(struct xregs_state *xstate, u64 mask)
{
	int err;

	if (!validate_independent_components(mask))
		return;

	XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err);
	WARN_ON_ONCE(err);
}
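/*
 * Illustrative call (assuming architectural LBR is the only enabled
 * independent feature; "lbr_buf" is a hypothetical 64-byte aligned,
 * zeroed buffer owned by the caller):
 *
 *	xsaves(lbr_buf, XFEATURE_MASK_LBR);
 */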
/**
 * xrstors - Restore selected components from a kernel xstate buffer
 * @xstate:	Pointer to the buffer
 * @mask:	Feature mask to select the components to restore
 *
 * The @xstate buffer must be 64 byte aligned and correctly initialized
 * otherwise XRSTORS from that buffer can #GP.
 *
 * Proper usage is to restore the state which was saved with
 * xsaves() into @xstate.
 *
 * The feature mask must be a subset of the independent features.
 */
void xrstors(struct xregs_state *xstate, u64 mask)
{
	int err;

	if (!validate_independent_components(mask))
		return;

	XSTATE_OP(XRSTORS, xstate, (u32)mask, (u32)(mask >> 32), err);
	WARN_ON_ONCE(err);
}
#if IS_ENABLED(CONFIG_KVM)
void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature)
{
	void *addr = get_xsave_addr(&fps->regs.xsave, xfeature);

	if (addr)
		memset(addr, 0, xstate_sizes[xfeature]);
}
EXPORT_SYMBOL_GPL(fpstate_clear_xstate_component);
#endif
#ifdef CONFIG_X86_64

#ifdef CONFIG_X86_DEBUG_FPU
/*
 * Ensure that a subsequent XSAVE* or XRSTOR* instruction with RFBM=@mask
 * can safely operate on the @fpstate buffer.
 */
static bool xstate_op_valid(struct fpstate *fpstate, u64 mask, bool rstor)
{
	u64 xfd = __this_cpu_read(xfd_state);

	if (fpstate->xfd == xfd)
		return true;

	/*
	 * The XFD MSR does not match fpstate->xfd. That's invalid when
	 * the passed in fpstate is current's fpstate.
	 */
	if (fpstate->xfd == current->thread.fpu.fpstate->xfd)
		return false;

	/*
	 * XRSTOR(S) from init_fpstate are always correct as it will just
	 * bring all components into init state and not read from the
	 * buffer. XSAVE(S) raises #PF after init.
	 */
	if (fpstate == &init_fpstate)
		return rstor;

	/*
	 * XSAVE(S): clone(), fpu_swap_kvm_fpu()
	 * XRSTORS(S): fpu_swap_kvm_fpu()
	 */

	/*
	 * No XSAVE/XRSTOR instructions (except XSAVE itself) touch
	 * the buffer area for XFD-disabled state components.
	 */
	mask &= ~xfd;

	/*
	 * Remove features which are valid in fpstate. They
	 * have space allocated in fpstate.
	 */
	mask &= ~fpstate->xfeatures;

	/*
	 * Any remaining state components in 'mask' might be written
	 * by XSAVE/XRSTOR. Fail validation if any are found.
	 */
	return !mask;
}

void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor)
{
	WARN_ON_ONCE(!xstate_op_valid(fpstate, mask, rstor));
}
#endif /* CONFIG_X86_DEBUG_FPU */
static int __init xfd_update_static_branch(void)
{
	/*
	 * If init_fpstate.xfd has bits set then dynamic features are
	 * available and the dynamic sizing must be enabled.
	 */
	if (init_fpstate.xfd)
		static_branch_enable(&__fpu_state_size_dynamic);
	return 0;
}
arch_initcall(xfd_update_static_branch)
void fpstate_free(struct fpu *fpu)
{
	if (fpu->fpstate && fpu->fpstate != &fpu->__fpstate)
		vfree(fpu->fpstate);
}
/**
 * fpstate_realloc - Reallocate struct fpstate for the requested new features
 *
 * @xfeatures:	A bitmap of xstate features which extend the enabled features
 *		of that task
 * @ksize:	The required size for the kernel buffer
 * @usize:	The required size for user space buffers
 * @guest_fpu:	Pointer to a guest FPU container. NULL for host allocations
 *
 * Note vs. vmalloc(): If the task with a vzalloc()-allocated buffer
 * terminates quickly, vfree()-induced IPIs may be a concern, but tasks
 * with large states are likely to live longer.
 *
 * Returns: 0 on success, -ENOMEM on allocation error.
 */
static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
			   unsigned int usize, struct fpu_guest *guest_fpu)
{
	struct fpu *fpu = &current->thread.fpu;
	struct fpstate *curfps, *newfps = NULL;
	unsigned int fpsize;
	bool in_use;

	fpsize = ksize + ALIGN(offsetof(struct fpstate, regs), 64);

	newfps = vzalloc(fpsize);
	if (!newfps)
		return -ENOMEM;
	newfps->size = ksize;
	newfps->user_size = usize;
	newfps->is_valloc = true;

	/*
	 * When a guest FPU is supplied, use @guest_fpu->fpstate
	 * as the reference, independent of whether it is in use or not.
	 */
	curfps = guest_fpu ? guest_fpu->fpstate : fpu->fpstate;

	/* Determine whether @curfps is the active fpstate */
	in_use = fpu->fpstate == curfps;

	if (guest_fpu) {
		newfps->is_guest = true;
		newfps->is_confidential = curfps->is_confidential;
		newfps->in_use = curfps->in_use;
		guest_fpu->xfeatures |= xfeatures;
		guest_fpu->uabi_size = usize;
	}

	fpregs_lock();
	/*
	 * If @curfps is in use, ensure that the current state is in the
	 * registers before swapping fpstate as that might invalidate it
	 * due to layout changes.
	 */
	if (in_use && test_thread_flag(TIF_NEED_FPU_LOAD))
		fpregs_restore_userregs();

	newfps->xfeatures = curfps->xfeatures | xfeatures;
	if (!guest_fpu)
		newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
	newfps->xfd = curfps->xfd & ~xfeatures;

	/* Do the final updates within the locked region */
	xstate_init_xcomp_bv(&newfps->regs.xsave, newfps->xfeatures);

	if (guest_fpu) {
		guest_fpu->fpstate = newfps;
		/* If curfps is active, update the FPU fpstate pointer */
		if (in_use)
			fpu->fpstate = newfps;
	} else {
		fpu->fpstate = newfps;
	}

	if (in_use)
		xfd_update_state(fpu->fpstate);

	fpregs_unlock();

	/* Only free valloc'ed state */
	if (curfps && curfps->is_valloc)
		vfree(curfps);

	return 0;
}
static int validate_sigaltstack(unsigned int usize)
{
	struct task_struct *thread, *leader = current->group_leader;
	unsigned long framesize = get_sigframe_size();

	lockdep_assert_held(&current->sighand->siglock);

	/* get_sigframe_size() is based on fpu_user_cfg.max_size */
	framesize -= fpu_user_cfg.max_size;
	framesize += usize;
	for_each_thread(leader, thread) {
		if (thread->sas_ss_size && thread->sas_ss_size < framesize)
			return -ENOSPC;
	}
	return 0;
}
static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
{
	/*
	 * This deliberately does not exclude !XSAVES as we still might
	 * decide to optionally context switch XCR0 or talk the silicon
	 * vendors into extending XFD for the pre AMX states, especially
	 * AVX512.
	 */
	bool compacted = cpu_feature_enabled(X86_FEATURE_XSAVES);
	struct fpu *fpu = &current->group_leader->thread.fpu;
	struct fpu_state_perm *perm;
	unsigned int ksize, usize;
	u64 mask;
	int ret;

	/* Check whether fully enabled */
	if ((permitted & requested) == requested)
		return 0;

	/* Calculate the resulting kernel state size */
	mask = permitted | requested;
	ksize = xstate_calculate_size(mask, compacted);

	/* Calculate the resulting user state size */
	mask &= XFEATURE_MASK_USER_SUPPORTED;
	usize = xstate_calculate_size(mask, false);

	ret = validate_sigaltstack(usize);
	if (ret)
		return ret;

	perm = guest ? &fpu->guest_perm : &fpu->perm;
	/* Pairs with the READ_ONCE() in xstate_get_group_perm() */
	WRITE_ONCE(perm->__state_perm, mask);
	/* Protected by sighand lock */
	perm->__state_size = ksize;
	perm->__user_state_size = usize;

	return 0;
}
/*
 * Permissions array to map facilities with more than one component
 */
static const u64 xstate_prctl_req[XFEATURE_MAX] = {
	[XFEATURE_XTILE_DATA] = XFEATURE_MASK_XTILE_DATA,
};

static int xstate_request_perm(unsigned long idx, bool guest)
{
	u64 permitted, requested;
	int ret;

	if (idx >= XFEATURE_MAX)
		return -EINVAL;

	/*
	 * Look up the facility mask which can require more than
	 * one xstate component.
	 */
	idx = array_index_nospec(idx, ARRAY_SIZE(xstate_prctl_req));
	requested = xstate_prctl_req[idx];
	if (!requested)
		return -EOPNOTSUPP;

	if ((fpu_user_cfg.max_features & requested) != requested)
		return -EOPNOTSUPP;

	/* Lockless quick check */
	permitted = xstate_get_group_perm(guest);
	if ((permitted & requested) == requested)
		return 0;

	/* Protect against concurrent modifications */
	spin_lock_irq(&current->sighand->siglock);
	permitted = xstate_get_group_perm(guest);

	/* First vCPU allocation locks the permissions. */
	if (guest && (permitted & FPU_GUEST_PERM_LOCKED))
		ret = -EBUSY;
	else
		ret = __xstate_request_perm(permitted, requested, guest);
	spin_unlock_irq(&current->sighand->siglock);
	return ret;
}
int __xfd_enable_feature(u64 xfd_err, struct fpu_guest *guest_fpu)
{
	u64 xfd_event = xfd_err & XFEATURE_MASK_USER_DYNAMIC;
	struct fpu_state_perm *perm;
	unsigned int ksize, usize;
	struct fpu *fpu;

	if (!xfd_event) {
		pr_err_once("XFD: Invalid xfd error: %016llx\n", xfd_err);
		return 0;
	}

	/* Protect against concurrent modifications */
	spin_lock_irq(&current->sighand->siglock);

	/* If not permitted let it die */
	if ((xstate_get_group_perm(!!guest_fpu) & xfd_event) != xfd_event) {
		spin_unlock_irq(&current->sighand->siglock);
		return -EPERM;
	}

	fpu = &current->group_leader->thread.fpu;
	perm = guest_fpu ? &fpu->guest_perm : &fpu->perm;
	ksize = perm->__state_size;
	usize = perm->__user_state_size;

	/*
	 * The feature is permitted. State size is sufficient. Dropping
	 * the lock is safe here even if more features are added from
	 * another task, the retrieved buffer sizes are valid for the
	 * currently requested feature(s).
	 */
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Try to allocate a new fpstate. If that fails there is no way
	 * out.
	 */
	if (fpstate_realloc(xfd_event, ksize, usize, guest_fpu))
		return -EFAULT;
	return 0;
}

int xfd_enable_feature(u64 xfd_err)
{
	return __xfd_enable_feature(xfd_err, NULL);
}
#else /* CONFIG_X86_64 */
static inline int xstate_request_perm(unsigned long idx, bool guest)
{
	return -EPERM;
}
#endif  /* !CONFIG_X86_64 */
u64 xstate_get_guest_group_perm(void)
{
	return xstate_get_group_perm(true);
}
EXPORT_SYMBOL_GPL(xstate_get_guest_group_perm);
/**
 * fpu_xstate_prctl - xstate permission operations
 * @tsk:	Redundant pointer to current
 * @option:	A subfunction of arch_prctl()
 * @arg2:	option argument
 * Return:	0 if successful; otherwise, an error code
 *
 * Option arguments:
 *
 * ARCH_GET_XCOMP_SUPP: Pointer to user space u64 to store the info
 * ARCH_GET_XCOMP_PERM: Pointer to user space u64 to store the info
 * ARCH_REQ_XCOMP_PERM: Facility number requested
 *
 * For facilities which require more than one XSTATE component, the request
 * must be the highest state component number related to that facility,
 * e.g. for AMX which requires XFEATURE_XTILE_CFG(17) and
 * XFEATURE_XTILE_DATA(18) this would be XFEATURE_XTILE_DATA(18).
 */
long fpu_xstate_prctl(struct task_struct *tsk, int option, unsigned long arg2)
{
	u64 __user *uptr = (u64 __user *)arg2;
	u64 permitted, supported;
	unsigned long idx = arg2;
	bool guest = false;

	if (tsk != current)
		return -EPERM;

	switch (option) {
	case ARCH_GET_XCOMP_SUPP:
		supported = fpu_user_cfg.max_features | fpu_user_cfg.legacy_features;
		return put_user(supported, uptr);

	case ARCH_GET_XCOMP_PERM:
		/*
		 * Lockless snapshot as it can also change right after
		 * dropping the lock.
		 */
		permitted = xstate_get_host_group_perm();
		permitted &= XFEATURE_MASK_USER_SUPPORTED;
		return put_user(permitted, uptr);

	case ARCH_GET_XCOMP_GUEST_PERM:
		permitted = xstate_get_guest_group_perm();
		permitted &= XFEATURE_MASK_USER_SUPPORTED;
		return put_user(permitted, uptr);

	case ARCH_REQ_XCOMP_GUEST_PERM:
		guest = true;
		fallthrough;

	case ARCH_REQ_XCOMP_PERM:
		if (!IS_ENABLED(CONFIG_X86_64))
			return -EOPNOTSUPP;

		return xstate_request_perm(idx, guest);

	default:
		return -EINVAL;
	}
}
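/*
 * Illustrative user-space sequence (not kernel code; constants from
 * <asm/prctl.h>, raw syscall used since libc may not wrap arch_prctl()):
 *
 *	u64 supported;
 *
 *	syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &supported);
 *	if (supported & (1ULL << 18))	// XFEATURE_XTILE_DATA
 *		syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, 18);
 */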
#ifdef CONFIG_PROC_PID_ARCH_STATUS
/*
 * Report the amount of time elapsed in milliseconds since last AVX512
 * use in the task.
 */
static void avx512_status(struct seq_file *m, struct task_struct *task)
{
	unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
	long delta;

	if (!timestamp) {
		/*
		 * Report -1 if no AVX512 usage
		 */
		delta = -1;
	} else {
		delta = (long)(jiffies - timestamp);
		/*
		 * Cap to LONG_MAX if time difference > LONG_MAX
		 */
		if (delta < 0)
			delta = LONG_MAX;
		delta = jiffies_to_msecs(delta);
	}

	seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta);
	seq_putc(m, '\n');
}
/*
 * Report architecture specific information
 */
int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task)
{
	/*
	 * Report AVX512 state if the processor and build options support it.
	 */
	if (cpu_feature_enabled(X86_FEATURE_AVX512F))
		avx512_status(m, task);

	return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */