// SPDX-License-Identifier: GPL-2.0-only
/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/pkeys.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/xcr.h>

#include <asm/tlbflush.h>
#define for_each_extended_xfeature(bit, mask)				\
	(bit) = FIRST_EXTENDED_XFEATURE;				\
	for_each_set_bit_from(bit, (unsigned long *)&(mask), 8 * sizeof(mask))
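/*
 * Illustrative sketch (not part of the original file): the macro above
 * expands into the initializer and loop header of a for-style loop, so a
 * typical caller iterates all enabled extended features like this:
 *
 *	unsigned int i;
 *
 *	for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
 *		pr_info("extended xfeature %u is enabled\n", i);
 *	}
 *
 * Legacy FP and SSE (bits 0 and 1) are skipped because iteration starts
 * at FIRST_EXTENDED_XFEATURE.
 */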
/*
 * Although we spell it out in here, the Processor Trace
 * xfeature is completely unused. We use other mechanisms
 * to save/restore PT state in Linux.
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers",
	"SSE registers",
	"AVX registers",
	"MPX bounds registers",
	"MPX CSR",
	"AVX-512 opmask",
	"AVX-512 Hi256",
	"AVX-512 ZMM_Hi256",
	"Processor Trace (unused)",
	"Protection Keys User registers",
	"PASID state",
	"unknown xstate feature",
};
/* Indexed by xfeature number: entry i is the CPUID feature required for xstate component i. */
static short xsave_cpuid_features[] __initdata = {
	X86_FEATURE_FPU,
	X86_FEATURE_XMM,
	X86_FEATURE_AVX,
	X86_FEATURE_MPX,
	X86_FEATURE_MPX,
	X86_FEATURE_AVX512F,
	X86_FEATURE_AVX512F,
	X86_FEATURE_AVX512F,
	X86_FEATURE_INTEL_PT,
	X86_FEATURE_PKU,
	X86_FEATURE_ENQCMD,
};
static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
	{ [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
	{ [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_comp_offsets[XFEATURE_MAX] __ro_after_init =
	{ [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] __ro_after_init =
	{ [ 0 ... XFEATURE_MAX - 1] = -1};
/*
 * Return whether the system supports a given xfeature.
 *
 * Also return the name of the (most advanced) feature that the caller requested:
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
	u64 xfeatures_missing = xfeatures_needed & ~fpu_kernel_cfg.max_features;

	if (unlikely(feature_name)) {
		long xfeature_idx, max_idx;
		u64 xfeatures_print;
		/*
		 * Use fls64() here so we can print the most advanced
		 * feature that was requested but is missing. So if a driver
		 * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the
		 * missing AVX feature - this is the most informative message
		 * to users:
		 */
		if (xfeatures_missing)
			xfeatures_print = xfeatures_missing;
		else
			xfeatures_print = xfeatures_needed;

		xfeature_idx = fls64(xfeatures_print) - 1;
		max_idx = ARRAY_SIZE(xfeature_names) - 1;
		xfeature_idx = min(xfeature_idx, max_idx);

		*feature_name = xfeature_names[xfeature_idx];
	}

	if (xfeatures_missing)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
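/*
 * Illustrative sketch (not in the original file): a hypothetical driver
 * that needs AVX state support could check for it like this:
 *
 *	const char *name;
 *
 *	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &name)) {
 *		pr_info("mydrv: required xfeature '%s' is not supported\n", name);
 *		return -ENODEV;
 *	}
 *
 * On failure, @name points at the name of the most advanced missing feature.
 */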
static bool xfeature_is_supervisor(int xfeature_nr)
{
	/*
	 * Extended State Enumeration Sub-leaves (EAX = 0DH, ECX = n, n > 1)
	 * return ECX[0] set to (1) for a supervisor state, and cleared (0)
	 * for a user state.
	 */
	u32 eax, ebx, ecx, edx;

	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return ecx & 1;
}
/*
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
 */
void fpu__init_cpu_xstate(void)
{
	if (!boot_cpu_has(X86_FEATURE_XSAVE) || !fpu_kernel_cfg.max_features)
		return;

	cr4_set_bits(X86_CR4_OSXSAVE);

	/*
	 * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
	 * managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user
	 * states can be set here.
	 */
	xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features);

	/*
	 * MSR_IA32_XSS sets supervisor states managed by XSAVES.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
				     xfeatures_mask_independent());
	}
}
static bool xfeature_enabled(enum xfeature xfeature)
{
	return fpu_kernel_cfg.max_features & BIT_ULL(xfeature);
}
/*
 * Record the offsets and sizes of various xstates contained
 * in the XSAVE state memory layout.
 */
static void __init setup_xstate_features(void)
{
	u32 eax, ebx, ecx, edx, i;
	/* start at the beginning of the "extended state" */
	unsigned int last_good_offset = offsetof(struct xregs_state,
						 extended_state_area);
	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_offsets[XFEATURE_FP]	= 0;
	xstate_sizes[XFEATURE_FP]	= offsetof(struct fxregs_state,
						   xmm_space);

	xstate_offsets[XFEATURE_SSE]	= xstate_sizes[XFEATURE_FP];
	xstate_sizes[XFEATURE_SSE]	= sizeof_field(struct fxregs_state,
						       xmm_space);

	for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);

		xstate_sizes[i] = eax;

		/*
		 * If an xfeature is supervisor state, the offset in EBX is
		 * invalid, leave it to -1.
		 */
		if (xfeature_is_supervisor(i))
			continue;

		xstate_offsets[i] = ebx;

		/*
		 * In our xstate size checks, we assume that the highest-numbered
		 * xstate feature has the highest offset in the buffer. Ensure
		 * it does.
		 */
		WARN_ONCE(last_good_offset > xstate_offsets[i],
			  "x86/fpu: misordered xstate at %d\n", last_good_offset);

		last_good_offset = xstate_offsets[i];
	}
}
static void __init print_xstate_feature(u64 xstate_mask)
{
	const char *feature_name;

	if (cpu_has_xfeatures(xstate_mask, &feature_name))
		pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name);
}
/*
 * Print out all the supported xstate features:
 */
static void __init print_xstate_features(void)
{
	print_xstate_feature(XFEATURE_MASK_FP);
	print_xstate_feature(XFEATURE_MASK_SSE);
	print_xstate_feature(XFEATURE_MASK_YMM);
	print_xstate_feature(XFEATURE_MASK_BNDREGS);
	print_xstate_feature(XFEATURE_MASK_BNDCSR);
	print_xstate_feature(XFEATURE_MASK_OPMASK);
	print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
	print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
	print_xstate_feature(XFEATURE_MASK_PKRU);
	print_xstate_feature(XFEATURE_MASK_PASID);
}
/*
 * This check is important because it is easy to get XSTATE_*
 * confused with XSTATE_BIT_*.
 */
#define CHECK_XFEATURE(nr) do {			\
	WARN_ON(nr < FIRST_EXTENDED_XFEATURE);	\
	WARN_ON(nr >= XFEATURE_MAX);		\
} while (0)
/*
 * We could cache this like xstate_size[], but we only use
 * it here, so it would be a waste of space.
 */
static int xfeature_is_aligned(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	CHECK_XFEATURE(xfeature_nr);

	if (!xfeature_enabled(xfeature_nr)) {
		WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
			  xfeature_nr);
		return 0;
	}

	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	/*
	 * The value returned by ECX[1] indicates the alignment
	 * of state component 'i' when the compacted format
	 * of the extended region of an XSAVE area is used:
	 */
	return !!(ecx & 2);
}
/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave area.
 */
static void __init setup_xstate_comp_offsets(void)
{
	unsigned int next_offset;
	int i;

	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[XFEATURE_FP] = 0;
	xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
						     xmm_space);

	if (!cpu_feature_enabled(X86_FEATURE_XSAVES)) {
		for_each_extended_xfeature(i, fpu_kernel_cfg.max_features)
			xstate_comp_offsets[i] = xstate_offsets[i];
		return;
	}

	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
		if (xfeature_is_aligned(i))
			next_offset = ALIGN(next_offset, 64);

		xstate_comp_offsets[i] = next_offset;
		next_offset += xstate_sizes[i];
	}
}
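/*
 * Worked example (illustrative, not from the original file): with XSAVES
 * enabled and only YMM (256 bytes) plus a hypothetical 64-byte-aligned
 * feature F of 128 bytes enabled, the compacted layout computed above is:
 *
 *	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;	512 + 64 = 576
 *	xstate_comp_offsets[XFEATURE_YMM] = 576;	next_offset = 832
 *	next_offset = ALIGN(832, 64) = 832;		F requires alignment
 *	xstate_comp_offsets[F] = 832;			next_offset = 960
 *
 * Disabled features simply never appear, which is why the compacted
 * kernel buffer can be smaller than the fixed uncompacted layout.
 */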
/*
 * Setup offsets of a supervisor-state-only XSAVES buffer:
 *
 * The offsets stored in xstate_comp_offsets[] only work for one specific
 * value of the Requested Feature BitMap (RFBM). In cases where a different
 * RFBM value is used, a different set of offsets is required. This set of
 * offsets is for when RFBM=xfeatures_mask_supervisor().
 */
static void __init setup_supervisor_only_offsets(void)
{
	unsigned int next_offset;
	int i;

	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
		if (!xfeature_is_supervisor(i))
			continue;

		/* Align from the end of the previous feature */
		if (xfeature_is_aligned(i))
			next_offset = ALIGN(next_offset, 64);

		xstate_supervisor_only_offsets[i] = next_offset;
		next_offset += xstate_sizes[i];
	}
}
/*
 * Print out xstate component offsets and sizes
 */
static void __init print_xstate_offset_size(void)
{
	int i;

	for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
		pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
			i, xstate_comp_offsets[i], i, xstate_sizes[i]);
	}
}
/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives can not be used yet.
 */
static __init void os_xrstor_booting(struct xregs_state *xstate)
{
	u64 mask = fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSTATE;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/*
	 * We should never fault when copying from a kernel buffer, and the FPU
	 * state we set at boot time should be valid.
	 */
	WARN_ON_FPU(err);
}
/*
 * All supported features have either init state all zeros or are
 * handled in setup_init_fpu_buf() individually. This is an explicit
 * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
 * newly added supported features at build time and make people
 * actually look at the init state for the new feature.
 */
#define XFEATURES_INIT_FPSTATE_HANDLED		\
	(XFEATURE_MASK_FP |			\
	 XFEATURE_MASK_SSE |			\
	 XFEATURE_MASK_YMM |			\
	 XFEATURE_MASK_OPMASK |			\
	 XFEATURE_MASK_ZMM_Hi256 |		\
	 XFEATURE_MASK_Hi16_ZMM |		\
	 XFEATURE_MASK_PKRU |			\
	 XFEATURE_MASK_BNDREGS |		\
	 XFEATURE_MASK_BNDCSR |			\
	 XFEATURE_MASK_PASID)
/*
 * Set up the xstate image representing the init state.
 */
static void __init setup_init_fpu_buf(void)
{
	BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
		      XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
		     XFEATURES_INIT_FPSTATE_HANDLED);

	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return;

	setup_xstate_features();
	print_xstate_features();

	xstate_init_xcomp_bv(&init_fpstate.regs.xsave, fpu_kernel_cfg.max_features);

	/*
	 * Init all the feature states with header.xfeatures being 0x0
	 */
	os_xrstor_booting(&init_fpstate.regs.xsave);

	/*
	 * All components are now in init state. Read the state back so
	 * that init_fpstate contains all non-zero init state. This only
	 * works with FXSAVE, but not with XSAVEOPT and XSAVES because
	 * those use the init optimization which skips writing data for
	 * components in init state.
	 *
	 * XSAVE could be used, but that would require reshuffling the
	 * data when XSAVES is available because XSAVES uses xstate
	 * compaction. Doing so is a pointless exercise anyway because most
	 * components have an all-zeros init state except for the legacy
	 * ones (FP and SSE). Those can be saved with FXSAVE into the
	 * legacy area. Adding a new feature requires ensuring that its
	 * init state is all zeroes, or adding the necessary handling
	 * here if it is not.
	 */
	fxsave(&init_fpstate.regs.fxsave);
}
static int xfeature_uncompacted_offset(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	/*
	 * Only XSAVES supports supervisor states and it uses compacted
	 * format. Checking a supervisor state's uncompacted offset is
	 * an error.
	 */
	if (XFEATURE_MASK_SUPERVISOR_ALL & BIT_ULL(xfeature_nr)) {
		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
		return -1;
	}

	CHECK_XFEATURE(xfeature_nr);
	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return ebx;
}
int xfeature_size(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	CHECK_XFEATURE(xfeature_nr);
	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return eax;
}
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
static int validate_user_xstate_header(const struct xstate_header *hdr,
				       struct fpstate *fpstate)
{
	/* No unknown or supervisor features may be set */
	if (hdr->xfeatures & ~fpstate->user_xfeatures)
		return -EINVAL;

	/* Userspace must use the uncompacted format */
	if (hdr->xcomp_bv)
		return -EINVAL;

	/*
	 * If 'reserved' is shrunk to add a new field, make sure to validate
	 * that new field here!
	 */
	BUILD_BUG_ON(sizeof(hdr->reserved) != 48);

	/* No reserved bits may be set */
	if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	return 0;
}
static void __init __xstate_dump_leaves(void)
{
	int i;
	u32 eax, ebx, ecx, edx;
	static int should_dump = 1;

	if (!should_dump)
		return;
	should_dump = 0;
	/*
	 * Dump out a few leaves past the ones that we support
	 * just in case there are some goodies up there
	 */
	for (i = 0; i < XFEATURE_MAX + 10; i++) {
		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
		pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
			XSTATE_CPUID, i, eax, ebx, ecx, edx);
	}
}
#define XSTATE_WARN_ON(x) do {							\
	if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) {	\
		__xstate_dump_leaves();						\
	}									\
} while (0)

#define XCHECK_SZ(sz, nr, nr_macro, __struct) do {			\
	if ((nr == nr_macro) &&						\
	    WARN_ONCE(sz != sizeof(__struct),				\
		"%s: struct is %zu bytes, cpu state %d bytes\n",	\
		__stringify(nr_macro), sizeof(__struct), sz)) {		\
		__xstate_dump_leaves();					\
	}								\
} while (0)
/*
 * We have a C struct for each 'xstate'. We need to ensure
 * that our software representation matches what the CPU
 * tells us about the state's size.
 */
static bool __init check_xstate_against_struct(int nr)
{
	/*
	 * Ask the CPU for the size of the state.
	 */
	int sz = xfeature_size(nr);
	/*
	 * Match each CPU state with the corresponding software
	 * structure.
	 */
	XCHECK_SZ(sz, nr, XFEATURE_YMM,       struct ymmh_struct);
	XCHECK_SZ(sz, nr, XFEATURE_BNDREGS,   struct mpx_bndreg_state);
	XCHECK_SZ(sz, nr, XFEATURE_BNDCSR,    struct mpx_bndcsr_state);
	XCHECK_SZ(sz, nr, XFEATURE_OPMASK,    struct avx_512_opmask_state);
	XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
	XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM,  struct avx_512_hi16_state);
	XCHECK_SZ(sz, nr, XFEATURE_PKRU,      struct pkru_state);
	XCHECK_SZ(sz, nr, XFEATURE_PASID,     struct ia32_pasid_state);

	/*
	 * Make *SURE* to add any feature numbers in below if
	 * there are "holes" in the xsave state component
	 * numbers.
	 */
	if ((nr < XFEATURE_YMM) ||
	    (nr >= XFEATURE_MAX) ||
	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
	    ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_LBR))) {
		WARN_ONCE(1, "no structure for xstate: %d\n", nr);
		XSTATE_WARN_ON(1);
		return false;
	}
	return true;
}
/*
 * This essentially double-checks what the cpu told us about
 * how large the XSAVE buffer needs to be. We are recalculating
 * it to be safe.
 *
 * Independent XSAVE features allocate their own buffers and are not
 * covered by these checks. Only the size of the buffer for task->fpu
 * is checked here.
 */
static bool __init paranoid_xstate_size_valid(unsigned int kernel_size)
{
	bool compacted = cpu_feature_enabled(X86_FEATURE_XSAVES);
	unsigned int size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	int i;

	for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
		if (!check_xstate_against_struct(i))
			return false;
		/*
		 * Supervisor state components can be managed only by
		 * XSAVES.
		 */
		if (!compacted && xfeature_is_supervisor(i)) {
			XSTATE_WARN_ON(1);
			return false;
		}

		/* Align from the end of the previous feature */
		if (xfeature_is_aligned(i))
			size = ALIGN(size, 64);
		/*
		 * In compacted format the enabled features are packed,
		 * i.e. disabled features do not occupy space.
		 *
		 * In non-compacted format the offsets are fixed and
		 * disabled states still occupy space in the memory buffer.
		 */
		if (!compacted)
			size = xfeature_uncompacted_offset(i);
		/*
		 * Add the feature size even for non-compacted format
		 * to make the end result correct
		 */
		size += xfeature_size(i);
	}
	XSTATE_WARN_ON(size != kernel_size);
	return size == kernel_size;
}
/*
 * Get total size of enabled xstates in XCR0 | IA32_XSS.
 *
 * Note the SDM's wording here. "sub-function 0" only enumerates
 * the size of the *user* states. If we use it to size a buffer
 * that we use 'XSAVES' on, we could potentially overflow the
 * buffer because 'XSAVES' saves system states too.
 */
static unsigned int __init get_xsaves_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	/*
	 * - CPUID function 0DH, sub-function 1:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVES instruction for an XSAVE area
	 *    containing all the state components
	 *    corresponding to bits currently set in
	 *    XCR0 | IA32_XSS.
	 */
	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
	return ebx;
}
/*
 * Get the total size of the enabled xstates without the independent supervisor
 * features.
 */
static unsigned int __init get_xsaves_size_no_independent(void)
{
	u64 mask = xfeatures_mask_independent();
	unsigned int size;

	if (!mask)
		return get_xsaves_size();

	/* Disable independent features. */
	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());

	/*
	 * Ask the hardware what size is required of the buffer.
	 * This is the size required for the task->fpu buffer.
	 */
	size = get_xsaves_size();

	/* Re-enable independent features so XSAVES will work on them again. */
	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);

	return size;
}
static unsigned int __init get_xsave_size_user(void)
{
	unsigned int eax, ebx, ecx, edx;
	/*
	 * - CPUID function 0DH, sub-function 0:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVE instruction for an XSAVE area
	 *    containing all the *user* state components
	 *    corresponding to bits currently set in XCR0.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	return ebx;
}
/*
 * Will the runtime-enumerated 'xstate_size' fit in the init
 * task's statically-allocated buffer?
 */
static bool __init is_supported_xstate_size(unsigned int test_xstate_size)
{
	if (test_xstate_size <= sizeof(init_fpstate.regs))
		return true;

	pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
		sizeof(init_fpstate.regs), test_xstate_size);
	return false;
}
static int __init init_xstate_size(void)
{
	/* Recompute the context size for enabled features: */
	unsigned int user_size, kernel_size;

	/* Uncompacted user space size */
	user_size = get_xsave_size_user();

	/*
	 * XSAVES kernel size includes supervisor states and
	 * uses compacted format.
	 *
	 * XSAVE does not support supervisor states so
	 * kernel and user size is identical.
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		kernel_size = get_xsaves_size_no_independent();
	else
		kernel_size = user_size;

	/* Ensure we have the space to store all enabled features. */
	if (!is_supported_xstate_size(kernel_size))
		return -EINVAL;

	if (!paranoid_xstate_size_valid(kernel_size))
		return -EINVAL;

	/* Keep it the same for now */
	fpu_kernel_cfg.max_size = kernel_size;
	fpu_kernel_cfg.default_size = kernel_size;
	fpu_user_cfg.max_size = user_size;
	fpu_user_cfg.default_size = user_size;

	return 0;
}
/*
 * We enabled the XSAVE hardware, but something went wrong and
 * we can not use it. Disable it.
 */
static void __init fpu__init_disable_system_xstate(unsigned int legacy_size)
{
	fpu_kernel_cfg.max_features = 0;
	cr4_clear_bits(X86_CR4_OSXSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);

	/* Restore the legacy size. */
	fpu_kernel_cfg.max_size = legacy_size;
	fpu_kernel_cfg.default_size = legacy_size;
	fpu_user_cfg.max_size = legacy_size;
	fpu_user_cfg.default_size = legacy_size;

	fpstate_reset(&current->thread.fpu);
}
/*
 * Enable and initialize the xsave feature.
 * Called once per system bootup.
 */
void __init fpu__init_system_xstate(unsigned int legacy_size)
{
	unsigned int eax, ebx, ecx, edx;
	u64 xfeatures;
	int err;
	int i;

	if (!boot_cpu_has(X86_FEATURE_FPU)) {
		pr_info("x86/fpu: No FPU detected\n");
		return;
	}

	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
		pr_info("x86/fpu: x87 FPU will use %s\n",
			boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
		return;
	}

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		WARN_ON_FPU(1);
		return;
	}

	/*
	 * Find user xstates supported by the processor.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	fpu_kernel_cfg.max_features = eax + ((u64)edx << 32);

	/*
	 * Find supervisor xstates supported by the processor.
	 */
	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
	fpu_kernel_cfg.max_features |= ecx + ((u64)edx << 32);

	if ((fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
		/*
		 * This indicates that something really unexpected happened
		 * with the enumeration. Disable XSAVE and try to continue
		 * booting without it. This is too early to BUG().
		 */
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n",
		       fpu_kernel_cfg.max_features);
		goto out_disable;
	}

	/*
	 * Clear XSAVE features that are disabled in the normal CPUID.
	 */
	for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
		if (!boot_cpu_has(xsave_cpuid_features[i]))
			fpu_kernel_cfg.max_features &= ~BIT_ULL(i);
	}

	fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED |
				       XFEATURE_MASK_SUPERVISOR_SUPPORTED;

	fpu_user_cfg.max_features = fpu_kernel_cfg.max_features;
	fpu_user_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED;

	/* Identical for now */
	fpu_kernel_cfg.default_features = fpu_kernel_cfg.max_features;
	fpu_user_cfg.default_features = fpu_user_cfg.max_features;

	/* Store it for the paranoia check at the end */
	xfeatures = fpu_kernel_cfg.max_features;

	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();
	err = init_xstate_size();
	if (err)
		goto out_disable;

	fpstate_reset(&current->thread.fpu);

	/*
	 * Update info used for ptrace frames; use standard-format size and no
	 * supervisor xstates:
	 */
	update_regset_xstate_info(fpu_user_cfg.max_size,
				  fpu_user_cfg.max_features);

	fpu__init_prepare_fx_sw_frame();
	setup_init_fpu_buf();
	setup_xstate_comp_offsets();
	setup_supervisor_only_offsets();

	/*
	 * Paranoia check whether something in the setup modified the
	 * xfeatures mask.
	 */
	if (xfeatures != fpu_kernel_cfg.max_features) {
		pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n",
		       xfeatures, fpu_kernel_cfg.max_features);
		goto out_disable;
	}

	print_xstate_offset_size();
	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
		fpu_kernel_cfg.max_features,
		fpu_kernel_cfg.max_size,
		boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
	return;

out_disable:
	/* something went wrong, try to boot without any XSAVE support */
	fpu__init_disable_system_xstate(legacy_size);
}
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVE))
		xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features);

	/*
	 * Restore IA32_XSS. The same CPUID bit enumerates support
	 * of XSAVES and MSR_IA32_XSS.
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
				     xfeatures_mask_independent());
	}
}
/*
 * Given an xstate feature nr, calculate where in the xsave
 * buffer the state is. Callers should ensure that the buffer
 * is valid.
 */
static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	if (!xfeature_enabled(xfeature_nr)) {
		WARN_ON_FPU(1);
		return NULL;
	}

	return (void *)xsave + xstate_comp_offsets[xfeature_nr];
}
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Note that if there is no data for the field in the xsave buffer
 * this will return NULL.
 *
 * Inputs:
 *	xstate: the thread's storage area for all FPU data
 *	xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
 *	XFEATURE_SSE, etc...)
 * Output:
 *	address of the state in the xsave area, or NULL if the
 *	field is not present in the xsave buffer.
 */
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	/*
	 * Do we even *have* xsave state?
	 */
	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return NULL;

	/*
	 * We should not ever be requesting features that we
	 * have not enabled.
	 */
	WARN_ONCE(!(fpu_kernel_cfg.max_features & BIT_ULL(xfeature_nr)),
		  "get of unsupported state");
	/*
	 * This assumes the last 'xsave*' instruction to
	 * have requested that 'xfeature_nr' be saved.
	 * If it did not, we might be seeing an old value
	 * of the field in the buffer.
	 *
	 * This can happen because the last 'xsave' did not
	 * request that this feature be saved (unlikely)
	 * or because the "init optimization" caused it
	 * to not be saved.
	 */
	if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
		return NULL;

	return __raw_xsave_addr(xsave, xfeature_nr);
}
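/*
 * Illustrative sketch (not in the original file): once a task's FPU state
 * has been saved to its xsave buffer, a hypothetical in-kernel user could
 * look up the saved PKRU component like this:
 *
 *	struct pkru_state *pk;
 *
 *	pk = get_xsave_addr(&fpu->fpstate->regs.xsave, XFEATURE_PKRU);
 *	if (pk)
 *		pr_info("saved pkru: %x\n", pk->pkru);
 *
 * A NULL return means the component was not written by the last XSAVE*
 * (e.g. due to the init optimization) and its init value must be assumed.
 */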
#ifdef CONFIG_ARCH_HAS_PKEYS

/*
 * This will go out and modify the PKRU register to set the access
 * rights for @pkey to @init_val.
 */
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
			      unsigned long init_val)
{
	u32 old_pkru, new_pkru_bits = 0;
	int pkey_shift;

	/*
	 * This check implies XSAVE support. OSPKE only gets
	 * set if we enable XSAVE and we enable PKU in XCR0.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return -EINVAL;

	/*
	 * This code should only be called with valid 'pkey'
	 * values originating from in-kernel users. Complain
	 * if a bad value is observed.
	 */
	if (WARN_ON_ONCE(pkey >= arch_max_pkey()))
		return -EINVAL;

	/* Set the bits we need in PKRU: */
	if (init_val & PKEY_DISABLE_ACCESS)
		new_pkru_bits |= PKRU_AD_BIT;
	if (init_val & PKEY_DISABLE_WRITE)
		new_pkru_bits |= PKRU_WD_BIT;

	/* Shift the bits in to the correct place in PKRU for pkey: */
	pkey_shift = pkey * PKRU_BITS_PER_PKEY;
	new_pkru_bits <<= pkey_shift;

	/* Get old PKRU and mask off any old bits in place: */
	old_pkru = read_pkru();
	old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);

	/* Write old part along with new part: */
	write_pkru(old_pkru | new_pkru_bits);

	return 0;
}
#endif /* CONFIG_ARCH_HAS_PKEYS */
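/*
 * Worked example (illustrative, not from the original file): for pkey 2
 * and init_val = PKEY_DISABLE_WRITE, arch_set_user_pkey_access() computes:
 *
 *	new_pkru_bits = PKRU_WD_BIT;			WD only
 *	pkey_shift    = 2 * PKRU_BITS_PER_PKEY;		2 * 2 = 4
 *	new_pkru_bits <<= 4;				0x20
 *
 * so bits [5:4] of PKRU end up as 0b10: reads allowed, writes denied for
 * all user mappings tagged with protection key 2.
 */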
static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
			 void *init_xstate, unsigned int size)
{
	membuf_write(to, from_xstate ? xstate : init_xstate, size);
}
/**
 * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
 * @to:		membuf descriptor
 * @fpstate:	The fpstate buffer from which to copy
 * @pkru_val:	The PKRU value to store in the PKRU component
 * @copy_mode:	The requested copy mode
 *
 * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
 * format, i.e. from the kernel internal hardware dependent storage format
 * to the requested @copy_mode. UABI XSTATE is always uncompacted!
 *
 * It supports partial copy but @to.pos always starts from zero.
 */
void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
			       u32 pkru_val, enum xstate_copy_mode copy_mode)
{
	const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
	struct xregs_state *xinit = &init_fpstate.regs.xsave;
	struct xregs_state *xsave = &fpstate->regs.xsave;
	struct xstate_header header;
	unsigned int zerofrom;
	u64 mask;
	int i;

	memset(&header, 0, sizeof(header));
	header.xfeatures = xsave->header.xfeatures;

	/* Mask out the feature bits depending on copy mode */
	switch (copy_mode) {
	case XSTATE_COPY_FP:
		header.xfeatures &= XFEATURE_MASK_FP;
		break;

	case XSTATE_COPY_FX:
		header.xfeatures &= XFEATURE_MASK_FP | XFEATURE_MASK_SSE;
		break;

	case XSTATE_COPY_XSAVE:
		header.xfeatures &= fpstate->user_xfeatures;
		break;
	}

	/* Copy FP state up to MXCSR */
	copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
		     &xinit->i387, off_mxcsr);

	/* Copy MXCSR when SSE or YMM are set in the feature mask */
	copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
		     &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
		     MXCSR_AND_FLAGS_SIZE);

	/* Copy the remaining FP state */
	copy_feature(header.xfeatures & XFEATURE_MASK_FP,
		     &to, &xsave->i387.st_space, &xinit->i387.st_space,
		     sizeof(xsave->i387.st_space));

	/* Copy the SSE state - shared with YMM, but independently managed */
	copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
		     &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
		     sizeof(xsave->i387.xmm_space));

	if (copy_mode != XSTATE_COPY_XSAVE)
		goto out;

	/* Zero the padding area */
	membuf_zero(&to, sizeof(xsave->i387.padding));

	/* Copy xsave->i387.sw_reserved */
	membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));

	/* Copy the user space relevant state of @xsave->header */
	membuf_write(&to, &header, sizeof(header));

	zerofrom = offsetof(struct xregs_state, extended_state_area);

	/*
	 * The ptrace buffer is in non-compacted XSAVE format. In
	 * non-compacted format disabled features still occupy state space,
	 * but there is no state to copy from in the compacted
	 * init_fpstate. The gap tracking will zero these states.
	 */
	mask = fpstate->user_xfeatures;

	for_each_extended_xfeature(i, mask) {
		/*
		 * If there was a feature or alignment gap, zero the space
		 * in the destination buffer.
		 */
		if (zerofrom < xstate_offsets[i])
			membuf_zero(&to, xstate_offsets[i] - zerofrom);

		if (i == XFEATURE_PKRU) {
			struct pkru_state pkru = {0};
			/*
			 * PKRU is not necessarily up to date in the
			 * XSAVE buffer. Use the provided value.
			 */
			pkru.pkru = pkru_val;
			membuf_write(&to, &pkru, sizeof(pkru));
		} else {
			copy_feature(header.xfeatures & BIT_ULL(i), &to,
				     __raw_xsave_addr(xsave, i),
				     __raw_xsave_addr(xinit, i),
				     xstate_sizes[i]);
		}
		/*
		 * Keep track of the last copied state in the non-compacted
		 * target buffer for gap zeroing.
		 */
		zerofrom = xstate_offsets[i] + xstate_sizes[i];
	}

out:
	if (to.left)
		membuf_zero(&to, to.left);
}
/**
 * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
 * @to:		membuf descriptor
 * @tsk:	The task from which to copy the saved xstate
 * @copy_mode:	The requested copy mode
 *
 * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
 * format, i.e. from the kernel internal hardware dependent storage format
 * to the requested @copy_mode. UABI XSTATE is always uncompacted!
 *
 * It supports partial copy but @to.pos always starts from zero.
 */
void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
			     enum xstate_copy_mode copy_mode)
{
	__copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
				  tsk->thread.pkru, copy_mode);
}
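/*
 * Illustrative sketch (not in the original file): this is roughly how a
 * ptrace regset ->regset_get() handler hands a task's xstate to userspace.
 * The membuf describes the caller's destination buffer, and the helper
 * zeroes any gaps itself, so the buffer does not have to be pre-cleared:
 *
 *	int xstateregs_get(struct task_struct *target,
 *			   const struct user_regset *regset,
 *			   struct membuf to)
 *	{
 *		copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE);
 *		return 0;
 *	}
 */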
static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
			    const void *kbuf, const void __user *ubuf)
{
	if (kbuf) {
		memcpy(dst, kbuf + offset, size);
	} else {
		if (copy_from_user(dst, ubuf + offset, size))
			return -EFAULT;
	}
	return 0;
}
static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
			       const void __user *ubuf)
{
	struct xregs_state *xsave = &fpstate->regs.xsave;
	unsigned int offset, size;
	struct xstate_header hdr;
	u64 mask;
	int i;

	offset = offsetof(struct xregs_state, header);
	if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, ubuf))
		return -EFAULT;

	if (validate_user_xstate_header(&hdr, fpstate))
		return -EINVAL;

	/* Validate MXCSR when any of the related features is in use */
	mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;
	if (hdr.xfeatures & mask) {
		u32 mxcsr[2];

		offset = offsetof(struct fxregs_state, mxcsr);
		if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf))
			return -EFAULT;

		/* Reserved bits in MXCSR must be zero. */
		if (mxcsr[0] & ~mxcsr_feature_mask)
			return -EINVAL;

		/* SSE and YMM require MXCSR even when FP is not in use. */
		if (!(hdr.xfeatures & XFEATURE_MASK_FP)) {
			xsave->i387.mxcsr = mxcsr[0];
			xsave->i387.mxcsr_mask = mxcsr[1];
		}
	}

	for (i = 0; i < XFEATURE_MAX; i++) {
		u64 mask = ((u64)1 << i);

		if (hdr.xfeatures & mask) {
			void *dst = __raw_xsave_addr(xsave, i);

			offset = xstate_offsets[i];
			size = xstate_sizes[i];

			if (copy_from_buffer(dst, offset, size, kbuf, ubuf))
				return -EFAULT;
		}
	}

	/*
	 * The state that came in from userspace was user-state only.
	 * Mask all the user states out of 'xfeatures':
	 */
	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;

	/*
	 * Add back in the features that came in from userspace:
	 */
	xsave->header.xfeatures |= hdr.xfeatures;

	return 0;
}
/*
 * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
 * format and copy to the target thread. Used by ptrace and KVM.
 */
int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf)
{
	return copy_uabi_to_xstate(fpstate, kbuf, NULL);
}

/*
 * Convert from a sigreturn standard-format user-space buffer to kernel
 * XSAVE[S] format and copy to the target thread. This is called from the
 * sigreturn() and rt_sigreturn() system calls.
 */
int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate,
				      const void __user *ubuf)
{
	return copy_uabi_to_xstate(fpstate, NULL, ubuf);
}
static bool validate_independent_components(u64 mask)
{
	u64 xchk;

	if (WARN_ON_FPU(!cpu_feature_enabled(X86_FEATURE_XSAVES)))
		return false;

	xchk = ~xfeatures_mask_independent();

	if (WARN_ON_ONCE(!mask || mask & xchk))
		return false;

	return true;
}
/**
 * xsaves - Save selected components to a kernel xstate buffer
 * @xstate:	Pointer to the buffer
 * @mask:	Feature mask to select the components to save
 *
 * The @xstate buffer must be 64 byte aligned and correctly initialized as
 * XSAVES does not write the full xstate header. Before first use the
 * buffer should be zeroed otherwise a consecutive XRSTORS from that buffer
 * can #GP.
 *
 * The feature mask must be a subset of the independent features.
 */
void xsaves(struct xregs_state *xstate, u64 mask)
{
	int err;

	if (!validate_independent_components(mask))
		return;

	XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err);
	WARN_ON_ONCE(err);
}
/**
 * xrstors - Restore selected components from a kernel xstate buffer
 * @xstate:	Pointer to the buffer
 * @mask:	Feature mask to select the components to restore
 *
 * The @xstate buffer must be 64 byte aligned and correctly initialized
 * otherwise XRSTORS from that buffer can #GP.
 *
 * Proper usage is to restore the state which was saved with
 * xsaves() into @xstate.
 *
 * The feature mask must be a subset of the independent features.
 */
void xrstors(struct xregs_state *xstate, u64 mask)
{
	int err;

	if (!validate_independent_components(mask))
		return;

	XSTATE_OP(XRSTORS, xstate, (u32)mask, (u32)(mask >> 32), err);
	WARN_ON_ONCE(err);
}
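/*
 * Illustrative sketch (not in the original file): the intended pairing for
 * an independent component such as arch LBR state. The buffer is assumed
 * to be 64-byte aligned and zeroed before first use:
 *
 *	struct xregs_state *lbr_buf;	// hypothetical, allocated elsewhere
 *
 *	xsaves(lbr_buf, XFEATURE_MASK_LBR);	// save LBRs on context switch out
 *	...
 *	xrstors(lbr_buf, XFEATURE_MASK_LBR);	// restore them on switch in
 *
 * Passing a mask outside xfeatures_mask_independent() trips the
 * validate_independent_components() warning and the call is ignored.
 */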
#if IS_ENABLED(CONFIG_KVM)
void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature)
{
	void *addr = get_xsave_addr(&fps->regs.xsave, xfeature);

	if (addr)
		memset(addr, 0, xstate_sizes[xfeature]);
}
EXPORT_SYMBOL_GPL(fpstate_clear_xstate_component);
#endif
#ifdef CONFIG_PROC_PID_ARCH_STATUS
/*
 * Report the amount of time elapsed in milliseconds since last AVX512
 * use in the task.
 */
static void avx512_status(struct seq_file *m, struct task_struct *task)
{
	unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
	long delta;

	if (!timestamp) {
		/*
		 * Report -1 if no AVX512 usage
		 */
		delta = -1;
	} else {
		delta = (long)(jiffies - timestamp);
		/*
		 * Cap to LONG_MAX if time difference > LONG_MAX
		 */
		if (delta < 0)
			delta = LONG_MAX;
		delta = jiffies_to_msecs(delta);
	}

	seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta);
	seq_putc(m, '\n');
}
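/*
 * Illustrative example (not in the original file): with the above, reading
 * /proc/<pid>/arch_status for a task that used AVX-512 roughly a second
 * ago would show something like:
 *
 *	AVX512_elapsed_ms:	1020
 *
 * and "-1" for a task that has never used AVX-512.
 */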
/*
 * Report architecture specific information
 */
int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task)
{
	/*
	 * Report AVX512 state if the processor and build option support it.
	 */
	if (cpu_feature_enabled(X86_FEATURE_AVX512F))
		avx512_status(m, task);

	return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */