2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
/* One-byte descriptor codes returned via CPUID leaf 2 for each cache level. */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
63 /* CPUID Leaf 4 constants: */
/* EAX bits 4:0 — cache type field values. */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
/* EAX bits 7:5 — cache level. */
70 #define CPUID_4_LEVEL(l) ((l) << 5)
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
/* EDX flags for leaf 4. */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
80 #define ASSOC_FULL 0xFF
/* NOTE(review): the middle arms of AMD_ENC_ASSOC (original lines 84-92 of
 * the conditional chain) were dropped by this extraction; only the first
 * and last cases are visible below. */
82 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
83 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
93 a == ASSOC_FULL ? 0xF : \
94 0 /* invalid value */)
97 /* Definitions of the hardcoded cache entries we expose: */
/* L1 data cache geometry (leaf 4 view) and the AMD-specific values used
 * for leaf 0x80000005 — the two are known-inconsistent, see FIXMEs. */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
/* L3 has two variants: the legacy all-zero (disabled) values and the
 * L3_N_* geometry used when an L3 is actually exposed. */
135 /* Level 3 unified cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
140 #define L3_N_LINE_SIZE 64
141 #define L3_N_ASSOCIATIVITY 16
142 #define L3_N_SETS 16384
143 #define L3_N_PARTITIONS 1
144 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
145 #define L3_N_LINES_PER_TAG 1
146 #define L3_N_SIZE_KB_AMD 16384
/* TLB geometry; presumably reported via CPUID leaves 0x80000005/0x80000006
 * (the AMD_ENC_ASSOC comment above mentions 0x80000006) — confirm against
 * the consuming code, which is not visible in this extraction. */
148 /* TLB definitions: */
150 #define L1_DTLB_2M_ASSOC 1
151 #define L1_DTLB_2M_ENTRIES 255
152 #define L1_DTLB_4K_ASSOC 1
153 #define L1_DTLB_4K_ENTRIES 255
155 #define L1_ITLB_2M_ASSOC 1
156 #define L1_ITLB_2M_ENTRIES 255
157 #define L1_ITLB_4K_ASSOC 1
158 #define L1_ITLB_4K_ENTRIES 255
160 #define L2_DTLB_2M_ASSOC 0 /* disabled */
161 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
162 #define L2_DTLB_4K_ASSOC 4
163 #define L2_DTLB_4K_ENTRIES 512
165 #define L2_ITLB_2M_ASSOC 0 /* disabled */
166 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
167 #define L2_ITLB_4K_ASSOC 4
168 #define L2_ITLB_4K_ENTRIES 512
/* Serialize three 32-bit CPUID vendor-id register values into dst as 12
 * ASCII bytes (least-significant byte of each word first) and NUL-terminate
 * at dst[CPUID_VENDOR_SZ]; dst must hold at least CPUID_VENDOR_SZ + 1 bytes.
 * NOTE(review): the opening brace and the declaration of loop index 'i'
 * (original lines 174-175), plus the closing braces, were dropped by this
 * extraction. */
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
181 dst[CPUID_VENDOR_SZ] = '\0';
/* Baseline CPUID[1].EDX feature sets for the historical CPU generations;
 * each builds on the previous one. */
184 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
185 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
186 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
187 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
188 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
189 CPUID_PSE36 | CPUID_FXSR)
190 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
191 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
192 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
193 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
194 CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* TCG_*_FEATURES masks: the subset of each CPUID feature word that the TCG
 * emulator can actually provide (used to filter guest-visible features). */
196 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
197 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
198 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
199 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
200 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
201 /* partly implemented:
202 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
204 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
205 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
206 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
207 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
208 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
209 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
211 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
212 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
213 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
214 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
215 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
/* NOTE(review): two alternative definitions of TCG_EXT2_X86_64_FEATURES
 * appear back-to-back because the '#ifdef TARGET_X86_64' / '#else' /
 * '#endif' lines (original lines 217/219/221) were dropped by this
 * extraction. */
218 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
220 #define TCG_EXT2_X86_64_FEATURES 0
223 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
224 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
225 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
226 TCG_EXT2_X86_64_FEATURES)
227 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
228 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
229 #define TCG_EXT4_FEATURES 0
230 #define TCG_SVM_FEATURES 0
231 #define TCG_KVM_FEATURES 0
232 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
233 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
234 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
235 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
238 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
239 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
240 CPUID_7_0_EBX_RDSEED */
241 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
242 #define TCG_APM_FEATURES 0
243 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
244 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
246 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Per-feature-word metadata: how to query the word via CPUID and which of
 * its flags TCG supports / can(not) be migrated.
 * NOTE(review): the closing '} FeatureWordInfo;' line (original line 262)
 * was dropped by this extraction. */
248 typedef struct FeatureWordInfo {
249 /* feature flags names are taken from "Intel Processor Identification and
250 * the CPUID Instruction" and AMD's "CPUID Specification".
251 * In cases of disagreement between feature naming conventions,
252 * aliases may be added.
254 const char *feat_names[32];
255 uint32_t cpuid_eax; /* Input EAX for CPUID */
256 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
257 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
258 int cpuid_reg; /* output register (R_* constant) */
259 uint32_t tcg_features; /* Feature flags supported by TCG */
260 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
261 uint32_t migratable_flags; /* Feature flags known to be migratable */
/* Master table mapping every FeatureWord to its 32 flag names and CPUID
 * query parameters. Index within feat_names is the bit position.
 * NOTE(review): many entry-header lines (e.g. '[FEAT_1_EDX] = {
 * .feat_names = {') and entry/array closers were dropped by this
 * extraction, so entries appear to run together below; no tokens were
 * changed, only comments added. */
264 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
267 "fpu", "vme", "de", "pse",
268 "tsc", "msr", "pae", "mce",
269 "cx8", "apic", NULL, "sep",
270 "mtrr", "pge", "mca", "cmov",
271 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
272 NULL, "ds" /* Intel dts */, "acpi", "mmx",
273 "fxsr", "sse", "sse2", "ss",
274 "ht" /* Intel htt */, "tm", "ia64", "pbe",
276 .cpuid_eax = 1, .cpuid_reg = R_EDX,
277 .tcg_features = TCG_FEATURES,
281 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
282 "ds-cpl", "vmx", "smx", "est",
283 "tm2", "ssse3", "cid", NULL,
284 "fma", "cx16", "xtpr", "pdcm",
285 NULL, "pcid", "dca", "sse4.1",
286 "sse4.2", "x2apic", "movbe", "popcnt",
287 "tsc-deadline", "aes", "xsave", "osxsave",
288 "avx", "f16c", "rdrand", "hypervisor",
290 .cpuid_eax = 1, .cpuid_reg = R_ECX,
291 .tcg_features = TCG_EXT_FEATURES,
293 /* Feature names that are already defined on feature_name[] but
294 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
295 * names on feat_names below. They are copied automatically
296 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
298 [FEAT_8000_0001_EDX] = {
300 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
301 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
302 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
303 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
304 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
305 "nx", NULL, "mmxext", NULL /* mmx */,
306 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
307 NULL, "lm", "3dnowext", "3dnow",
309 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
310 .tcg_features = TCG_EXT2_FEATURES,
312 [FEAT_8000_0001_ECX] = {
314 "lahf-lm", "cmp-legacy", "svm", "extapic",
315 "cr8legacy", "abm", "sse4a", "misalignsse",
316 "3dnowprefetch", "osvw", "ibs", "xop",
317 "skinit", "wdt", NULL, "lwp",
318 "fma4", "tce", NULL, "nodeid-msr",
319 NULL, "tbm", "topoext", "perfctr-core",
320 "perfctr-nb", NULL, NULL, NULL,
321 NULL, NULL, NULL, NULL,
323 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
324 .tcg_features = TCG_EXT3_FEATURES,
/* VIA/Centaur extended features. */
326 [FEAT_C000_0001_EDX] = {
328 NULL, NULL, "xstore", "xstore-en",
329 NULL, NULL, "xcrypt", "xcrypt-en",
330 "ace2", "ace2-en", "phe", "phe-en",
331 "pmm", "pmm-en", NULL, NULL,
332 NULL, NULL, NULL, NULL,
333 NULL, NULL, NULL, NULL,
334 NULL, NULL, NULL, NULL,
335 NULL, NULL, NULL, NULL,
337 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
338 .tcg_features = TCG_EXT4_FEATURES,
/* KVM paravirtual features. NOTE(review): "kvmclock" at bits 0 and 3 is
 * intentional upstream (KVM_FEATURE_CLOCKSOURCE and
 * KVM_FEATURE_CLOCKSOURCE2 share the user-visible name) — confirm before
 * "fixing" the apparent duplicate. */
342 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
343 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
344 NULL, NULL, NULL, NULL,
345 NULL, NULL, NULL, NULL,
346 NULL, NULL, NULL, NULL,
347 NULL, NULL, NULL, NULL,
348 "kvmclock-stable-bit", NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
351 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
352 .tcg_features = TCG_KVM_FEATURES,
/* Hyper-V enlightenment words: names deliberately NULL so the flags are
 * not user-settable by name; comments record the underlying bits. */
354 [FEAT_HYPERV_EAX] = {
356 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
357 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
358 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
359 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
360 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
361 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
362 NULL, NULL, NULL, NULL,
363 NULL, NULL, NULL, NULL,
364 NULL, NULL, NULL, NULL,
365 NULL, NULL, NULL, NULL,
366 NULL, NULL, NULL, NULL,
368 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
370 [FEAT_HYPERV_EBX] = {
372 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
373 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
374 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
375 NULL /* hv_create_port */, NULL /* hv_connect_port */,
376 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
377 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
379 NULL, NULL, NULL, NULL,
380 NULL, NULL, NULL, NULL,
381 NULL, NULL, NULL, NULL,
382 NULL, NULL, NULL, NULL,
384 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
386 [FEAT_HYPERV_EDX] = {
388 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
389 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
390 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
392 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
393 NULL, NULL, NULL, NULL,
394 NULL, NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL,
397 NULL, NULL, NULL, NULL,
399 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
/* SVM (AMD virtualization) feature word, CPUID 0x8000000A.EDX. */
403 "npt", "lbrv", "svm-lock", "nrip-save",
404 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
405 NULL, NULL, "pause-filter", NULL,
406 "pfthreshold", NULL, NULL, NULL,
407 NULL, NULL, NULL, NULL,
408 NULL, NULL, NULL, NULL,
409 NULL, NULL, NULL, NULL,
410 NULL, NULL, NULL, NULL,
412 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
413 .tcg_features = TCG_SVM_FEATURES,
/* CPUID leaf 7 subleaf 0, EBX. */
417 "fsgsbase", "tsc-adjust", NULL, "bmi1",
418 "hle", "avx2", NULL, "smep",
419 "bmi2", "erms", "invpcid", "rtm",
420 NULL, NULL, "mpx", NULL,
421 "avx512f", "avx512dq", "rdseed", "adx",
422 "smap", "avx512ifma", "pcommit", "clflushopt",
423 "clwb", NULL, "avx512pf", "avx512er",
424 "avx512cd", NULL, "avx512bw", "avx512vl",
427 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
429 .tcg_features = TCG_7_0_EBX_FEATURES,
/* CPUID leaf 7 subleaf 0, ECX. */
433 NULL, "avx512vbmi", "umip", "pku",
434 "ospke", NULL, NULL, NULL,
435 NULL, NULL, NULL, NULL,
436 NULL, NULL, NULL, NULL,
437 NULL, NULL, NULL, NULL,
438 NULL, NULL, "rdpid", NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, NULL, NULL,
443 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
445 .tcg_features = TCG_7_0_ECX_FEATURES,
447 [FEAT_8000_0007_EDX] = {
449 NULL, NULL, NULL, NULL,
450 NULL, NULL, NULL, NULL,
451 "invtsc", NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 NULL, NULL, NULL, NULL,
458 .cpuid_eax = 0x80000007,
460 .tcg_features = TCG_APM_FEATURES,
461 .unmigratable_flags = CPUID_APM_INVTSC,
/* CPUID leaf 0xD subleaf 1, EAX: XSAVE-related instructions. */
465 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
466 NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL,
475 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
477 .tcg_features = TCG_XSAVE_FEATURES,
/* CPUID leaf 6, EAX: thermal/power management ("arat" at bit 2). */
481 NULL, NULL, "arat", NULL,
482 NULL, NULL, NULL, NULL,
483 NULL, NULL, NULL, NULL,
484 NULL, NULL, NULL, NULL,
485 NULL, NULL, NULL, NULL,
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL,
490 .cpuid_eax = 6, .cpuid_reg = R_EAX,
491 .tcg_features = TCG_6_EAX_FEATURES,
/* XSAVE component bitmap halves (CPUID 0xD subleaf 0, EAX/EDX); names
 * intentionally absent — components are enabled via their parent features. */
493 [FEAT_XSAVE_COMP_LO] = {
495 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
498 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
499 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
500 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
503 [FEAT_XSAVE_COMP_HI] = {
505 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
/* Maps each 32-bit register index (R_* constant) to its printable name and
 * QAPI enum value; the REGISTER() macro builds one designated initializer.
 * NOTE(review): the 'const char *name;' field, the typedef closer, and the
 * array's REGISTER(...) entries plus closing '};' (original lines 513,
 * 516-517, 521-531) were dropped by this extraction. */
511 typedef struct X86RegisterInfo32 {
512 /* Name of register */
514 /* QAPI enum value register */
515 X86CPURegister32 qapi_enum;
518 #define REGISTER(reg) \
519 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
520 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
/* Describes each XSAVE state component: the CPUID feature that gates it and
 * its offset/size within X86XSaveArea. Indexed by XSTATE_*_BIT. */
532 typedef struct ExtSaveArea {
533 uint32_t feature, bits;
534 uint32_t offset, size;
537 static const ExtSaveArea x86_ext_save_areas[] = {
539 /* x87 FP state component is always enabled if XSAVE is supported */
540 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
541 /* x87 state is in the legacy region of the XSAVE area */
543 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
546 /* SSE state component is always enabled if XSAVE is supported */
547 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
548 /* SSE state is in the legacy region of the XSAVE area */
550 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
/* AVX upper-halves component (presumably [XSTATE_YMM_BIT]; the index line
 * was dropped by this extraction — confirm against upstream). */
553 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
554 .offset = offsetof(X86XSaveArea, avx_state),
555 .size = sizeof(XSaveAVX) },
556 [XSTATE_BNDREGS_BIT] =
557 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
558 .offset = offsetof(X86XSaveArea, bndreg_state),
559 .size = sizeof(XSaveBNDREG) },
560 [XSTATE_BNDCSR_BIT] =
561 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
562 .offset = offsetof(X86XSaveArea, bndcsr_state),
563 .size = sizeof(XSaveBNDCSR) },
564 [XSTATE_OPMASK_BIT] =
565 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
566 .offset = offsetof(X86XSaveArea, opmask_state),
567 .size = sizeof(XSaveOpmask) },
568 [XSTATE_ZMM_Hi256_BIT] =
569 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
570 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
571 .size = sizeof(XSaveZMM_Hi256) },
572 [XSTATE_Hi16_ZMM_BIT] =
573 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
574 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
575 .size = sizeof(XSaveHi16_ZMM) },
/* PKRU component (presumably [XSTATE_PKRU_BIT]; index line elided). */
577 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
578 .offset = offsetof(X86XSaveArea, pkru_state),
579 .size = sizeof(XSavePKRU) },
/* Returns the XSAVE area size (in bytes) needed for the components whose
 * bits are set in mask: the maximum end offset (offset + size) over all
 * selected entries of x86_ext_save_areas.
 * NOTE(review): the declarations of the accumulator and loop index, the
 * closing braces, and the final return (original lines 583-586, 591-594)
 * were dropped by this extraction. */
582 static uint32_t xsave_area_size(uint64_t mask)
587 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
588 const ExtSaveArea *esa = &x86_ext_save_areas[i];
589 if ((mask >> i) & 1) {
590 ret = MAX(ret, esa->offset + esa->size);
/* Combines the two 32-bit XSAVE component feature words into the full
 * 64-bit component bitmap (HI word in the upper 32 bits). */
596 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
598 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
599 cpu->env.features[FEAT_XSAVE_COMP_LO];
/* Returns the printable name of 32-bit register index 'reg' from
 * x86_reg_info_32.
 * NOTE(review): the body of the out-of-range branch (original lines
 * 605-606, presumably returning NULL or a fallback string) was dropped by
 * this extraction — confirm against upstream before relying on it. */
602 const char *get_register_name_32(unsigned int reg)
604 if (reg >= CPU_NB_REGS32) {
607 return x86_reg_info_32[reg].name;
611 * Returns the set of feature flags that are supported and migratable by
612 * QEMU, for a given FeatureWord.
/* NOTE(review): the result accumulator's declaration, the line that ORs
 * the selected bit into it, and the final return (original lines 615,
 * 617-619, 627-631) were dropped by this extraction; only the selection
 * condition is visible. */
614 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
616 FeatureWordInfo *wi = &feature_word_info[w];
620 for (i = 0; i < 32; i++) {
621 uint32_t f = 1U << i;
623 /* If the feature name is known, it is implicitly considered migratable,
624 * unless it is explicitly set in unmigratable_flags */
625 if ((wi->migratable_flags & f) ||
626 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
/* Executes the CPUID instruction on the host with the given function/count
 * (EAX/ECX inputs) and stores the four result registers through the output
 * pointers. Two inline-asm paths: the x86_64 one uses direct register
 * constraints; the i386 one spills results through a vector, presumably to
 * avoid clobbering EBX under PIC — confirm.
 * NOTE(review): the function's opening lines, '#if defined(__x86_64__)',
 * the 'cpuid' mnemonics, the 'popa' epilogue, and the stores back through
 * eax/ebx/ecx/edx (original lines 635-639, 645, 650, 652-665) were dropped
 * by this extraction. */
633 void host_cpuid(uint32_t function, uint32_t count,
634 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
640 : "=a"(vec[0]), "=b"(vec[1]),
641 "=c"(vec[2]), "=d"(vec[3])
642 : "0"(function), "c"(count) : "cc");
643 #elif defined(__i386__)
644 asm volatile("pusha \n\t"
646 "mov %%eax, 0(%2) \n\t"
647 "mov %%ebx, 4(%2) \n\t"
648 "mov %%ecx, 8(%2) \n\t"
649 "mov %%edx, 12(%2) \n\t"
651 : : "a"(function), "c"(count), "S"(vec)
667 /* CPU class name definitions: */
/* QOM type-name convention: "<model>-<TYPE_X86_CPU>". */
669 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
670 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
672 /* Return type name for a given CPU model name
673 * Caller is responsible for freeing the returned string.
675 static char *x86_cpu_type_name(const char *model_name)
677 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
/* Looks up the ObjectClass for a CPU model name by converting it to its
 * QOM type name.
 * NOTE(review): the NULL-model early-return body, the g_free(typename)
 * call, and the final 'return oc;' (original lines 681-684, 686-688,
 * 691-693) were dropped by this extraction. */
680 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
685 if (cpu_model == NULL) {
689 typename = x86_cpu_type_name(cpu_model);
690 oc = object_class_by_name(typename);
/* Inverse of x86_cpu_type_name(): strips the X86_CPU_TYPE_SUFFIX from the
 * class's QOM name and returns the model name as a newly allocated string
 * (caller frees). Asserts that the class name actually carries the suffix. */
695 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
697 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
698 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
699 return g_strndup(class_name,
700 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX))
/* Static description of a built-in CPU model (used by builtin_x86_defs).
 * NOTE(review): most fields (name, level/xlevel, family/model/stepping,
 * model_id, ...) and the closing '};' were dropped by this extraction;
 * only vendor and the feature-word array are visible. */
703 struct X86CPUDefinition {
707 /* vendor is zero-terminated, 12 character ASCII string */
708 char vendor[CPUID_VENDOR_SZ + 1];
712 FeatureWordArray features;
716 static X86CPUDefinition builtin_x86_defs[] = {
720 .vendor = CPUID_VENDOR_AMD,
724 .features[FEAT_1_EDX] =
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
730 .features[FEAT_8000_0001_EDX] =
731 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
732 .features[FEAT_8000_0001_ECX] =
733 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
734 .xlevel = 0x8000000A,
735 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
740 .vendor = CPUID_VENDOR_AMD,
744 /* Missing: CPUID_HT */
745 .features[FEAT_1_EDX] =
747 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
748 CPUID_PSE36 | CPUID_VME,
749 .features[FEAT_1_ECX] =
750 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
752 .features[FEAT_8000_0001_EDX] =
753 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
754 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
755 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
756 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
758 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
759 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
760 .features[FEAT_8000_0001_ECX] =
761 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
762 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
763 /* Missing: CPUID_SVM_LBRV */
764 .features[FEAT_SVM] =
766 .xlevel = 0x8000001A,
767 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
772 .vendor = CPUID_VENDOR_INTEL,
776 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
777 .features[FEAT_1_EDX] =
779 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
780 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
781 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
782 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
783 .features[FEAT_1_ECX] =
784 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
786 .features[FEAT_8000_0001_EDX] =
787 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
788 .features[FEAT_8000_0001_ECX] =
790 .xlevel = 0x80000008,
791 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
796 .vendor = CPUID_VENDOR_INTEL,
800 /* Missing: CPUID_HT */
801 .features[FEAT_1_EDX] =
802 PPRO_FEATURES | CPUID_VME |
803 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
805 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
806 .features[FEAT_1_ECX] =
807 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
808 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
809 .features[FEAT_8000_0001_EDX] =
810 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
811 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
812 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
813 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
814 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
815 .features[FEAT_8000_0001_ECX] =
817 .xlevel = 0x80000008,
818 .model_id = "Common KVM processor"
823 .vendor = CPUID_VENDOR_INTEL,
827 .features[FEAT_1_EDX] =
829 .features[FEAT_1_ECX] =
831 .xlevel = 0x80000004,
832 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
837 .vendor = CPUID_VENDOR_INTEL,
841 .features[FEAT_1_EDX] =
842 PPRO_FEATURES | CPUID_VME |
843 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
844 .features[FEAT_1_ECX] =
846 .features[FEAT_8000_0001_ECX] =
848 .xlevel = 0x80000008,
849 .model_id = "Common 32-bit KVM processor"
854 .vendor = CPUID_VENDOR_INTEL,
858 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
859 .features[FEAT_1_EDX] =
860 PPRO_FEATURES | CPUID_VME |
861 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
863 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
864 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
865 .features[FEAT_1_ECX] =
866 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
867 .features[FEAT_8000_0001_EDX] =
869 .xlevel = 0x80000008,
870 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
875 .vendor = CPUID_VENDOR_INTEL,
879 .features[FEAT_1_EDX] =
886 .vendor = CPUID_VENDOR_INTEL,
890 .features[FEAT_1_EDX] =
897 .vendor = CPUID_VENDOR_INTEL,
901 .features[FEAT_1_EDX] =
908 .vendor = CPUID_VENDOR_INTEL,
912 .features[FEAT_1_EDX] =
919 .vendor = CPUID_VENDOR_AMD,
923 .features[FEAT_1_EDX] =
924 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
926 .features[FEAT_8000_0001_EDX] =
927 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
928 .xlevel = 0x80000008,
929 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
934 .vendor = CPUID_VENDOR_INTEL,
938 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
939 .features[FEAT_1_EDX] =
941 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
942 CPUID_ACPI | CPUID_SS,
943 /* Some CPUs got no CPUID_SEP */
944 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
946 .features[FEAT_1_ECX] =
947 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
949 .features[FEAT_8000_0001_EDX] =
951 .features[FEAT_8000_0001_ECX] =
953 .xlevel = 0x80000008,
954 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
959 .vendor = CPUID_VENDOR_INTEL,
963 .features[FEAT_1_EDX] =
964 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
965 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
966 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
967 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
968 CPUID_DE | CPUID_FP87,
969 .features[FEAT_1_ECX] =
970 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
971 .features[FEAT_8000_0001_EDX] =
972 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
973 .features[FEAT_8000_0001_ECX] =
975 .xlevel = 0x80000008,
976 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
981 .vendor = CPUID_VENDOR_INTEL,
985 .features[FEAT_1_EDX] =
986 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
987 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
988 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
989 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
990 CPUID_DE | CPUID_FP87,
991 .features[FEAT_1_ECX] =
992 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
994 .features[FEAT_8000_0001_EDX] =
995 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
996 .features[FEAT_8000_0001_ECX] =
998 .xlevel = 0x80000008,
999 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1004 .vendor = CPUID_VENDOR_INTEL,
1008 .features[FEAT_1_EDX] =
1009 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1010 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1011 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1012 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1013 CPUID_DE | CPUID_FP87,
1014 .features[FEAT_1_ECX] =
1015 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1016 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1017 .features[FEAT_8000_0001_EDX] =
1018 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1019 .features[FEAT_8000_0001_ECX] =
1021 .xlevel = 0x80000008,
1022 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1027 .vendor = CPUID_VENDOR_INTEL,
1031 .features[FEAT_1_EDX] =
1032 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1033 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1034 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1035 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1036 CPUID_DE | CPUID_FP87,
1037 .features[FEAT_1_ECX] =
1038 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1039 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1040 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1041 .features[FEAT_8000_0001_EDX] =
1042 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1043 .features[FEAT_8000_0001_ECX] =
1045 .features[FEAT_6_EAX] =
1047 .xlevel = 0x80000008,
1048 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1051 .name = "SandyBridge",
1053 .vendor = CPUID_VENDOR_INTEL,
1057 .features[FEAT_1_EDX] =
1058 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1059 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1060 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1061 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1062 CPUID_DE | CPUID_FP87,
1063 .features[FEAT_1_ECX] =
1064 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1065 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1066 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1067 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1069 .features[FEAT_8000_0001_EDX] =
1070 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1072 .features[FEAT_8000_0001_ECX] =
1074 .features[FEAT_XSAVE] =
1075 CPUID_XSAVE_XSAVEOPT,
1076 .features[FEAT_6_EAX] =
1078 .xlevel = 0x80000008,
1079 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1082 .name = "IvyBridge",
1084 .vendor = CPUID_VENDOR_INTEL,
1088 .features[FEAT_1_EDX] =
1089 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1090 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1091 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1092 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1093 CPUID_DE | CPUID_FP87,
1094 .features[FEAT_1_ECX] =
1095 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1096 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1097 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1098 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1099 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1100 .features[FEAT_7_0_EBX] =
1101 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1103 .features[FEAT_8000_0001_EDX] =
1104 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1106 .features[FEAT_8000_0001_ECX] =
1108 .features[FEAT_XSAVE] =
1109 CPUID_XSAVE_XSAVEOPT,
1110 .features[FEAT_6_EAX] =
1112 .xlevel = 0x80000008,
1113 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1116 .name = "Haswell-noTSX",
1118 .vendor = CPUID_VENDOR_INTEL,
1122 .features[FEAT_1_EDX] =
1123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1127 CPUID_DE | CPUID_FP87,
1128 .features[FEAT_1_ECX] =
1129 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1130 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1131 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1132 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1133 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1134 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1135 .features[FEAT_8000_0001_EDX] =
1136 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1138 .features[FEAT_8000_0001_ECX] =
1139 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1140 .features[FEAT_7_0_EBX] =
1141 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1142 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1143 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1144 .features[FEAT_XSAVE] =
1145 CPUID_XSAVE_XSAVEOPT,
1146 .features[FEAT_6_EAX] =
1148 .xlevel = 0x80000008,
1149 .model_id = "Intel Core Processor (Haswell, no TSX)",
1153 .vendor = CPUID_VENDOR_INTEL,
1157 .features[FEAT_1_EDX] =
1158 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1159 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1160 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1161 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1162 CPUID_DE | CPUID_FP87,
1163 .features[FEAT_1_ECX] =
1164 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1165 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1166 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1167 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1168 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1169 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1170 .features[FEAT_8000_0001_EDX] =
1171 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1173 .features[FEAT_8000_0001_ECX] =
1174 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1175 .features[FEAT_7_0_EBX] =
1176 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1177 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1178 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1180 .features[FEAT_XSAVE] =
1181 CPUID_XSAVE_XSAVEOPT,
1182 .features[FEAT_6_EAX] =
1184 .xlevel = 0x80000008,
1185 .model_id = "Intel Core Processor (Haswell)",
1188 .name = "Broadwell-noTSX",
1190 .vendor = CPUID_VENDOR_INTEL,
1194 .features[FEAT_1_EDX] =
1195 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1196 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1197 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1198 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1199 CPUID_DE | CPUID_FP87,
1200 .features[FEAT_1_ECX] =
1201 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1202 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1203 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1204 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1205 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1206 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1207 .features[FEAT_8000_0001_EDX] =
1208 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1210 .features[FEAT_8000_0001_ECX] =
1211 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1212 .features[FEAT_7_0_EBX] =
1213 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1214 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1215 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1216 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1218 .features[FEAT_XSAVE] =
1219 CPUID_XSAVE_XSAVEOPT,
1220 .features[FEAT_6_EAX] =
1222 .xlevel = 0x80000008,
1223 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1226 .name = "Broadwell",
1228 .vendor = CPUID_VENDOR_INTEL,
1232 .features[FEAT_1_EDX] =
1233 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1234 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1235 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1236 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1237 CPUID_DE | CPUID_FP87,
1238 .features[FEAT_1_ECX] =
1239 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1240 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1241 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1242 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1243 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1244 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1245 .features[FEAT_8000_0001_EDX] =
1246 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1248 .features[FEAT_8000_0001_ECX] =
1249 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1250 .features[FEAT_7_0_EBX] =
1251 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1252 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1253 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1254 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1256 .features[FEAT_XSAVE] =
1257 CPUID_XSAVE_XSAVEOPT,
1258 .features[FEAT_6_EAX] =
1260 .xlevel = 0x80000008,
1261 .model_id = "Intel Core Processor (Broadwell)",
1264 .name = "Skylake-Client",
1266 .vendor = CPUID_VENDOR_INTEL,
1270 .features[FEAT_1_EDX] =
1271 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1272 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1273 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1274 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1275 CPUID_DE | CPUID_FP87,
1276 .features[FEAT_1_ECX] =
1277 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1278 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1279 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1280 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1281 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1282 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1283 .features[FEAT_8000_0001_EDX] =
1284 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1286 .features[FEAT_8000_0001_ECX] =
1287 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1288 .features[FEAT_7_0_EBX] =
1289 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1290 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1291 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1292 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1293 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1294 /* Missing: XSAVES (not supported by some Linux versions,
1295 * including v4.1 to v4.6).
1296 * KVM doesn't yet expose any XSAVES state save component,
1297 * and the only one defined in Skylake (processor tracing)
1298 * probably will block migration anyway.
1300 .features[FEAT_XSAVE] =
1301 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1302 CPUID_XSAVE_XGETBV1,
1303 .features[FEAT_6_EAX] =
1305 .xlevel = 0x80000008,
1306 .model_id = "Intel Core Processor (Skylake)",
1309 .name = "Opteron_G1",
1311 .vendor = CPUID_VENDOR_AMD,
1315 .features[FEAT_1_EDX] =
1316 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1317 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1318 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1319 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1320 CPUID_DE | CPUID_FP87,
1321 .features[FEAT_1_ECX] =
1323 .features[FEAT_8000_0001_EDX] =
1324 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1325 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1326 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1327 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1328 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1329 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1330 .xlevel = 0x80000008,
1331 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1334 .name = "Opteron_G2",
1336 .vendor = CPUID_VENDOR_AMD,
1340 .features[FEAT_1_EDX] =
1341 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1342 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1343 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1344 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1345 CPUID_DE | CPUID_FP87,
1346 .features[FEAT_1_ECX] =
1347 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1348 /* Missing: CPUID_EXT2_RDTSCP */
1349 .features[FEAT_8000_0001_EDX] =
1350 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1351 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1352 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1353 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1354 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1355 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1356 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1357 .features[FEAT_8000_0001_ECX] =
1358 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1359 .xlevel = 0x80000008,
1360 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1363 .name = "Opteron_G3",
1365 .vendor = CPUID_VENDOR_AMD,
1369 .features[FEAT_1_EDX] =
1370 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1371 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1372 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1373 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1374 CPUID_DE | CPUID_FP87,
1375 .features[FEAT_1_ECX] =
1376 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1378 /* Missing: CPUID_EXT2_RDTSCP */
1379 .features[FEAT_8000_0001_EDX] =
1380 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1381 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1382 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1383 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1384 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1385 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1386 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1387 .features[FEAT_8000_0001_ECX] =
1388 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1389 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1390 .xlevel = 0x80000008,
1391 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1394 .name = "Opteron_G4",
1396 .vendor = CPUID_VENDOR_AMD,
1400 .features[FEAT_1_EDX] =
1401 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1402 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1403 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1404 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1405 CPUID_DE | CPUID_FP87,
1406 .features[FEAT_1_ECX] =
1407 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1408 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1409 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1411 /* Missing: CPUID_EXT2_RDTSCP */
1412 .features[FEAT_8000_0001_EDX] =
1414 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1415 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1416 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1417 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1418 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1419 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1420 .features[FEAT_8000_0001_ECX] =
1421 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1422 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1423 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1426 .xlevel = 0x8000001A,
1427 .model_id = "AMD Opteron 62xx class CPU",
1430 .name = "Opteron_G5",
1432 .vendor = CPUID_VENDOR_AMD,
1436 .features[FEAT_1_EDX] =
1437 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1438 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1439 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1440 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1441 CPUID_DE | CPUID_FP87,
1442 .features[FEAT_1_ECX] =
1443 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1444 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1445 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1446 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1447 /* Missing: CPUID_EXT2_RDTSCP */
1448 .features[FEAT_8000_0001_EDX] =
1450 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1451 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1452 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1453 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1454 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1455 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1456 .features[FEAT_8000_0001_ECX] =
1457 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1458 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1459 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1462 .xlevel = 0x8000001A,
1463 .model_id = "AMD Opteron 63xx class CPU",
/* A (property name, value string) pair, used to describe default qdev
 * property settings applied to CPU objects (see kvm_default_props /
 * tcg_default_props below). */
1467 typedef struct PropValue {
1468 const char *prop, *value;
1471 /* KVM-specific features that are automatically added/removed
1472 * from all CPU models when KVM is enabled.
1474 static PropValue kvm_default_props[] = {
1475 { "kvmclock", "on" },
1476 { "kvm-nopiodelay", "on" },
1477 { "kvm-asyncpf", "on" },
1478 { "kvm-steal-time", "on" },
1479 { "kvm-pv-eoi", "on" },
1480 { "kvmclock-stable-bit", "on" },
/* NOTE(review): "monitor" is forced off under KVM — presumably because
 * MONITOR/MWAIT is not usefully exposed to KVM guests; confirm. */
1483 { "monitor", "off" },
1488 /* TCG-specific defaults that override all CPU models when using TCG
/* Applied by x86_cpu_load_def() when tcg_enabled() (entries elided in
 * this view). */
1490 static PropValue tcg_default_props[] = {
/* Change the default value of a property in the kvm_default_props table.
 * The property is matched by exact name comparison (strcmp). */
1496 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1499 for (pv = kvm_default_props; pv->prop; pv++) {
1500 if (!strcmp(pv->prop, prop)) {
1506 /* It is valid to call this function only for properties that
1507 * are already present in the kvm_default_props table.
1512 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1513 bool migratable_only);
/* Return true if the host KVM supports Local Machine Check Exception
 * (LMCE), probed via the KVM_X86_GET_MCE_CAP_SUPPORTED ioctl. */
1517 static bool lmce_supported(void)
1521 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1525 return !!(mce_cap & MCG_LMCE_P);
/* Fill @str with the host CPU's 48-byte brand (model-id) string, read
 * from CPUID leaves 0x80000002..0x80000004 (16 bytes per leaf, EAX,
 * EBX, ECX, EDX in that order).  @str must have room for 48 bytes. */
1528 static int cpu_x86_fill_model_id(char *str)
1530 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1533 for (i = 0; i < 3; i++) {
1534 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1535 memcpy(str + i * 16 + 0, &eax, 4);
1536 memcpy(str + i * 16 + 4, &ebx, 4);
1537 memcpy(str + i * 16 + 8, &ecx, 4);
1538 memcpy(str + i * 16 + 12, &edx, 4);
/* CPU model definition for the "host" CPU, filled in from host CPUID
 * values by host_x86_cpu_class_init(). */
1543 static X86CPUDefinition host_cpudef;
/* qdev properties specific to the "host" CPU model. */
1545 static Property host_x86_cpu_properties[] = {
1546 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1547 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1548 DEFINE_PROP_END_OF_LIST()
1551 /* class_init for the "host" CPU model
1553 * This function may be called before KVM is initialized.
1555 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1557 DeviceClass *dc = DEVICE_CLASS(oc);
1558 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1559 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1561 xcc->kvm_required = true;
/* CPUID leaf 0: vendor string is stored in EBX, EDX, ECX (in that order). */
1563 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1564 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
/* CPUID leaf 1: decode family/model/stepping from EAX.  Extended family
 * (bits 27:20) is added to the base family; extended model (bits 19:16)
 * becomes the high nibble of the model. */
1566 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1567 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1568 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1569 host_cpudef.stepping = eax & 0x0F;
1571 cpu_x86_fill_model_id(host_cpudef.model_id);
1573 xcc->cpu_def = &host_cpudef;
1574 xcc->model_description =
1575 "KVM processor with all supported host features "
1576 "(only available in KVM mode)";
1578 /* level, xlevel, xlevel2, and the feature words are initialized on
1579 * instance_init, because they require KVM to be initialized.
1582 dc->props = host_x86_cpu_properties;
1583 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1584 dc->cannot_destroy_with_object_finalize_yet = true;
/* instance_init for the "host" CPU model: queries KVM for the maximum
 * supported CPUID levels and enables host-supported optional features. */
1587 static void host_x86_cpu_initfn(Object *obj)
1589 X86CPU *cpu = X86_CPU(obj);
1590 CPUX86State *env = &cpu->env;
1591 KVMState *s = kvm_state;
1593 /* We can't fill the features array here because we don't know yet if
1594 * "migratable" is true or false.
1596 cpu->host_features = true;
1598 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1599 if (kvm_enabled()) {
1600 env->cpuid_min_level =
1601 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1602 env->cpuid_min_xlevel =
1603 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
/* 0xC0000000 is the Centaur extended CPUID range (xlevel2). */
1604 env->cpuid_min_xlevel2 =
1605 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1607 if (lmce_supported()) {
1608 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1612 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration for the "host" CPU model. */
1615 static const TypeInfo host_x86_cpu_type_info = {
1616 .name = X86_CPU_TYPE_NAME("host"),
1617 .parent = TYPE_X86_CPU,
1618 .instance_init = host_x86_cpu_initfn,
1619 .class_init = host_x86_cpu_class_init,
/* Print a warning to stderr for every bit set in @mask, naming the
 * corresponding feature of feature word @w that the accelerator
 * (KVM host or TCG) does not support. */
1624 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1626 FeatureWordInfo *f = &feature_word_info[w];
1629 for (i = 0; i < 32; ++i) {
1630 if ((1UL << i) & mask) {
1631 const char *reg = get_register_name_32(f->cpuid_reg);
1633 fprintf(stderr, "warning: %s doesn't support requested feature: "
1634 "CPUID.%02XH:%s%s%s [bit %d]\n",
1635 kvm_enabled() ? "host" : "TCG",
/* Feature name may be NULL for unnamed bits; print only the bit then. */
1637 f->feat_names[i] ? "." : "",
1638 f->feat_names[i] ? f->feat_names[i] : "", i);
/* QOM getter for the "family" property: base family (CPUID[1].EAX
 * bits 11:8) plus the extended family field (bits 27:20). */
1643 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1644 const char *name, void *opaque,
1647 X86CPU *cpu = X86_CPU(obj);
1648 CPUX86State *env = &cpu->env;
1651 value = (env->cpuid_version >> 8) & 0xf;
1653 value += (env->cpuid_version >> 20) & 0xff;
1655 visit_type_int(v, name, &value, errp);
/* QOM setter for the "family" property.  Families above 0xf are encoded
 * as base family 0xf plus the remainder in the extended family field,
 * hence the 0xff + 0xf upper bound. */
1658 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1659 const char *name, void *opaque,
1662 X86CPU *cpu = X86_CPU(obj);
1663 CPUX86State *env = &cpu->env;
1664 const int64_t min = 0;
1665 const int64_t max = 0xff + 0xf;
1666 Error *local_err = NULL;
1669 visit_type_int(v, name, &value, &local_err);
1671 error_propagate(errp, local_err);
1674 if (value < min || value > max) {
1675 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1676 name ? name : "null", value, min, max);
/* Clear both the base (11:8) and extended (27:20) family fields. */
1680 env->cpuid_version &= ~0xff00f00;
/* presumably taken when value > 0xf (condition elided in this view) */
1682 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1684 env->cpuid_version |= value << 8;
/* QOM getter for the "model" property: base model (CPUID[1].EAX
 * bits 7:4) combined with the extended model field (bits 19:16) as the
 * high nibble. */
1688 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1689 const char *name, void *opaque,
1692 X86CPU *cpu = X86_CPU(obj);
1693 CPUX86State *env = &cpu->env;
1696 value = (env->cpuid_version >> 4) & 0xf;
1697 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1698 visit_type_int(v, name, &value, errp);
/* QOM setter for the "model" property (0..0xff): low nibble goes to the
 * base model field (7:4), high nibble to the extended model field
 * (19:16). */
1701 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1702 const char *name, void *opaque,
1705 X86CPU *cpu = X86_CPU(obj);
1706 CPUX86State *env = &cpu->env;
1707 const int64_t min = 0;
1708 const int64_t max = 0xff;
1709 Error *local_err = NULL;
1712 visit_type_int(v, name, &value, &local_err);
1714 error_propagate(errp, local_err);
1717 if (value < min || value > max) {
1718 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1719 name ? name : "null", value, min, max);
1723 env->cpuid_version &= ~0xf00f0;
1724 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
/* QOM getter for the "stepping" property (CPUID[1].EAX bits 3:0). */
1727 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1728 const char *name, void *opaque,
1731 X86CPU *cpu = X86_CPU(obj);
1732 CPUX86State *env = &cpu->env;
1735 value = env->cpuid_version & 0xf;
1736 visit_type_int(v, name, &value, errp);
/* QOM setter for the "stepping" property (0..0xf, CPUID[1].EAX
 * bits 3:0). */
1739 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1740 const char *name, void *opaque,
1743 X86CPU *cpu = X86_CPU(obj);
1744 CPUX86State *env = &cpu->env;
1745 const int64_t min = 0;
1746 const int64_t max = 0xf;
1747 Error *local_err = NULL;
1750 visit_type_int(v, name, &value, &local_err);
1752 error_propagate(errp, local_err);
1755 if (value < min || value > max) {
1756 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1757 name ? name : "null", value, min, max);
1761 env->cpuid_version &= ~0xf;
1762 env->cpuid_version |= value & 0xf;
/* QOM getter for "vendor": returns a newly allocated NUL-terminated
 * 12-character vendor string decoded from the three CPUID vendor words.
 * Caller owns (and must free) the returned buffer. */
1765 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1767 X86CPU *cpu = X86_CPU(obj);
1768 CPUX86State *env = &cpu->env;
1771 value = g_malloc(CPUID_VENDOR_SZ + 1);
1772 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1773 env->cpuid_vendor3);
/* QOM setter for "vendor": packs an exactly-CPUID_VENDOR_SZ (12) byte
 * string into the three little-endian vendor words (4 chars each). */
1777 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1780 X86CPU *cpu = X86_CPU(obj);
1781 CPUX86State *env = &cpu->env;
1784 if (strlen(value) != CPUID_VENDOR_SZ) {
1785 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1789 env->cpuid_vendor1 = 0;
1790 env->cpuid_vendor2 = 0;
1791 env->cpuid_vendor3 = 0;
1792 for (i = 0; i < 4; i++) {
1793 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1794 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1795 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
/* QOM getter for "model-id": returns a newly allocated copy of the
 * 48-byte brand string, unpacked from the cpuid_model word array
 * (4 chars per 32-bit word, little-endian).  Caller frees. */
1799 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1801 X86CPU *cpu = X86_CPU(obj);
1802 CPUX86State *env = &cpu->env;
1806 value = g_malloc(48 + 1);
1807 for (i = 0; i < 48; i++) {
1808 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
/* QOM setter for "model-id": packs @model_id into the 48-byte
 * cpuid_model word array, zero-padding the remainder. */
1814 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1817 X86CPU *cpu = X86_CPU(obj);
1818 CPUX86State *env = &cpu->env;
1821 if (model_id == NULL) {
1824 len = strlen(model_id);
1825 memset(env->cpuid_model, 0, 48);
1826 for (i = 0; i < 48; i++) {
1830 c = (uint8_t)model_id[i];
1832 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* QOM getter for "tsc-frequency": reports tsc_khz scaled to Hz. */
1836 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1837 void *opaque, Error **errp)
1839 X86CPU *cpu = X86_CPU(obj);
1842 value = cpu->env.tsc_khz * 1000;
1843 visit_type_int(v, name, &value, errp);
/* QOM setter for "tsc-frequency": accepts a value in Hz and stores it
 * in kHz (integer division truncates sub-kHz remainders).  Also records
 * the value as the user-requested frequency (user_tsc_khz). */
1846 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1847 void *opaque, Error **errp)
1849 X86CPU *cpu = X86_CPU(obj);
1850 const int64_t min = 0;
1851 const int64_t max = INT64_MAX;
1852 Error *local_err = NULL;
1855 visit_type_int(v, name, &value, &local_err);
1857 error_propagate(errp, local_err);
1860 if (value < min || value > max) {
1861 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1862 name ? name : "null", value, min, max);
1866 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1869 /* Generic getter for "feature-words" and "filtered-features" properties */
/* @opaque points at the uint32_t feature-word array to expose.  Builds a
 * linked list of X86CPUFeatureWordInfo on the stack and emits it through
 * the visitor; no heap allocation is needed. */
1870 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1871 const char *name, void *opaque,
1874 uint32_t *array = (uint32_t *)opaque;
1876 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1877 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1878 X86CPUFeatureWordInfoList *list = NULL;
1880 for (w = 0; w < FEATURE_WORDS; w++) {
1881 FeatureWordInfo *wi = &feature_word_info[w];
1882 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1883 qwi->cpuid_input_eax = wi->cpuid_eax;
1884 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1885 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1886 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1887 qwi->features = array[w];
1889 /* List will be in reverse order, but order shouldn't matter */
1890 list_entries[w].next = list;
1891 list_entries[w].value = &word_infos[w];
1892 list = &list_entries[w];
1895 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
/* QOM getter for "hv-spinlocks": the Hyper-V spinlock retry count. */
1898 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1899 void *opaque, Error **errp)
1901 X86CPU *cpu = X86_CPU(obj);
1902 int64_t value = cpu->hyperv_spinlock_attempts;
1904 visit_type_int(v, name, &value, errp);
/* QOM setter for "hv-spinlocks".  The minimum of 0xFFF comes from the
 * Hyper-V spec's lowest supported spinlock retry count (smaller values
 * are rejected). */
1907 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1908 void *opaque, Error **errp)
1910 const int64_t min = 0xFFF;
1911 const int64_t max = UINT_MAX;
1912 X86CPU *cpu = X86_CPU(obj);
1916 visit_type_int(v, name, &value, &err);
1918 error_propagate(errp, err);
1922 if (value < min || value > max) {
1923 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1924 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1925 object_get_typename(obj), name ? name : "null",
1929 cpu->hyperv_spinlock_attempts = value;
/* PropertyInfo wiring the hv-spinlocks getter/setter into qdev. */
1932 static PropertyInfo qdev_prop_spinlocks = {
1934 .get = x86_get_hv_spinlocks,
1935 .set = x86_set_hv_spinlocks,
1938 /* Convert all '_' in a feature string option name to '-', to make feature
1939 * name conform to QOM property naming rule, which uses '-' instead of '_'.
/* Mutates @s in place. */
1941 static inline void feat2prop(char *s)
1943 while ((s = strchr(s, '_'))) {
1948 /* Compatibility hack to maintain legacy +-feat semantics,
1949 * where +-feat overwrites any feature set by
1950 * feat=on|feat even if the latter is parsed after +-feat
1951 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
1953 static GList *plus_features, *minus_features;
1955 /* Parse "+feature,-feature,feature=foo" CPU feature string
/* Registers each parsed feature as a global qdev property default for
 * @typename.  Legacy "+feat"/"-feat" entries are accumulated into the
 * plus_features/minus_features lists instead.  Only the first call does
 * anything: subsequent calls return early (cpu_globals_initialized). */
1957 static void x86_cpu_parse_featurestr(const char *typename, char *features,
1960 char *featurestr; /* Single 'key=value" string being parsed */
1961 Error *local_err = NULL;
1962 static bool cpu_globals_initialized;
1964 if (cpu_globals_initialized) {
1967 cpu_globals_initialized = true;
/* NOTE: strtok() mutates @features and keeps static state; safe here
 * only because this function runs once, single-threaded. */
1973 for (featurestr = strtok(features, ",");
1974 featurestr && !local_err;
1975 featurestr = strtok(NULL, ",")) {
1977 const char *val = NULL;
1980 GlobalProperty *prop;
1982 /* Compatibility syntax: */
1983 if (featurestr[0] == '+') {
1984 plus_features = g_list_append(plus_features,
1985 g_strdup(featurestr + 1));
1987 } else if (featurestr[0] == '-') {
1988 minus_features = g_list_append(minus_features,
1989 g_strdup(featurestr + 1));
1993 eq = strchr(featurestr, '=');
2001 feat2prop(featurestr);
/* "tsc-freq" is legacy syntax: accepts a size-style suffix and is
 * converted to the canonical "tsc-frequency" numeric property. */
2005 if (!strcmp(name, "tsc-freq")) {
2009 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2010 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2011 if (tsc_freq < 0 || *err) {
2012 error_setg(errp, "bad numerical value %s", val);
2015 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2017 name = "tsc-frequency";
2020 prop = g_new0(typeof(*prop), 1);
2021 prop->driver = typename;
2022 prop->property = g_strdup(name);
2023 prop->value = g_strdup(val);
2024 prop->errp = &error_fatal;
2025 qdev_prop_register_global(prop);
2029 error_propagate(errp, local_err);
2033 /* Print all cpuid feature names in featureset
/* @featureset is a 32-entry array of bit names; NULL entries (unnamed
 * bits) are skipped.  Names are printed space-separated on one line. */
2035 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2040 for (bit = 0; bit < 32; bit++) {
2041 if (featureset[bit]) {
2042 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2048 /* Sort alphabetically by type name, listing kvm_required models last. */
/* GCompareFunc for sorting X86CPU ObjectClass lists. */
2049 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2051 ObjectClass *class_a = (ObjectClass *)a;
2052 ObjectClass *class_b = (ObjectClass *)b;
2053 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2054 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2055 const char *name_a, *name_b;
2057 if (cc_a->kvm_required != cc_b->kvm_required) {
2058 /* kvm_required items go last */
2059 return cc_a->kvm_required ? 1 : -1;
2061 name_a = object_class_get_name(class_a);
2062 name_b = object_class_get_name(class_b);
2063 return strcmp(name_a, name_b);
/* Return a newly allocated list of all X86CPU classes, sorted with
 * x86_cpu_list_compare().  Caller must g_slist_free() it. */
2067 static GSList *get_sorted_cpu_model_list(void)
2069 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2070 list = g_slist_sort(list, x86_cpu_list_compare);
/* g_slist_foreach() callback: print one CPU model line (name and
 * description) to the CPUListState in @user_data.  Falls back to the
 * model definition's model_id when no class description is set. */
2074 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2076 ObjectClass *oc = data;
2077 X86CPUClass *cc = X86_CPU_CLASS(oc);
2078 CPUListState *s = user_data;
2079 char *name = x86_cpu_class_get_model_name(cc);
2080 const char *desc = cc->model_description;
2082 desc = cc->cpu_def->model_id;
2085 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2090 /* list available CPU models and flags */
/* Prints the sorted CPU model list followed by every recognized CPUID
 * feature-flag name, one feature word per line.  Used by "-cpu help". */
2091 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2096 .cpu_fprintf = cpu_fprintf,
2100 (*cpu_fprintf)(f, "Available CPUs:\n");
2101 list = get_sorted_cpu_model_list();
2102 g_slist_foreach(list, x86_cpu_list_entry, &s);
2105 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2106 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2107 FeatureWordInfo *fw = &feature_word_info[i];
2109 (*cpu_fprintf)(f, " ");
2110 listflags(f, cpu_fprintf, fw->feat_names);
2111 (*cpu_fprintf)(f, "\n");
/* g_slist_foreach() callback: prepend a CpuDefinitionInfo entry for one
 * CPU class to the list pointed at by @user_data.  Entries are
 * heap-allocated; ownership passes to the list. */
2115 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2117 ObjectClass *oc = data;
2118 X86CPUClass *cc = X86_CPU_CLASS(oc);
2119 CpuDefinitionInfoList **cpu_list = user_data;
2120 CpuDefinitionInfoList *entry;
2121 CpuDefinitionInfo *info;
2123 info = g_malloc0(sizeof(*info));
2124 info->name = x86_cpu_class_get_model_name(cc);
2126 entry = g_malloc0(sizeof(*entry));
2127 entry->value = info;
2128 entry->next = *cpu_list;
/* QMP query-cpu-definitions implementation: build the list of all
 * known CPU model definitions. */
2132 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2134 CpuDefinitionInfoList *cpu_list = NULL;
2135 GSList *list = get_sorted_cpu_model_list();
2136 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
/* Return the bits of feature word @w that the current accelerator can
 * support: KVM's reported CPUID under KVM, the static tcg_features mask
 * under TCG.  If @migratable_only, further restrict to flags known to
 * be migration-safe. */
2141 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2142 bool migratable_only)
2144 FeatureWordInfo *wi = &feature_word_info[w];
2147 if (kvm_enabled()) {
2148 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2151 } else if (tcg_enabled()) {
2152 r = wi->tcg_features;
2156 if (migratable_only) {
2157 r &= x86_cpu_get_migratable_flags(w);
2163 * Filters CPU feature words based on host availability of each feature.
2165 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
/* Side effects: clears unsupported bits from env->features[] and records
 * the removed bits in cpu->filtered_features[] for later reporting. */
2167 static int x86_cpu_filter_features(X86CPU *cpu)
2169 CPUX86State *env = &cpu->env;
2173 for (w = 0; w < FEATURE_WORDS; w++) {
2174 uint32_t host_feat =
2175 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2176 uint32_t requested_features = env->features[w];
2177 env->features[w] &= host_feat;
2178 cpu->filtered_features[w] = requested_features & ~env->features[w];
2179 if (cpu->filtered_features[w]) {
/* Warn about every feature bit that x86_cpu_filter_features() removed. */
2187 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2191 for (w = 0; w < FEATURE_WORDS; w++) {
2192 report_unavailable_features(w, cpu->filtered_features[w]);
/* Apply a NULL-terminated PropValue table to @cpu by parsing each value
 * string into the corresponding QOM property. */
2196 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2199 for (pv = props; pv->prop; pv++) {
2203 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2208 /* Load data from X86CPUDefinition
/* Copies the model definition @def into @cpu's QOM properties and
 * feature words, then applies accelerator-specific defaults
 * (kvm_default_props / tcg_default_props) and the vendor override. */
2210 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2212 CPUX86State *env = &cpu->env;
2214 char host_vendor[CPUID_VENDOR_SZ + 1];
2217 /* CPU models only set _minimum_ values for level/xlevel: */
2218 object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
2219 object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2221 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2222 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2223 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2224 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2225 for (w = 0; w < FEATURE_WORDS; w++) {
2226 env->features[w] = def->features[w];
2229 /* Special cases not set in the X86CPUDefinition structs: */
2230 if (kvm_enabled()) {
/* x2apic needs the in-kernel irqchip; disable its default otherwise. */
2231 if (!kvm_irqchip_in_kernel()) {
2232 x86_cpu_change_kvm_default("x2apic", "off");
2235 x86_cpu_apply_props(cpu, kvm_default_props);
2236 } else if (tcg_enabled()) {
2237 x86_cpu_apply_props(cpu, tcg_default_props);
/* Always advertise that we are running under a hypervisor. */
2240 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2242 /* sysenter isn't supported in compatibility mode on AMD,
2243 * syscall isn't supported in compatibility mode on Intel.
2244 * Normally we advertise the actual CPU vendor, but you can
2245 * override this using the 'vendor' property if you want to use
2246 * KVM's sysenter/syscall emulation in compatibility mode and
2247 * when doing cross vendor migration
2249 vendor = def->vendor;
2250 if (kvm_enabled()) {
2251 uint32_t ebx = 0, ecx = 0, edx = 0;
2252 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2253 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2254 vendor = host_vendor;
2257 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
/* Create and realize an X86CPU from a "-cpu"-style model string. */
2261 X86CPU *cpu_x86_init(const char *cpu_model)
2263 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
/* class_init for model-specific CPU types: @data carries the
 * X86CPUDefinition registered via x86_register_cpudef_type(). */
2266 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2268 X86CPUDefinition *cpudef = data;
2269 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2271 xcc->cpu_def = cpudef;
/* Register one built-in CPU model definition as a QOM subtype of
 * TYPE_X86_CPU, with its definition passed as class_data. */
2274 static void x86_register_cpudef_type(X86CPUDefinition *def)
2276 char *typename = x86_cpu_type_name(def->name);
2279 .parent = TYPE_X86_CPU,
2280 .class_init = x86_cpu_cpudef_class_init,
2288 #if !defined(CONFIG_USER_ONLY)
/* Remove the APIC feature bit from CPUID[1].EDX (used when no APIC is
 * present in the machine). */
2290 void cpu_clear_apic_feature(CPUX86State *env)
2292 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2295 #endif /* !CONFIG_USER_ONLY */
/*
 * cpu_x86_cpuid: compute the guest-visible CPUID leaf 'index'
 * (sub-leaf 'count') for this vCPU and store it in *eax/*ebx/*ecx/*edx.
 *
 * NOTE(review): this excerpt is missing interleaved lines (case labels,
 * break statements, closing braces), so the leaf boundaries below are
 * inferred from the original comments — confirm against the full file.
 */
2297 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2298 uint32_t *eax, uint32_t *ebx,
2299 uint32_t *ecx, uint32_t *edx)
2301 X86CPU *cpu = x86_env_get_cpu(env);
2302 CPUState *cs = CPU(cpu);
2303 uint32_t pkg_offset;
/* Clamp out-of-range leaves: extended (0x8000_xxxx) leaves are limited
 * by cpuid_xlevel, Centaur (0xC000_xxxx) leaves by cpuid_xlevel2, and
 * basic leaves by cpuid_level. */
2305 /* test if maximum index reached */
2306 if (index & 0x80000000) {
2307 if (index > env->cpuid_xlevel) {
2308 if (env->cpuid_xlevel2 > 0) {
2309 /* Handle the Centaur's CPUID instruction. */
2310 if (index > env->cpuid_xlevel2) {
2311 index = env->cpuid_xlevel2;
2312 } else if (index < 0xC0000000) {
2313 index = env->cpuid_xlevel;
2316 /* Intel documentation states that invalid EAX input will
2317 * return the same information as EAX=cpuid_level
2318 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2320 index = env->cpuid_level;
2324 if (index > env->cpuid_level)
2325 index = env->cpuid_level;
/* Leaf 0: max basic leaf + vendor string in EBX/EDX/ECX order. */
2330 *eax = env->cpuid_level;
2331 *ebx = env->cpuid_vendor1;
2332 *edx = env->cpuid_vendor2;
2333 *ecx = env->cpuid_vendor3;
/* Leaf 1: version/feature info; EBX carries APIC ID and CLFLUSH size. */
2336 *eax = env->cpuid_version;
2337 *ebx = (cpu->apic_id << 24) |
2338 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2339 *ecx = env->features[FEAT_1_ECX];
/* OSXSAVE mirrors CR4.OSXSAVE at read time; it is not a static bit. */
2340 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2341 *ecx |= CPUID_EXT_OSXSAVE;
2343 *edx = env->features[FEAT_1_EDX];
2344 if (cs->nr_cores * cs->nr_threads > 1) {
2345 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
/* Leaf 2: legacy cache descriptors. */
2350 /* cache info: needed for Pentium Pro compatibility */
2351 if (cpu->cache_info_passthrough) {
2352 host_cpuid(index, 0, eax, ebx, ecx, edx);
2355 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2357 if (!cpu->enable_l3_cache) {
2360 *ecx = L3_N_DESCRIPTOR;
2362 *edx = (L1D_DESCRIPTOR << 16) | \
2363 (L1I_DESCRIPTOR << 8) | \
/* Leaf 4: deterministic cache parameters, one sub-leaf per cache level. */
2367 /* cache info: needed for Core compatibility */
2368 if (cpu->cache_info_passthrough) {
2369 host_cpuid(index, count, eax, ebx, ecx, edx);
2370 *eax &= ~0xFC000000;
2374 case 0: /* L1 dcache info */
2375 *eax |= CPUID_4_TYPE_DCACHE | \
2376 CPUID_4_LEVEL(1) | \
2377 CPUID_4_SELF_INIT_LEVEL;
2378 *ebx = (L1D_LINE_SIZE - 1) | \
2379 ((L1D_PARTITIONS - 1) << 12) | \
2380 ((L1D_ASSOCIATIVITY - 1) << 22);
2381 *ecx = L1D_SETS - 1;
2382 *edx = CPUID_4_NO_INVD_SHARING;
2384 case 1: /* L1 icache info */
2385 *eax |= CPUID_4_TYPE_ICACHE | \
2386 CPUID_4_LEVEL(1) | \
2387 CPUID_4_SELF_INIT_LEVEL;
2388 *ebx = (L1I_LINE_SIZE - 1) | \
2389 ((L1I_PARTITIONS - 1) << 12) | \
2390 ((L1I_ASSOCIATIVITY - 1) << 22);
2391 *ecx = L1I_SETS - 1;
2392 *edx = CPUID_4_NO_INVD_SHARING;
2394 case 2: /* L2 cache info */
2395 *eax |= CPUID_4_TYPE_UNIFIED | \
2396 CPUID_4_LEVEL(2) | \
2397 CPUID_4_SELF_INIT_LEVEL;
/* EAX[25:14] = max logical processors sharing this cache, minus 1. */
2398 if (cs->nr_threads > 1) {
2399 *eax |= (cs->nr_threads - 1) << 14;
2401 *ebx = (L2_LINE_SIZE - 1) | \
2402 ((L2_PARTITIONS - 1) << 12) | \
2403 ((L2_ASSOCIATIVITY - 1) << 22);
2405 *edx = CPUID_4_NO_INVD_SHARING;
2407 case 3: /* L3 cache info */
2408 if (!cpu->enable_l3_cache) {
2415 *eax |= CPUID_4_TYPE_UNIFIED | \
2416 CPUID_4_LEVEL(3) | \
2417 CPUID_4_SELF_INIT_LEVEL;
/* L3 is shared by the whole package, so use the package APIC-ID width. */
2418 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2419 *eax |= ((1 << pkg_offset) - 1) << 14;
2420 *ebx = (L3_N_LINE_SIZE - 1) | \
2421 ((L3_N_PARTITIONS - 1) << 12) | \
2422 ((L3_N_ASSOCIATIVITY - 1) << 22);
2423 *ecx = L3_N_SETS - 1;
2424 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2426 default: /* end of info */
2435 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2436 if ((*eax & 31) && cs->nr_cores > 1) {
2437 *eax |= (cs->nr_cores - 1) << 26;
/* Leaf 5: MONITOR/MWAIT parameters. */
2441 /* mwait info: needed for Core compatibility */
2442 *eax = 0; /* Smallest monitor-line size in bytes */
2443 *ebx = 0; /* Largest monitor-line size in bytes */
2444 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2448 /* Thermal and Power Leaf */
2449 *eax = env->features[FEAT_6_EAX];
2455 /* Structured Extended Feature Flags Enumeration Leaf */
2457 *eax = 0; /* Maximum ECX value for sub-leaves */
2458 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2459 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
/* OSPKE mirrors CR4.PKE at read time, like OSXSAVE above. */
2460 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2461 *ecx |= CPUID_7_0_ECX_OSPKE;
2463 *edx = 0; /* Reserved */
2472 /* Direct Cache Access Information Leaf */
2473 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2479 /* Architectural Performance Monitoring Leaf */
/* PMU leaf is only populated under KVM with pmu=on; zero otherwise. */
2480 if (kvm_enabled() && cpu->enable_pmu) {
2481 KVMState *s = cs->kvm_state;
2483 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2484 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2485 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2486 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2495 /* Extended Topology Enumeration Leaf */
2496 if (!cpu->enable_cpuid_0xb) {
2497 *eax = *ebx = *ecx = *edx = 0;
2501 *ecx = count & 0xff;
2502 *edx = cpu->apic_id;
/* Sub-leaf 0 = SMT level, sub-leaf 1 = core level, others invalid. */
2506 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2507 *ebx = cs->nr_threads;
2508 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2511 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2512 *ebx = cs->nr_cores * cs->nr_threads;
2513 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2518 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2521 assert(!(*eax & ~0x1f));
2522 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2525 /* Processor Extended State */
2530 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
/* Sub-leaf 0: supported XCR0 bits and total save-area size. */
2535 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2536 *eax = env->features[FEAT_XSAVE_COMP_LO];
2537 *edx = env->features[FEAT_XSAVE_COMP_HI];
2539 } else if (count == 1) {
2540 *eax = env->features[FEAT_XSAVE];
2541 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2542 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
2543 const ExtSaveArea *esa = &x86_ext_save_areas[count];
/* Leaf 0x80000000: max extended leaf + vendor string. */
2551 *eax = env->cpuid_xlevel;
2552 *ebx = env->cpuid_vendor1;
2553 *edx = env->cpuid_vendor2;
2554 *ecx = env->cpuid_vendor3;
2557 *eax = env->cpuid_version;
2559 *ecx = env->features[FEAT_8000_0001_ECX];
2560 *edx = env->features[FEAT_8000_0001_EDX];
2562 /* The Linux kernel checks for the CMPLegacy bit and
2563 * discards multiple thread information if it is set.
2564 * So don't set it here for Intel to make Linux guests happy.
2566 if (cs->nr_cores * cs->nr_threads > 1) {
2567 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2568 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2569 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2570 *ecx |= 1 << 1; /* CmpLegacy bit */
/* Leaves 0x80000002..4: 48-byte model-ID string, 16 bytes per leaf. */
2577 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2578 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2579 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2580 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2583 /* cache info (L1 cache) */
2584 if (cpu->cache_info_passthrough) {
2585 host_cpuid(index, 0, eax, ebx, ecx, edx);
2588 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2589 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2590 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2591 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2592 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2593 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2594 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2595 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2598 /* cache info (L2 cache) */
2599 if (cpu->cache_info_passthrough) {
2600 host_cpuid(index, 0, eax, ebx, ecx, edx);
2603 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2604 (L2_DTLB_2M_ENTRIES << 16) | \
2605 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2606 (L2_ITLB_2M_ENTRIES);
2607 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2608 (L2_DTLB_4K_ENTRIES << 16) | \
2609 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2610 (L2_ITLB_4K_ENTRIES);
2611 *ecx = (L2_SIZE_KB_AMD << 16) | \
2612 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2613 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
/* EDX encodes L3: size is reported in 512KB units (hence / 512). */
2614 if (!cpu->enable_l3_cache) {
2615 *edx = ((L3_SIZE_KB / 512) << 18) | \
2616 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2617 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2619 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
2620 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
2621 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
2628 *edx = env->features[FEAT_8000_0007_EDX];
2631 /* virtual & phys address size in low 2 bytes. */
2632 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2633 /* 64 bit processor, 48 bits virtual, configurable
2636 *eax = 0x00003000 + cpu->phys_bits;
2638 *eax = cpu->phys_bits;
/* ECX[7:0] = number of logical processors in the package, minus 1. */
2643 if (cs->nr_cores * cs->nr_threads > 1) {
2644 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2648 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2649 *eax = 0x00000001; /* SVM Revision */
2650 *ebx = 0x00000010; /* nr of ASIDs */
2652 *edx = env->features[FEAT_SVM]; /* optional features */
/* Centaur leaves (0xC000_xxxx). */
2661 *eax = env->cpuid_xlevel2;
2667 /* Support for VIA CPU's CPUID instruction */
2668 *eax = env->cpuid_version;
2671 *edx = env->features[FEAT_C000_0001_EDX];
2676 /* Reserved for the future, and now filled with zero */
2683 /* reserved values: zero */
2692 /* CPUClass::reset() */
/*
 * Bring the vCPU to its architectural power-on/RESET state: real mode
 * at F000:FFF0, flat 64KB segments, FPU/SSE defaults, MTRRs cleared.
 * NOTE(review): several declaration/brace lines are missing from this
 * excerpt (e.g. the 'int i', 'cr4', 'xcr0' declarations).
 */
2693 static void x86_cpu_reset(CPUState *s)
2695 X86CPU *cpu = X86_CPU(s);
2696 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2697 CPUX86State *env = &cpu->env;
2702 xcc->parent_reset(s);
/* Only the fields before end_reset_fields are architectural state. */
2704 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
2708 env->old_exception = -1;
2710 /* init to reset state */
2712 env->hflags2 |= HF2_GIF_MASK;
/* CR0 reset value: ET set, CD/NW set, paging/protection off. */
2714 cpu_x86_update_cr0(env, 0x60000010);
2715 env->a20_mask = ~0x0;
2716 env->smbase = 0x30000;
2718 env->idt.limit = 0xffff;
2719 env->gdt.limit = 0xffff;
2720 env->ldt.limit = 0xffff;
2721 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2722 env->tr.limit = 0xffff;
2723 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* CS base 0xffff0000 + IP 0xfff0 puts execution at the reset vector. */
2725 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2726 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2727 DESC_R_MASK | DESC_A_MASK);
2728 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2729 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2731 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2732 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2734 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2735 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2737 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2738 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2740 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2741 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
/* EDX holds family/model/stepping after reset, per the SDM. */
2745 env->regs[R_EDX] = env->cpuid_version;
2750 for (i = 0; i < 8; i++) {
2753 cpu_set_fpuc(env, 0x37f);
2755 env->mxcsr = 0x1f80;
2756 /* All units are in INIT state. */
2759 env->pat = 0x0007040600070406ULL;
2760 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2762 memset(env->dr, 0, sizeof(env->dr));
2763 env->dr[6] = DR6_FIXED_1;
2764 env->dr[7] = DR7_FIXED_1;
2765 cpu_breakpoint_remove_all(s, BP_CPU);
2766 cpu_watchpoint_remove_all(s, BP_CPU);
2769 xcr0 = XSTATE_FP_MASK;
2771 #ifdef CONFIG_USER_ONLY
2772 /* Enable all the features for user-mode. */
2773 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2774 xcr0 |= XSTATE_SSE_MASK;
2776 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2777 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2778 if (env->features[esa->feature] & esa->bits) {
2783 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2784 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2786 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2787 cr4 |= CR4_FSGSBASE_MASK;
2792 cpu_x86_update_cr4(env, cr4);
2795 * SDM 11.11.5 requires:
2796 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2797 * - IA32_MTRR_PHYSMASKn.V = 0
2798 * All other bits are undefined. For simplification, zero it all.
2800 env->mtrr_deftype = 0;
2801 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2802 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2804 #if !defined(CONFIG_USER_ONLY)
2805 /* We hard-wire the BSP to the first CPU. */
2806 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
/* Application processors start halted, waiting for INIT/SIPI. */
2808 s->halted = !cpu_is_bsp(cpu);
2810 if (kvm_enabled()) {
2811 kvm_arch_reset_vcpu(cpu);
2816 #ifndef CONFIG_USER_ONLY
/* True if this vCPU is the bootstrap processor (BSP flag in the APIC
 * base MSR). */
2817 bool cpu_is_bsp(X86CPU *cpu)
2819 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2822 /* TODO: remove me, when reset over QOM tree is implemented */
2823 static void x86_cpu_machine_reset_cb(void *opaque)
2825 X86CPU *cpu = opaque;
2826 cpu_reset(CPU(cpu));
/*
 * Enable the machine-check architecture (MCG_CAP/MCG_CTL and per-bank
 * control MSRs) for family >= 6 CPUs that advertise both MCE and MCA.
 * NOTE(review): the 'int bank;' declaration line is missing from this
 * excerpt.
 */
2830 static void mce_init(X86CPU *cpu)
2832 CPUX86State *cenv = &cpu->env;
/* (cpuid_version >> 8) & 0xf extracts the family field. */
2835 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2836 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2837 (CPUID_MCE | CPUID_MCA)) {
2838 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2839 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2840 cenv->mcg_ctl = ~(uint64_t)0;
/* All controls enabled in every bank (banks are 4 MSRs apart). */
2841 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2842 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2847 #ifndef CONFIG_USER_ONLY
2848 APICCommonClass *apic_get_class(void)
2850 const char *apic_type = "apic";
2852 if (kvm_apic_in_kernel()) {
2853 apic_type = "kvm-apic";
2854 } else if (xen_enabled()) {
2855 apic_type = "xen-apic";
2858 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
/*
 * Instantiate the local APIC device for this CPU, parent it under the
 * CPU object as "lapic", and program its id/base address.
 * NOTE(review): error-handling lines appear to be missing from this
 * excerpt — confirm against the full file.
 */
2861 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2863 APICCommonState *apic;
2864 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
2866 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
2868 object_property_add_child(OBJECT(cpu), "lapic",
2869 OBJECT(cpu->apic_state), &error_abort);
/* Drop the extra reference from object_new(); the child link owns it. */
2870 object_unref(OBJECT(cpu->apic_state));
2872 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2873 /* TODO: convert to link<> */
2874 apic = APIC_COMMON(cpu->apic_state);
2876 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/*
 * Realize the CPU's local APIC (if one was created) and map the APIC
 * MMIO region once for the whole system — the region is shared, with
 * per-CPU dispatch done via the current-CPU pointer.
 */
2879 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2881 APICCommonState *apic;
/* 'static' is deliberate: the MMIO area must only be mapped once even
 * though every CPU runs this function. */
2882 static bool apic_mmio_map_once;
2884 if (cpu->apic_state == NULL) {
2887 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2890 /* Map APIC MMIO area */
2891 apic = APIC_COMMON(cpu->apic_state);
2892 if (!apic_mmio_map_once) {
2893 memory_region_add_subregion_overlap(get_system_memory(),
2895 MSR_IA32_APICBASE_BASE,
2898 apic_mmio_map_once = true;
/*
 * machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's address space (disabled by default, toggled
 * when the CPU enters/leaves SMM).
 * NOTE(review): the 'if (smram)' guard line appears to be missing from
 * this excerpt — confirm against the full file.
 */
2902 static void x86_cpu_machine_done(Notifier *n, void *unused)
2904 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2905 MemoryRegion *smram =
2906 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2909 cpu->smram = g_new(MemoryRegion, 1);
2910 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2911 smram, 0, 1ull << 32);
2912 memory_region_set_enabled(cpu->smram, false);
/* Priority 1 so SMRAM overlays normal memory when enabled. */
2913 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2917 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2922 /* Note: Only safe for use on x86(-64) hosts */
2923 static uint32_t x86_host_phys_bits(void)
2926 uint32_t host_phys_bits;
2928 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2929 if (eax >= 0x80000008) {
2930 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2931 /* Note: According to AMD doc 25481 rev 2.34 they have a field
2932 * at 23:16 that can specify a maximum physical address bits for
2933 * the guest that can override this value; but I've not seen
2934 * anything with that set.
2936 host_phys_bits = eax & 0xff;
2938 /* It's an odd 64 bit machine that doesn't have the leaf for
2939 * physical address bits; fall back to 36 that's most older
2942 host_phys_bits = 36;
2945 return host_phys_bits;
2948 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2955 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
2956 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
2958 CPUX86State *env = &cpu->env;
2959 FeatureWordInfo *fi = &feature_word_info[w];
2960 uint32_t eax = fi->cpuid_eax;
/* Top nibble of the leaf number selects which level field to bump:
 * 0x00000000 -> level, 0x80000000 -> xlevel, 0xC0000000 -> xlevel2.
 * NOTE(review): the switch/case lines are missing from this excerpt. */
2961 uint32_t region = eax & 0xF0000000;
/* A feature word with no bits set never forces a level increase. */
2963 if (!env->features[w]) {
2969 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
2972 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
2975 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
2980 /* Calculate XSAVE components based on the configured CPU feature flags */
2981 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
2983 CPUX86State *env = &cpu->env;
/* Without XSAVE no component bits are published at all.
 * NOTE(review): the 'mask'/'i' declarations and the early-return body
 * are missing from this excerpt. */
2987 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
/* Each enabled save area contributes one bit to the XCR0-style mask. */
2992 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2993 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2994 if (env->features[esa->feature] & esa->bits) {
2995 mask |= (1ULL << i);
/* Publish the 64-bit mask as the two CPUID.0DH component words. */
2999 env->features[FEAT_XSAVE_COMP_LO] = mask;
3000 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
/* Vendor checks: compare all three CPUID.0 vendor-string registers. */
3003 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
3004 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
3005 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
3006 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
3007 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
3008 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/*
 * DeviceClass::realize for X86CPU: resolve feature flags, auto-adjust
 * CPUID levels, pick phys-bits, create the APIC, wire up the per-CPU
 * address space (TCG), and finally realize the parent class.
 * NOTE(review): many interleaved lines (declarations, braces, goto
 * labels) are missing from this excerpt; comments are hedged.
 */
3009 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
3011 CPUState *cs = CPU(dev);
3012 X86CPU *cpu = X86_CPU(dev);
3013 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3014 CPUX86State *env = &cpu->env;
3015 Error *local_err = NULL;
3016 static bool ht_warned;
3020 if (xcc->kvm_required && !kvm_enabled()) {
3021 char *name = x86_cpu_class_get_model_name(xcc);
3022 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3027 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3028 error_setg(errp, "apic-id property was not initialized properly");
3032 /*TODO: cpu->host_features incorrectly overwrites features
3033 * set using "feat=on|off". Once we fix this, we can convert
3034 * plus_features & minus_features to global properties
3035 * inside x86_cpu_parse_featurestr() too.
3037 if (cpu->host_features) {
3038 for (w = 0; w < FEATURE_WORDS; w++) {
3040 x86_cpu_get_supported_feature_word(w, cpu->migratable);
/* Apply "+feat"/"-feat" command-line overrides as QOM properties. */
3044 for (l = plus_features; l; l = l->next) {
3045 const char *prop = l->data;
3046 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
3052 for (l = minus_features; l; l = l->next) {
3053 const char *prop = l->data;
3054 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
/* KVM paravirt features are meaningless outside KVM or with kvm=off. */
3060 if (!kvm_enabled() || !cpu->expose_kvm) {
3061 env->features[FEAT_KVM] = 0;
3064 x86_cpu_enable_xsave_components(cpu);
3066 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
3067 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3068 if (cpu->full_cpuid_auto_level) {
3069 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3070 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3071 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3072 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3073 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3074 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3075 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3076 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3077 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3078 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3079 /* SVM requires CPUID[0x8000000A] */
3080 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3081 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3085 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3086 if (env->cpuid_level == UINT32_MAX) {
3087 env->cpuid_level = env->cpuid_min_level;
3089 if (env->cpuid_xlevel == UINT32_MAX) {
3090 env->cpuid_xlevel = env->cpuid_min_xlevel;
3092 if (env->cpuid_xlevel2 == UINT32_MAX) {
3093 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
/* check/enforce: report features the accelerator cannot provide; with
 * enforce this is a hard failure. */
3096 if (x86_cpu_filter_features(cpu) &&
3097 (cpu->check_cpuid || cpu->enforce_cpuid)) {
3098 x86_cpu_report_filtered_features(cpu);
3099 if (cpu->enforce_cpuid) {
3100 error_setg(&local_err,
3102 "Host doesn't support requested features" :
3103 "TCG doesn't support requested features");
3108 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3111 if (IS_AMD_CPU(env)) {
3112 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3113 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3114 & CPUID_EXT2_AMD_ALIASES);
3117 /* For 64bit systems think about the number of physical bits to present.
3118 * ideally this should be the same as the host; anything other than matching
3119 * the host can cause incorrect guest behaviour.
3120 * QEMU used to pick the magic value of 40 bits that corresponds to
3121 * consumer AMD devices but nothing else.
3123 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3124 if (kvm_enabled()) {
3125 uint32_t host_phys_bits = x86_host_phys_bits();
3128 if (cpu->host_phys_bits) {
3129 /* The user asked for us to use the host physical bits */
3130 cpu->phys_bits = host_phys_bits;
3133 /* Print a warning if the user set it to a value that's not the
3136 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3138 error_report("Warning: Host physical bits (%u)"
3139 " does not match phys-bits property (%u)",
3140 host_phys_bits, cpu->phys_bits);
3144 if (cpu->phys_bits &&
3145 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3146 cpu->phys_bits < 32)) {
3147 error_setg(errp, "phys-bits should be between 32 and %u "
3149 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3153 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3154 error_setg(errp, "TCG only supports phys-bits=%u",
3155 TCG_PHYS_ADDR_BITS);
3159 /* 0 means it was not explicitly set by the user (or by machine
3160 * compat_props or by the host code above). In this case, the default
3161 * is the value used by TCG (40).
3163 if (cpu->phys_bits == 0) {
3164 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3167 /* For 32 bit systems don't use the user set value, but keep
3168 * phys_bits consistent with what we tell the guest.
3170 if (cpu->phys_bits != 0) {
3171 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
/* 32-bit guests: PSE36 implies 36 physical bits, else 32. */
3175 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3176 cpu->phys_bits = 36;
3178 cpu->phys_bits = 32;
3181 cpu_exec_init(cs, &error_abort);
3183 if (tcg_enabled()) {
3187 #ifndef CONFIG_USER_ONLY
3188 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3190 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3191 x86_cpu_apic_create(cpu, &local_err);
3192 if (local_err != NULL) {
3200 #ifndef CONFIG_USER_ONLY
/* TCG needs a per-CPU address space so SMRAM visibility can differ
 * between CPUs in and out of SMM. */
3201 if (tcg_enabled()) {
3202 AddressSpace *newas = g_new(AddressSpace, 1);
3204 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3205 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3207 /* Outer container... */
3208 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3209 memory_region_set_enabled(cpu->cpu_as_root, true);
3211 /* ... with two regions inside: normal system memory with low
3214 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3215 get_system_memory(), 0, ~0ull);
3216 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3217 memory_region_set_enabled(cpu->cpu_as_mem, true);
3218 address_space_init(newas, cpu->cpu_as_root, "CPU");
3220 cpu_address_space_init(cs, newas, 0);
3222 /* ... SMRAM with higher priority, linked from /machine/smram. */
3223 cpu->machine_done.notify = x86_cpu_machine_done;
3224 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3230 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3231 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3232 * based on inputs (sockets,cores,threads), it is still better to gives
3235 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3236 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
3238 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3239 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3240 " -smp options properly.");
3244 x86_cpu_apic_realize(cpu, &local_err);
3245 if (local_err != NULL) {
3250 xcc->parent_realize(dev, &local_err);
3253 if (local_err != NULL) {
3254 error_propagate(errp, local_err);
/*
 * DeviceClass::unrealize: tear down what realize created — stop the
 * vCPU thread, drop the machine-reset hook, and unparent the APIC.
 */
3259 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3261 X86CPU *cpu = X86_CPU(dev);
3263 #ifndef CONFIG_USER_ONLY
3264 cpu_remove_sync(CPU(dev));
3265 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3268 if (cpu->apic_state) {
3269 object_unparent(OBJECT(cpu->apic_state));
/* Clear the pointer so a later unrealize/reset cannot use it. */
3270 cpu->apic_state = NULL;
3274 typedef struct BitProperty {
3279 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3280 void *opaque, Error **errp)
3282 BitProperty *fp = opaque;
3283 bool value = (*fp->ptr & fp->mask) == fp->mask;
3284 visit_type_bool(v, name, &value, errp);
/*
 * QOM setter for a feature-bit property: sets or clears the property's
 * mask bits in the feature word.  Rejected after realize.
 * NOTE(review): the 'bool value;' declaration and the if/else lines
 * around the final assignments are missing from this excerpt.
 */
3287 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3288 void *opaque, Error **errp)
3290 DeviceState *dev = DEVICE(obj);
3291 BitProperty *fp = opaque;
3292 Error *local_err = NULL;
3295 if (dev->realized) {
3296 qdev_prop_set_after_realize(dev, name, errp);
3300 visit_type_bool(v, name, &value, &local_err);
3302 error_propagate(errp, local_err);
3307 *fp->ptr |= fp->mask;
3309 *fp->ptr &= ~fp->mask;
/* QOM release hook: free the BitProperty allocated at registration.
 * NOTE(review): the signature continuation and g_free() body are not in
 * this excerpt. */
3313 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3316 BitProperty *prop = opaque;
3320 /* Register a boolean property to get/set a single bit in a uint32_t field.
3322 * The same property name can be registered multiple times to make it affect
3323 * multiple bits in the same FeatureWord. In that case, the getter will return
3324 * true only if all bits are set.
/* NOTE(review): the signature continuation lines and the branch that
 * merges 'bitnr' into an existing property's mask are missing from this
 * excerpt. */
3326 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3327 const char *prop_name,
3333 uint32_t mask = (1UL << bitnr);
/* If the name is already registered, extend its mask instead of adding
 * a duplicate property. */
3335 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3338 assert(fp->ptr == field);
3341 fp = g_new0(BitProperty, 1);
3344 object_property_add(OBJECT(cpu), prop_name, "bool",
3345 x86_cpu_get_bit_prop,
3346 x86_cpu_set_bit_prop,
3347 x86_cpu_release_bit_prop, fp, &error_abort);
/*
 * Register the QOM bool property for one named feature bit of feature
 * word 'w'; bits without a name in feat_names[] get no property.
 * NOTE(review): the signature continuation and the '!name' early-return
 * lines are missing from this excerpt.
 */
3351 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3355 FeatureWordInfo *fi = &feature_word_info[w];
3356 const char *name = fi->feat_names[bitnr];
3362 /* Property names should use "-" instead of "_".
3363 * Old names containing underscores are registered as aliases
3364 * using object_property_add_alias()
3366 assert(!strchr(name, '_'));
3367 /* aliases don't use "|" delimiters anymore, they are registered
3368 * manually using object_property_add_alias() */
3369 assert(!strchr(name, '|'));
3370 x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
/*
 * Instance init for X86CPU: register all QOM properties (versioned
 * fields, vendor/model strings, feature words, per-bit feature flags
 * and their legacy-name aliases) and load the class's CPU definition.
 * NOTE(review): some declaration lines (e.g. loop counters) are missing
 * from this excerpt.
 */
3373 static void x86_cpu_initfn(Object *obj)
3375 CPUState *cs = CPU(obj);
3376 X86CPU *cpu = X86_CPU(obj);
3377 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3378 CPUX86State *env = &cpu->env;
3383 object_property_add(obj, "family", "int",
3384 x86_cpuid_version_get_family,
3385 x86_cpuid_version_set_family, NULL, NULL, NULL);
3386 object_property_add(obj, "model", "int",
3387 x86_cpuid_version_get_model,
3388 x86_cpuid_version_set_model, NULL, NULL, NULL);
3389 object_property_add(obj, "stepping", "int",
3390 x86_cpuid_version_get_stepping,
3391 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3392 object_property_add_str(obj, "vendor",
3393 x86_cpuid_get_vendor,
3394 x86_cpuid_set_vendor, NULL);
3395 object_property_add_str(obj, "model-id",
3396 x86_cpuid_get_model_id,
3397 x86_cpuid_set_model_id, NULL);
3398 object_property_add(obj, "tsc-frequency", "int",
3399 x86_cpuid_get_tsc_freq,
3400 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3401 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3402 x86_cpu_get_feature_words,
3403 NULL, NULL, (void *)env->features, NULL);
3404 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3405 x86_cpu_get_feature_words,
3406 NULL, NULL, (void *)cpu->filtered_features, NULL);
3408 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
/* One bool property per named bit of every feature word. */
3410 for (w = 0; w < FEATURE_WORDS; w++) {
3413 for (bitnr = 0; bitnr < 32; bitnr++) {
3414 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
/* Compatibility aliases for historic feature spellings. */
3418 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
3419 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
3420 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
3421 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
3422 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
3423 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
3424 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
/* Underscore spellings alias the canonical dash spellings. */
3426 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
3427 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
3428 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
3429 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
3430 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
3431 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
3432 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
3433 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
3434 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
3435 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
3436 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
3437 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
3438 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
3439 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
3440 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
3441 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
3442 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
3443 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
3444 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
3445 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
3446 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
3448 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3451 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3453 X86CPU *cpu = X86_CPU(cs);
3455 return cpu->apic_id;
3458 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3460 X86CPU *cpu = X86_CPU(cs);
3462 return cpu->env.cr[0] & CR0_PG_MASK;
3465 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3467 X86CPU *cpu = X86_CPU(cs);
3469 cpu->env.eip = value;
3472 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3474 X86CPU *cpu = X86_CPU(cs);
3476 cpu->env.eip = tb->pc - tb->cs_base;
3479 static bool x86_cpu_has_work(CPUState *cs)
3481 X86CPU *cpu = X86_CPU(cs);
3482 CPUX86State *env = &cpu->env;
3484 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3485 CPU_INTERRUPT_POLL)) &&
3486 (env->eflags & IF_MASK)) ||
3487 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3488 CPU_INTERRUPT_INIT |
3489 CPU_INTERRUPT_SIPI |
3490 CPU_INTERRUPT_MCE)) ||
3491 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3492 !(env->hflags & HF_SMM_MASK));
/* qdev properties of X86CPU; defaults differ between *-user and system
 * emulation for the topology ids.
 * NOTE(review): the '#else' / '#endif' lines of the CONFIG_USER_ONLY
 * conditional are missing from this excerpt. */
3495 static Property x86_cpu_properties[] = {
3496 #ifdef CONFIG_USER_ONLY
3497 /* apic_id = 0 by default for *-user, see commit 9886e834 */
3498 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
3499 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
3500 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
3501 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
3503 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
3504 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
3505 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
3506 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
3508 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3509 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
/* Hyper-V enlightenments exposed to Windows guests. */
3510 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3511 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3512 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3513 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3514 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3515 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3516 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3517 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3518 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3519 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3520 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3521 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3522 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
3523 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
3524 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
/* UINT32_MAX means "not set by user"; realize fills in min-* values. */
3525 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
3526 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
3527 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
3528 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
3529 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
3530 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
3531 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
3532 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3533 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3534 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
3535 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
3536 DEFINE_PROP_END_OF_LIST()
3539 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3541 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3542 CPUClass *cc = CPU_CLASS(oc);
3543 DeviceClass *dc = DEVICE_CLASS(oc);
3545 xcc->parent_realize = dc->realize;
3546 dc->realize = x86_cpu_realizefn;
3547 dc->unrealize = x86_cpu_unrealizefn;
3548 dc->props = x86_cpu_properties;
3550 xcc->parent_reset = cc->reset;
3551 cc->reset = x86_cpu_reset;
3552 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3554 cc->class_by_name = x86_cpu_class_by_name;
3555 cc->parse_features = x86_cpu_parse_featurestr;
3556 cc->has_work = x86_cpu_has_work;
3557 cc->do_interrupt = x86_cpu_do_interrupt;
3558 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3559 cc->dump_state = x86_cpu_dump_state;
3560 cc->set_pc = x86_cpu_set_pc;
3561 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3562 cc->gdb_read_register = x86_cpu_gdb_read_register;
3563 cc->gdb_write_register = x86_cpu_gdb_write_register;
3564 cc->get_arch_id = x86_cpu_get_arch_id;
3565 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3566 #ifdef CONFIG_USER_ONLY
3567 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3569 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3570 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3571 cc->write_elf64_note = x86_cpu_write_elf64_note;
3572 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3573 cc->write_elf32_note = x86_cpu_write_elf32_note;
3574 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3575 cc->vmsd = &vmstate_x86_cpu;
3577 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3578 #ifndef CONFIG_USER_ONLY
3579 cc->debug_excp_handler = breakpoint_handler;
3581 cc->cpu_exec_enter = x86_cpu_exec_enter;
3582 cc->cpu_exec_exit = x86_cpu_exec_exit;
3584 dc->cannot_instantiate_with_device_add_yet = false;
3587 static const TypeInfo x86_cpu_type_info = {
3588 .name = TYPE_X86_CPU,
3590 .instance_size = sizeof(X86CPU),
3591 .instance_init = x86_cpu_initfn,
3593 .class_size = sizeof(X86CPUClass),
3594 .class_init = x86_cpu_common_class_init,
3597 static void x86_cpu_register_types(void)
3601 type_register_static(&x86_cpu_type_info);
3602 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3603 x86_register_cpudef_type(&builtin_x86_defs[i]);
3606 type_register_static(&host_x86_cpu_type_info);
3610 type_init(x86_cpu_register_types)