1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
5 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
7 * Copyright (c) 2005 Keir Fraser
9 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
10 * privileged instructions:
12 * Copyright (C) 2006 Qumranet
13 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
15 * Avi Kivity <avi@qumranet.com>
16 * Yaniv Kamay <yaniv@qumranet.com>
18 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21 #include <linux/kvm_host.h>
22 #include "kvm_cache_regs.h"
23 #include "kvm_emulate.h"
24 #include <linux/stringify.h>
25 #include <asm/debugreg.h>
26 #include <asm/nospec-branch.h>
37 #define OpImplicit 1ull /* No generic decode */
38 #define OpReg 2ull /* Register */
39 #define OpMem 3ull /* Memory */
40 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
41 #define OpDI 5ull /* ES:DI/EDI/RDI */
42 #define OpMem64 6ull /* Memory, 64-bit */
43 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
44 #define OpDX 8ull /* DX register */
45 #define OpCL 9ull /* CL register (for shifts) */
46 #define OpImmByte 10ull /* 8-bit sign extended immediate */
47 #define OpOne 11ull /* Implied 1 */
48 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
49 #define OpMem16 13ull /* Memory operand (16-bit). */
50 #define OpMem32 14ull /* Memory operand (32-bit). */
51 #define OpImmU 15ull /* Immediate operand, zero extended */
52 #define OpSI 16ull /* SI/ESI/RSI */
53 #define OpImmFAddr 17ull /* Immediate far address */
54 #define OpMemFAddr 18ull /* Far address in memory */
55 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
56 #define OpES 20ull /* ES */
57 #define OpCS 21ull /* CS */
58 #define OpSS 22ull /* SS */
59 #define OpDS 23ull /* DS */
60 #define OpFS 24ull /* FS */
61 #define OpGS 25ull /* GS */
62 #define OpMem8 26ull /* 8-bit zero extended memory operand */
63 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
64 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
65 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
66 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
68 #define OpBits 5 /* Width of operand field */
69 #define OpMask ((1ull << OpBits) - 1)
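/*
 * Each operand slot (destination, source, source 2) stores one of the Op*
 * codes above in an OpBits-wide field of the 64-bit decode flags; the Dst*,
 * Src* and Src2* macros below shift the codes into place so that a decode
 * table entry can simply OR them together, e.g. (DstReg | SrcMem | ModRM).
 */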
72 * Opcode effective-address decode tables.
73 * Note that we only emulate instructions that have at least one memory
74 * operand (excluding implicit stack references). We assume that stack
75 * references and instruction fetches will never occur in special memory
76 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need not be handled.
80 /* Operand sizes: 8-bit operands or specified/overridden size. */
81 #define ByteOp (1<<0) /* 8-bit operands. */
82 /* Destination operand type. */
84 #define ImplicitOps (OpImplicit << DstShift)
85 #define DstReg (OpReg << DstShift)
86 #define DstMem (OpMem << DstShift)
87 #define DstAcc (OpAcc << DstShift)
88 #define DstDI (OpDI << DstShift)
89 #define DstMem64 (OpMem64 << DstShift)
90 #define DstMem16 (OpMem16 << DstShift)
91 #define DstImmUByte (OpImmUByte << DstShift)
92 #define DstDX (OpDX << DstShift)
93 #define DstAccLo (OpAccLo << DstShift)
94 #define DstMask (OpMask << DstShift)
95 /* Source operand type. */
97 #define SrcNone (OpNone << SrcShift)
98 #define SrcReg (OpReg << SrcShift)
99 #define SrcMem (OpMem << SrcShift)
100 #define SrcMem16 (OpMem16 << SrcShift)
101 #define SrcMem32 (OpMem32 << SrcShift)
102 #define SrcImm (OpImm << SrcShift)
103 #define SrcImmByte (OpImmByte << SrcShift)
104 #define SrcOne (OpOne << SrcShift)
105 #define SrcImmUByte (OpImmUByte << SrcShift)
106 #define SrcImmU (OpImmU << SrcShift)
107 #define SrcSI (OpSI << SrcShift)
108 #define SrcXLat (OpXLat << SrcShift)
109 #define SrcImmFAddr (OpImmFAddr << SrcShift)
110 #define SrcMemFAddr (OpMemFAddr << SrcShift)
111 #define SrcAcc (OpAcc << SrcShift)
112 #define SrcImmU16 (OpImmU16 << SrcShift)
113 #define SrcImm64 (OpImm64 << SrcShift)
114 #define SrcDX (OpDX << SrcShift)
115 #define SrcMem8 (OpMem8 << SrcShift)
116 #define SrcAccHi (OpAccHi << SrcShift)
117 #define SrcMask (OpMask << SrcShift)
118 #define BitOp (1<<11)
119 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
120 #define String (1<<13) /* String instruction (rep capable) */
121 #define Stack (1<<14) /* Stack instruction (push/pop) */
122 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
123 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
124 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
125 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
126 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
127 #define Escape (5<<15) /* Escape to coprocessor instruction */
128 #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
129 #define ModeDual (7<<15) /* Different instruction for 32/64 bit */
130 #define Sse (1<<18) /* SSE Vector instruction */
131 /* Generic ModRM decode. */
132 #define ModRM (1<<19)
133 /* Destination is only written; never read. */
136 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
137 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
138 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
139 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
140 #define Undefined (1<<25) /* No Such Instruction */
141 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
142 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
144 #define PageTable (1 << 29) /* instruction used to write page table */
145 #define NotImpl (1 << 30) /* instruction is not implemented */
146 /* Source 2 operand type */
147 #define Src2Shift (31)
148 #define Src2None (OpNone << Src2Shift)
149 #define Src2Mem (OpMem << Src2Shift)
150 #define Src2CL (OpCL << Src2Shift)
151 #define Src2ImmByte (OpImmByte << Src2Shift)
152 #define Src2One (OpOne << Src2Shift)
153 #define Src2Imm (OpImm << Src2Shift)
154 #define Src2ES (OpES << Src2Shift)
155 #define Src2CS (OpCS << Src2Shift)
156 #define Src2SS (OpSS << Src2Shift)
157 #define Src2DS (OpDS << Src2Shift)
158 #define Src2FS (OpFS << Src2Shift)
159 #define Src2GS (OpGS << Src2Shift)
160 #define Src2Mask (OpMask << Src2Shift)
161 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
162 #define AlignMask ((u64)7 << 41)
163 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
164 #define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
165 #define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
166 #define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
167 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
168 #define NoWrite ((u64)1 << 45) /* No writeback */
169 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
170 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
171 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
172 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
173 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
174 #define NearBranch ((u64)1 << 52) /* Near branches */
175 #define No16 ((u64)1 << 53) /* No 16 bit operand */
176 #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
177 #define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operands */
178 #define IsBranch ((u64)1 << 56) /* Instruction is considered a branch. */
180 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
182 #define X2(x...) x, x
183 #define X3(x...) X2(x), x
184 #define X4(x...) X2(x), X2(x)
185 #define X5(x...) X4(x), x
186 #define X6(x...) X4(x), X2(x)
187 #define X7(x...) X4(x), X3(x)
188 #define X8(x...) X4(x), X4(x)
189 #define X16(x...) X8(x), X8(x)
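/*
 * The X<n> macros above replicate one initializer <n> times; they are used to
 * fill runs of identical entries in the opcode decode tables.
 */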
191 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
192 #define FASTOP_SIZE 8
199 int (*execute)(struct x86_emulate_ctxt *ctxt);
200 const struct opcode *group;
201 const struct group_dual *gdual;
202 const struct gprefix *gprefix;
203 const struct escape *esc;
204 const struct instr_dual *idual;
205 const struct mode_dual *mdual;
206 void (*fastop)(struct fastop *fake);
208 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
212 struct opcode mod012[8];
213 struct opcode mod3[8];
217 struct opcode pfx_no;
218 struct opcode pfx_66;
219 struct opcode pfx_f2;
220 struct opcode pfx_f3;
225 struct opcode high[64];
229 struct opcode mod012;
234 struct opcode mode32;
235 struct opcode mode64;
238 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
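/*
 * 0xffc0802a covers the reserved EFLAGS bits 1, 3, 5, 15 and 22-31; users of
 * this mask clear them and then re-set bit 1 via X86_EFLAGS_FIXED.
 */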
240 enum x86_transfer_type {
242 X86_TRANSFER_CALL_JMP,
244 X86_TRANSFER_TASK_SWITCH,
247 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
249 if (!(ctxt->regs_valid & (1 << nr))) {
250 ctxt->regs_valid |= 1 << nr;
251 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
253 return ctxt->_regs[nr];
256 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
258 ctxt->regs_valid |= 1 << nr;
259 ctxt->regs_dirty |= 1 << nr;
260 return &ctxt->_regs[nr];
263 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
266 return reg_write(ctxt, nr);
269 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
273 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
274 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
277 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
279 ctxt->regs_dirty = 0;
280 ctxt->regs_valid = 0;
284 * These EFLAGS bits are restored from saved value during emulation, and
285 * any changes are written back to the saved value after emulation.
287 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
288 X86_EFLAGS_PF|X86_EFLAGS_CF)
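/*
 * Conceptually, an emulated flag-producing operation runs with the guest's
 * saved flags and its result is merged back roughly as
 *
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (result_flags & EFLAGS_MASK);
 *
 * where "result_flags" stands for the RFLAGS value produced by the emulated
 * operation (illustrative, not a literal excerpt of this file).
 */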
297 * fastop functions have a special calling convention:
302 * flags: rflags (in/out)
303 * ex: rsi (in:fastop pointer, out:zero if exception)
305 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
306 * different operand sizes can be reached by calculation, rather than a jump
307 * table (which would be bigger than the code).
309 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
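/*
 * Rough sketch of the size dispatch done by fastop(): every FOP*E body below
 * is padded to FASTOP_SIZE bytes, so the handler for an n-byte operand sits
 * at a fixed offset from the em_<op> entry point, approximately
 *
 *	fop += ilog2(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * (illustrative; NR_FASTOP above is simply the number of such per-size slots).
 */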
311 #define __FOP_FUNC(name) \
312 ".align " __stringify(FASTOP_SIZE) " \n\t" \
313 ".type " name ", @function \n\t" \
316 #define FOP_FUNC(name) \
319 #define __FOP_RET(name) \
321 ".size " name ", .-" name "\n\t"
323 #define FOP_RET(name) \
326 #define FOP_START(op) \
327 extern void em_##op(struct fastop *fake); \
328 asm(".pushsection .text, \"ax\" \n\t" \
329 ".global em_" #op " \n\t" \
330 ".align " __stringify(FASTOP_SIZE) " \n\t" \
336 #define __FOPNOP(name) \
341 __FOPNOP(__stringify(__UNIQUE_ID(nop)))
343 #define FOP1E(op, dst) \
344 __FOP_FUNC(#op "_" #dst) \
345 "10: " #op " %" #dst " \n\t" \
346 __FOP_RET(#op "_" #dst)
348 #define FOP1EEX(op, dst) \
349 FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
351 #define FASTOP1(op) \
356 ON64(FOP1E(op##q, rax)) \
359 /* 1-operand, using src2 (for MUL/DIV r/m) */
360 #define FASTOP1SRC2(op, name) \
365 ON64(FOP1E(op, rcx)) \
368 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
369 #define FASTOP1SRC2EX(op, name) \
374 ON64(FOP1EEX(op, rcx)) \
377 #define FOP2E(op, dst, src) \
378 __FOP_FUNC(#op "_" #dst "_" #src) \
379 #op " %" #src ", %" #dst " \n\t" \
380 __FOP_RET(#op "_" #dst "_" #src)
382 #define FASTOP2(op) \
384 FOP2E(op##b, al, dl) \
385 FOP2E(op##w, ax, dx) \
386 FOP2E(op##l, eax, edx) \
387 ON64(FOP2E(op##q, rax, rdx)) \
390 /* 2 operand, word only */
391 #define FASTOP2W(op) \
394 FOP2E(op##w, ax, dx) \
395 FOP2E(op##l, eax, edx) \
396 ON64(FOP2E(op##q, rax, rdx)) \
399 /* 2 operand, src is CL */
400 #define FASTOP2CL(op) \
402 FOP2E(op##b, al, cl) \
403 FOP2E(op##w, ax, cl) \
404 FOP2E(op##l, eax, cl) \
405 ON64(FOP2E(op##q, rax, cl)) \
408 /* 2 operand, src and dest are reversed */
409 #define FASTOP2R(op, name) \
411 FOP2E(op##b, dl, al) \
412 FOP2E(op##w, dx, ax) \
413 FOP2E(op##l, edx, eax) \
414 ON64(FOP2E(op##q, rdx, rax)) \
417 #define FOP3E(op, dst, src, src2) \
418 __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
419 #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
420 __FOP_RET(#op "_" #dst "_" #src "_" #src2)
422 /* 3-operand, word-only, src2=cl */
423 #define FASTOP3WCL(op) \
426 FOP3E(op##w, ax, dx, cl) \
427 FOP3E(op##l, eax, edx, cl) \
428 ON64(FOP3E(op##q, rax, rdx, cl)) \
431 /* Special case for SETcc - 1 instruction per cc */
432 #define FOP_SETCC(op) \
434 ".type " #op ", @function \n\t" \
460 "pushf; sbb %al, %al; popf \n\t"
465 * XXX: inoutclob user must know where the argument is being expanded.
466 * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
468 #define asm_safe(insn, inoutclob...) \
472 asm volatile("1:" insn "\n" \
474 _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
475 : [_fault] "+r"(_fault) inoutclob ); \
477 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
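/*
 * Illustrative use of asm_safe() (a sketch, not a literal excerpt): run an
 * FPU instruction that may fault and turn the fault into an error code
 * instead of an oops:
 *
 *	rc = asm_safe("fwait");
 *	if (rc != X86EMUL_CONTINUE)
 *		return X86EMUL_UNHANDLEABLE;
 */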
480 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
481 enum x86_intercept intercept,
482 enum x86_intercept_stage stage)
484 struct x86_instruction_info info = {
485 .intercept = intercept,
486 .rep_prefix = ctxt->rep_prefix,
487 .modrm_mod = ctxt->modrm_mod,
488 .modrm_reg = ctxt->modrm_reg,
489 .modrm_rm = ctxt->modrm_rm,
490 .src_val = ctxt->src.val64,
491 .dst_val = ctxt->dst.val64,
492 .src_bytes = ctxt->src.bytes,
493 .dst_bytes = ctxt->dst.bytes,
494 .ad_bytes = ctxt->ad_bytes,
495 .next_rip = ctxt->eip,
498 return ctxt->ops->intercept(ctxt, &info, stage);
501 static void assign_masked(ulong *dest, ulong src, ulong mask)
503 *dest = (*dest & ~mask) | (src & mask);
506 static void assign_register(unsigned long *reg, u64 val, int bytes)
508 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
511 *(u8 *)reg = (u8)val;
514 *(u16 *)reg = (u16)val;
518 break; /* 64b: zero-extend */
525 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
527 return (1UL << (ctxt->ad_bytes << 3)) - 1;
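/* e.g. ad_bytes == 2 gives 0xffff and ad_bytes == 4 gives 0xffffffff. */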
530 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
533 struct desc_struct ss;
535 if (ctxt->mode == X86EMUL_MODE_PROT64)
537 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
538 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
541 static int stack_size(struct x86_emulate_ctxt *ctxt)
543 return (__fls(stack_mask(ctxt)) + 1) >> 3;
546 /* Access/update address held in a register, based on addressing mode. */
547 static inline unsigned long
548 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
550 if (ctxt->ad_bytes == sizeof(unsigned long))
553 return reg & ad_mask(ctxt);
556 static inline unsigned long
557 register_address(struct x86_emulate_ctxt *ctxt, int reg)
559 return address_mask(ctxt, reg_read(ctxt, reg));
562 static void masked_increment(ulong *reg, ulong mask, int inc)
564 assign_masked(reg, *reg + inc, mask);
568 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
570 ulong *preg = reg_rmw(ctxt, reg);
572 assign_register(preg, *preg + inc, ctxt->ad_bytes);
575 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
577 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
580 static u32 desc_limit_scaled(struct desc_struct *desc)
582 u32 limit = get_desc_limit(desc);
584 return desc->g ? (limit << 12) | 0xfff : limit;
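/* e.g. a page-granular (g=1) limit of 0xfffff scales to 0xffffffff (4 GiB - 1). */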
587 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
589 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
592 return ctxt->ops->get_cached_segment_base(ctxt, seg);
595 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
596 u32 error, bool valid)
599 ctxt->exception.vector = vec;
600 ctxt->exception.error_code = error;
601 ctxt->exception.error_code_valid = valid;
602 return X86EMUL_PROPAGATE_FAULT;
605 static int emulate_db(struct x86_emulate_ctxt *ctxt)
607 return emulate_exception(ctxt, DB_VECTOR, 0, false);
610 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
612 return emulate_exception(ctxt, GP_VECTOR, err, true);
615 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
617 return emulate_exception(ctxt, SS_VECTOR, err, true);
620 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
622 return emulate_exception(ctxt, UD_VECTOR, 0, false);
625 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
627 return emulate_exception(ctxt, TS_VECTOR, err, true);
630 static int emulate_de(struct x86_emulate_ctxt *ctxt)
632 return emulate_exception(ctxt, DE_VECTOR, 0, false);
635 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
637 return emulate_exception(ctxt, NM_VECTOR, 0, false);
640 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
643 struct desc_struct desc;
645 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
649 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
654 struct desc_struct desc;
656 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
657 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
660 static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
662 return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
665 static inline bool emul_is_noncanonical_address(u64 la,
666 struct x86_emulate_ctxt *ctxt)
668 return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
672 * x86 defines three classes of vector instructions: explicitly
673 * aligned, explicitly unaligned, and the rest, which change behaviour
674 * depending on whether they're AVX encoded or not.
676 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
677 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
678 * 512 bytes of data must be aligned to a 16 byte boundary.
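 *
 * For example, a MOVDQA (Aligned) access of 16 bytes must be 16-byte aligned
 * and takes #GP(0) otherwise, while MOVDQU (Unaligned) and the AVX-encoded
 * (Avx) forms are not alignment-checked.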
680 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
682 u64 alignment = ctxt->d & AlignMask;
684 if (likely(size < 16))
699 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
700 struct segmented_address addr,
701 unsigned *max_size, unsigned size,
702 bool write, bool fetch,
703 enum x86emul_mode mode, ulong *linear)
705 struct desc_struct desc;
712 la = seg_base(ctxt, addr.seg) + addr.ea;
715 case X86EMUL_MODE_PROT64:
717 va_bits = ctxt_virt_addr_bits(ctxt);
718 if (get_canonical(la, va_bits) != la)
721 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
722 if (size > *max_size)
726 *linear = la = (u32)la;
727 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
731 /* code segment in protected mode or read-only data segment */
732 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
733 || !(desc.type & 2)) && write)
735 /* unreadable code segment */
736 if (!fetch && (desc.type & 8) && !(desc.type & 2))
738 lim = desc_limit_scaled(&desc);
739 if (!(desc.type & 8) && (desc.type & 4)) {
740 /* expand-down segment */
743 lim = desc.d ? 0xffffffff : 0xffff;
747 if (lim == 0xffffffff)
750 *max_size = (u64)lim + 1 - addr.ea;
751 if (size > *max_size)
756 if (la & (insn_alignment(ctxt, size) - 1))
757 return emulate_gp(ctxt, 0);
758 return X86EMUL_CONTINUE;
760 if (addr.seg == VCPU_SREG_SS)
761 return emulate_ss(ctxt, 0);
763 return emulate_gp(ctxt, 0);
766 static int linearize(struct x86_emulate_ctxt *ctxt,
767 struct segmented_address addr,
768 unsigned size, bool write,
772 return __linearize(ctxt, addr, &max_size, size, write, false,
776 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
777 enum x86emul_mode mode)
782 struct segmented_address addr = { .seg = VCPU_SREG_CS,
785 if (ctxt->op_bytes != sizeof(unsigned long))
786 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
787 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
788 if (rc == X86EMUL_CONTINUE)
789 ctxt->_eip = addr.ea;
793 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
795 return assign_eip(ctxt, dst, ctxt->mode);
798 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
799 const struct desc_struct *cs_desc)
801 enum x86emul_mode mode = ctxt->mode;
805 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
809 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
811 mode = X86EMUL_MODE_PROT64;
813 mode = X86EMUL_MODE_PROT32; /* temporary value */
816 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
817 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
818 rc = assign_eip(ctxt, dst, mode);
819 if (rc == X86EMUL_CONTINUE)
824 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
826 return assign_eip_near(ctxt, ctxt->_eip + rel);
829 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
830 void *data, unsigned size)
832 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
835 static int linear_write_system(struct x86_emulate_ctxt *ctxt,
836 ulong linear, void *data,
839 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
842 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
843 struct segmented_address addr,
850 rc = linearize(ctxt, addr, size, false, &linear);
851 if (rc != X86EMUL_CONTINUE)
853 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
856 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
857 struct segmented_address addr,
864 rc = linearize(ctxt, addr, size, true, &linear);
865 if (rc != X86EMUL_CONTINUE)
867 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
871 * Prefetch the remaining bytes of the instruction without crossing page
872 * boundary if they are not in fetch_cache yet.
874 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
877 unsigned size, max_size;
878 unsigned long linear;
879 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
880 struct segmented_address addr = { .seg = VCPU_SREG_CS,
881 .ea = ctxt->eip + cur_size };
884 * We do not know exactly how many bytes will be needed, and
885 * __linearize is expensive, so fetch as much as possible. We
886 * just have to avoid going beyond the 15 byte limit, the end
887 * of the segment, or the end of the page.
889 * __linearize is called with size 0 so that it does not do any
890 * boundary check itself. Instead, we use max_size to check against op_size below.
893 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
895 if (unlikely(rc != X86EMUL_CONTINUE))
898 size = min_t(unsigned, 15UL ^ cur_size, max_size);
899 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
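/*
 * Note: cur_size is at most 15 here, so "15UL ^ cur_size" above is simply
 * 15 - cur_size, i.e. the room left before the architectural 15-byte
 * instruction-length limit.
 */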
902 * One instruction can only straddle two pages,
903 * and one has been loaded at the beginning of
904 * x86_decode_insn. So, if there still are not enough bytes,
905 * we must have hit the architectural 15-byte instruction length limit.
907 if (unlikely(size < op_size))
908 return emulate_gp(ctxt, 0);
910 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
911 size, &ctxt->exception);
912 if (unlikely(rc != X86EMUL_CONTINUE))
914 ctxt->fetch.end += size;
915 return X86EMUL_CONTINUE;
918 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
921 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
923 if (unlikely(done_size < size))
924 return __do_insn_fetch_bytes(ctxt, size - done_size);
926 return X86EMUL_CONTINUE;
929 /* Fetch next part of the instruction being emulated. */
930 #define insn_fetch(_type, _ctxt) \
933 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
934 if (rc != X86EMUL_CONTINUE) \
936 ctxt->_eip += sizeof(_type); \
937 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
938 ctxt->fetch.ptr += sizeof(_type); \
942 #define insn_fetch_arr(_arr, _size, _ctxt) \
944 rc = do_insn_fetch_bytes(_ctxt, _size); \
945 if (rc != X86EMUL_CONTINUE) \
947 ctxt->_eip += (_size); \
948 memcpy(_arr, ctxt->fetch.ptr, _size); \
949 ctxt->fetch.ptr += (_size); \
953 * Given the 'reg' portion of a ModRM byte, and a register block, return a
954 * pointer into the block that addresses the relevant register.
955 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
957 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
961 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
963 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
964 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
966 p = reg_rmw(ctxt, modrm_reg);
970 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
971 struct segmented_address addr,
972 u16 *size, unsigned long *address, int op_bytes)
979 rc = segmented_read_std(ctxt, addr, size, 2);
980 if (rc != X86EMUL_CONTINUE)
983 rc = segmented_read_std(ctxt, addr, address, op_bytes);
997 FASTOP1SRC2(mul, mul_ex);
998 FASTOP1SRC2(imul, imul_ex);
999 FASTOP1SRC2EX(div, div_ex);
1000 FASTOP1SRC2EX(idiv, idiv_ex);
1029 FASTOP2R(cmp, cmp_r);
1031 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1033 /* If src is zero, do not write back, but still update the flags */
1034 if (ctxt->src.val == 0)
1035 ctxt->dst.type = OP_NONE;
1036 return fastop(ctxt, em_bsf);
1039 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1041 /* If src is zero, do not write back, but still update the flags */
1042 if (ctxt->src.val == 0)
1043 ctxt->dst.type = OP_NONE;
1044 return fastop(ctxt, em_bsr);
1047 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1050 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1052 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1053 asm("push %[flags]; popf; " CALL_NOSPEC
1054 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
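/*
 * FOP_SETCC above emits one tiny set<cc> stub per x86 condition code; the
 * stubs are laid out at a fixed 4-byte stride, which is what makes the
 * "em_setcc + 4 * (condition & 0xf)" pointer arithmetic above work.
 */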
1058 static void fetch_register_operand(struct operand *op)
1060 switch (op->bytes) {
1062 op->val = *(u8 *)op->addr.reg;
1065 op->val = *(u16 *)op->addr.reg;
1068 op->val = *(u32 *)op->addr.reg;
1071 op->val = *(u64 *)op->addr.reg;
1076 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1078 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1079 return emulate_nm(ctxt);
1082 asm volatile("fninit");
1084 return X86EMUL_CONTINUE;
1087 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1091 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1092 return emulate_nm(ctxt);
1095 asm volatile("fnstcw %0": "+m"(fcw));
1098 ctxt->dst.val = fcw;
1100 return X86EMUL_CONTINUE;
1103 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1107 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1108 return emulate_nm(ctxt);
1111 asm volatile("fnstsw %0": "+m"(fsw));
1114 ctxt->dst.val = fsw;
1116 return X86EMUL_CONTINUE;
1119 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1122 unsigned reg = ctxt->modrm_reg;
1124 if (!(ctxt->d & ModRM))
1125 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1127 if (ctxt->d & Sse) {
1131 kvm_read_sse_reg(reg, &op->vec_val);
1134 if (ctxt->d & Mmx) {
1143 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1144 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1146 fetch_register_operand(op);
1147 op->orig_val = op->val;
1150 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1152 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1153 ctxt->modrm_seg = VCPU_SREG_SS;
1156 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1160 int index_reg, base_reg, scale;
1161 int rc = X86EMUL_CONTINUE;
1164 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1165 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1166 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1168 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1169 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1170 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1171 ctxt->modrm_seg = VCPU_SREG_DS;
1173 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1175 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1176 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1178 if (ctxt->d & Sse) {
1181 op->addr.xmm = ctxt->modrm_rm;
1182 kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
1185 if (ctxt->d & Mmx) {
1188 op->addr.mm = ctxt->modrm_rm & 7;
1191 fetch_register_operand(op);
1197 if (ctxt->ad_bytes == 2) {
1198 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1199 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1200 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1201 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1203 /* 16-bit ModR/M decode. */
1204 switch (ctxt->modrm_mod) {
1206 if (ctxt->modrm_rm == 6)
1207 modrm_ea += insn_fetch(u16, ctxt);
1210 modrm_ea += insn_fetch(s8, ctxt);
1213 modrm_ea += insn_fetch(u16, ctxt);
1216 switch (ctxt->modrm_rm) {
1218 modrm_ea += bx + si;
1221 modrm_ea += bx + di;
1224 modrm_ea += bp + si;
1227 modrm_ea += bp + di;
1236 if (ctxt->modrm_mod != 0)
1243 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1244 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1245 ctxt->modrm_seg = VCPU_SREG_SS;
1246 modrm_ea = (u16)modrm_ea;
1248 /* 32/64-bit ModR/M decode. */
1249 if ((ctxt->modrm_rm & 7) == 4) {
1250 sib = insn_fetch(u8, ctxt);
1251 index_reg |= (sib >> 3) & 7;
1252 base_reg |= sib & 7;
1255 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1256 modrm_ea += insn_fetch(s32, ctxt);
1258 modrm_ea += reg_read(ctxt, base_reg);
1259 adjust_modrm_seg(ctxt, base_reg);
1260 /* Increment ESP on POP [ESP] */
1261 if ((ctxt->d & IncSP) &&
1262 base_reg == VCPU_REGS_RSP)
1263 modrm_ea += ctxt->op_bytes;
1266 modrm_ea += reg_read(ctxt, index_reg) << scale;
1267 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1268 modrm_ea += insn_fetch(s32, ctxt);
1269 if (ctxt->mode == X86EMUL_MODE_PROT64)
1270 ctxt->rip_relative = 1;
1272 base_reg = ctxt->modrm_rm;
1273 modrm_ea += reg_read(ctxt, base_reg);
1274 adjust_modrm_seg(ctxt, base_reg);
1276 switch (ctxt->modrm_mod) {
1278 modrm_ea += insn_fetch(s8, ctxt);
1281 modrm_ea += insn_fetch(s32, ctxt);
1285 op->addr.mem.ea = modrm_ea;
1286 if (ctxt->ad_bytes != 8)
1287 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1293 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1296 int rc = X86EMUL_CONTINUE;
1299 switch (ctxt->ad_bytes) {
1301 op->addr.mem.ea = insn_fetch(u16, ctxt);
1304 op->addr.mem.ea = insn_fetch(u32, ctxt);
1307 op->addr.mem.ea = insn_fetch(u64, ctxt);
1314 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1318 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1319 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1321 if (ctxt->src.bytes == 2)
1322 sv = (s16)ctxt->src.val & (s16)mask;
1323 else if (ctxt->src.bytes == 4)
1324 sv = (s32)ctxt->src.val & (s32)mask;
1326 sv = (s64)ctxt->src.val & (s64)mask;
1328 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1329 ctxt->dst.addr.mem.ea + (sv >> 3));
1332 /* only subword offset */
1333 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
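/*
 * Worked example (illustrative): "bt %eax, mem" with a source value of 35 and
 * a 4-byte destination gives mask = ~31 and sv = 32, so the effective address
 * is advanced by 32 >> 3 = 4 bytes and src.val becomes 35 & 31 = 3, i.e. the
 * access tests bit 3 of the dword four bytes past the original ea.
 */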
1336 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1337 unsigned long addr, void *dest, unsigned size)
1340 struct read_cache *mc = &ctxt->mem_read;
1342 if (mc->pos < mc->end)
1345 WARN_ON((mc->end + size) >= sizeof(mc->data));
1347 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1349 if (rc != X86EMUL_CONTINUE)
1355 memcpy(dest, mc->data + mc->pos, size);
1357 return X86EMUL_CONTINUE;
1360 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1361 struct segmented_address addr,
1368 rc = linearize(ctxt, addr, size, false, &linear);
1369 if (rc != X86EMUL_CONTINUE)
1371 return read_emulated(ctxt, linear, data, size);
1374 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1375 struct segmented_address addr,
1382 rc = linearize(ctxt, addr, size, true, &linear);
1383 if (rc != X86EMUL_CONTINUE)
1385 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1389 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1390 struct segmented_address addr,
1391 const void *orig_data, const void *data,
1397 rc = linearize(ctxt, addr, size, true, &linear);
1398 if (rc != X86EMUL_CONTINUE)
1400 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1401 size, &ctxt->exception);
1404 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1405 unsigned int size, unsigned short port,
1408 struct read_cache *rc = &ctxt->io_read;
1410 if (rc->pos == rc->end) { /* refill pio read ahead */
1411 unsigned int in_page, n;
1412 unsigned int count = ctxt->rep_prefix ?
1413 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1414 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1415 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1416 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1417 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1420 rc->pos = rc->end = 0;
1421 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1426 if (ctxt->rep_prefix && (ctxt->d & String) &&
1427 !(ctxt->eflags & X86_EFLAGS_DF)) {
1428 ctxt->dst.data = rc->data + rc->pos;
1429 ctxt->dst.type = OP_MEM_STR;
1430 ctxt->dst.count = (rc->end - rc->pos) / size;
1433 memcpy(dest, rc->data + rc->pos, size);
1439 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1440 u16 index, struct desc_struct *desc)
1445 ctxt->ops->get_idt(ctxt, &dt);
1447 if (dt.size < index * 8 + 7)
1448 return emulate_gp(ctxt, index << 3 | 0x2);
1450 addr = dt.address + index * 8;
1451 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1454 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1455 u16 selector, struct desc_ptr *dt)
1457 const struct x86_emulate_ops *ops = ctxt->ops;
1460 if (selector & 1 << 2) {
1461 struct desc_struct desc;
1464 memset(dt, 0, sizeof(*dt));
1465 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1469 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1470 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1472 ops->get_gdt(ctxt, dt);
1475 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1476 u16 selector, ulong *desc_addr_p)
1479 u16 index = selector >> 3;
1482 get_descriptor_table_ptr(ctxt, selector, &dt);
1484 if (dt.size < index * 8 + 7)
1485 return emulate_gp(ctxt, selector & 0xfffc);
1487 addr = dt.address + index * 8;
1489 #ifdef CONFIG_X86_64
1490 if (addr >> 32 != 0) {
1493 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1494 if (!(efer & EFER_LMA))
1499 *desc_addr_p = addr;
1500 return X86EMUL_CONTINUE;
1503 /* allowed just for 8-byte segment descriptors */
1504 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1505 u16 selector, struct desc_struct *desc,
1510 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1511 if (rc != X86EMUL_CONTINUE)
1514 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1517 /* allowed just for 8-byte segment descriptors */
1518 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1519 u16 selector, struct desc_struct *desc)
1524 rc = get_descriptor_ptr(ctxt, selector, &addr);
1525 if (rc != X86EMUL_CONTINUE)
1528 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1531 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1532 u16 selector, int seg, u8 cpl,
1533 enum x86_transfer_type transfer,
1534 struct desc_struct *desc)
1536 struct desc_struct seg_desc, old_desc;
1538 unsigned err_vec = GP_VECTOR;
1540 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1546 memset(&seg_desc, 0, sizeof(seg_desc));
1548 if (ctxt->mode == X86EMUL_MODE_REAL) {
1549 /* set real mode segment descriptor (keep limit etc. for unreal mode) */
1551 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1552 set_desc_base(&seg_desc, selector << 4);
1554 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1555 /* VM86 needs a clean new segment descriptor */
1556 set_desc_base(&seg_desc, selector << 4);
1557 set_desc_limit(&seg_desc, 0xffff);
1567 /* TR should be in GDT only */
1568 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1571 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1572 if (null_selector) {
1573 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1576 if (seg == VCPU_SREG_SS) {
1577 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1581 * ctxt->ops->set_segment expects the CPL to be in
1582 * SS.DPL, so fake an expand-up 32-bit data segment.
1592 /* Skip all following checks */
1596 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1597 if (ret != X86EMUL_CONTINUE)
1600 err_code = selector & 0xfffc;
1601 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR : GP_VECTOR;
1604 /* can't load system descriptor into segment selector */
1605 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1606 if (transfer == X86_TRANSFER_CALL_JMP)
1607 return X86EMUL_UNHANDLEABLE;
1612 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1621 * segment is not a writable data segment or segment
1622 * selector's RPL != CPL or the segment's DPL != CPL
1624 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1628 if (!(seg_desc.type & 8))
1631 if (seg_desc.type & 4) {
1637 if (rpl > cpl || dpl != cpl)
1640 /* in long-mode d/b must be clear if l is set */
1641 if (seg_desc.d && seg_desc.l) {
1644 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1645 if (efer & EFER_LMA)
1649 /* CS(RPL) <- CPL */
1650 selector = (selector & 0xfffc) | cpl;
1653 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1655 old_desc = seg_desc;
1656 seg_desc.type |= 2; /* busy */
1657 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1658 sizeof(seg_desc), &ctxt->exception);
1659 if (ret != X86EMUL_CONTINUE)
1662 case VCPU_SREG_LDTR:
1663 if (seg_desc.s || seg_desc.type != 2)
1666 default: /* DS, ES, FS, or GS */
1668 * segment is not a data or readable code segment or
1669 * ((segment is a data or nonconforming code segment)
1670 * and (both RPL and CPL > DPL))
1672 if ((seg_desc.type & 0xa) == 0x8 ||
1673 (((seg_desc.type & 0xc) != 0xc) &&
1674 (rpl > dpl && cpl > dpl)))
1680 /* mark segment as accessed */
1681 if (!(seg_desc.type & 1)) {
1683 ret = write_segment_descriptor(ctxt, selector,
1685 if (ret != X86EMUL_CONTINUE)
1688 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1689 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1690 if (ret != X86EMUL_CONTINUE)
1692 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1693 ((u64)base3 << 32), ctxt))
1694 return emulate_gp(ctxt, 0);
1697 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1700 return X86EMUL_CONTINUE;
1702 return emulate_exception(ctxt, err_vec, err_code, true);
1705 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1706 u16 selector, int seg)
1708 u8 cpl = ctxt->ops->cpl(ctxt);
1711 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1712 * they can load it at CPL<3 (Intel's manual says only LSS can,
1715 * However, the Intel manual says that putting IST=1/DPL=3 in
1716 * an interrupt gate will result in SS=3 (the AMD manual instead
1717 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1718 * and only forbid it here.
1720 if (seg == VCPU_SREG_SS && selector == 3 &&
1721 ctxt->mode == X86EMUL_MODE_PROT64)
1722 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1724 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1725 X86_TRANSFER_NONE, NULL);
1728 static void write_register_operand(struct operand *op)
1730 return assign_register(op->addr.reg, op->val, op->bytes);
1733 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1737 write_register_operand(op);
1740 if (ctxt->lock_prefix)
1741 return segmented_cmpxchg(ctxt,
1747 return segmented_write(ctxt,
1753 return segmented_write(ctxt,
1756 op->bytes * op->count);
1759 kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
1762 kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
1770 return X86EMUL_CONTINUE;
1773 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1775 struct segmented_address addr;
1777 rsp_increment(ctxt, -bytes);
1778 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1779 addr.seg = VCPU_SREG_SS;
1781 return segmented_write(ctxt, addr, data, bytes);
1784 static int em_push(struct x86_emulate_ctxt *ctxt)
1786 /* Disable writeback. */
1787 ctxt->dst.type = OP_NONE;
1788 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1791 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1792 void *dest, int len)
1795 struct segmented_address addr;
1797 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1798 addr.seg = VCPU_SREG_SS;
1799 rc = segmented_read(ctxt, addr, dest, len);
1800 if (rc != X86EMUL_CONTINUE)
1803 rsp_increment(ctxt, len);
1807 static int em_pop(struct x86_emulate_ctxt *ctxt)
1809 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1812 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1813 void *dest, int len)
1816 unsigned long val, change_mask;
1817 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1818 int cpl = ctxt->ops->cpl(ctxt);
1820 rc = emulate_pop(ctxt, &val, len);
1821 if (rc != X86EMUL_CONTINUE)
1824 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1825 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1826 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1827 X86_EFLAGS_AC | X86_EFLAGS_ID;
1829 switch(ctxt->mode) {
1830 case X86EMUL_MODE_PROT64:
1831 case X86EMUL_MODE_PROT32:
1832 case X86EMUL_MODE_PROT16:
1834 change_mask |= X86_EFLAGS_IOPL;
1836 change_mask |= X86_EFLAGS_IF;
1838 case X86EMUL_MODE_VM86:
1840 return emulate_gp(ctxt, 0);
1841 change_mask |= X86_EFLAGS_IF;
1843 default: /* real mode */
1844 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1848 *(unsigned long *)dest =
1849 (ctxt->eflags & ~change_mask) | (val & change_mask);
1854 static int em_popf(struct x86_emulate_ctxt *ctxt)
1856 ctxt->dst.type = OP_REG;
1857 ctxt->dst.addr.reg = &ctxt->eflags;
1858 ctxt->dst.bytes = ctxt->op_bytes;
1859 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1862 static int em_enter(struct x86_emulate_ctxt *ctxt)
1865 unsigned frame_size = ctxt->src.val;
1866 unsigned nesting_level = ctxt->src2.val & 31;
1870 return X86EMUL_UNHANDLEABLE;
1872 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1873 rc = push(ctxt, &rbp, stack_size(ctxt));
1874 if (rc != X86EMUL_CONTINUE)
1876 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1878 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1879 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1881 return X86EMUL_CONTINUE;
1884 static int em_leave(struct x86_emulate_ctxt *ctxt)
1886 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1888 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1891 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1893 int seg = ctxt->src2.val;
1895 ctxt->src.val = get_segment_selector(ctxt, seg);
1896 if (ctxt->op_bytes == 4) {
1897 rsp_increment(ctxt, -2);
1901 return em_push(ctxt);
1904 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1906 int seg = ctxt->src2.val;
1907 unsigned long selector;
1910 rc = emulate_pop(ctxt, &selector, 2);
1911 if (rc != X86EMUL_CONTINUE)
1914 if (ctxt->modrm_reg == VCPU_SREG_SS)
1915 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1916 if (ctxt->op_bytes > 2)
1917 rsp_increment(ctxt, ctxt->op_bytes - 2);
1919 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1923 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1925 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1926 int rc = X86EMUL_CONTINUE;
1927 int reg = VCPU_REGS_RAX;
1929 while (reg <= VCPU_REGS_RDI) {
1930 (reg == VCPU_REGS_RSP) ?
1931 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1934 if (rc != X86EMUL_CONTINUE)
1943 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1945 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1946 return em_push(ctxt);
1949 static int em_popa(struct x86_emulate_ctxt *ctxt)
1951 int rc = X86EMUL_CONTINUE;
1952 int reg = VCPU_REGS_RDI;
1955 while (reg >= VCPU_REGS_RAX) {
1956 if (reg == VCPU_REGS_RSP) {
1957 rsp_increment(ctxt, ctxt->op_bytes);
1961 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
1962 if (rc != X86EMUL_CONTINUE)
1964 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
1970 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1972 const struct x86_emulate_ops *ops = ctxt->ops;
1979 /* TODO: Add limit checks */
1980 ctxt->src.val = ctxt->eflags;
1982 if (rc != X86EMUL_CONTINUE)
1985 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
1987 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1989 if (rc != X86EMUL_CONTINUE)
1992 ctxt->src.val = ctxt->_eip;
1994 if (rc != X86EMUL_CONTINUE)
1997 ops->get_idt(ctxt, &dt);
1999 eip_addr = dt.address + (irq << 2);
2000 cs_addr = dt.address + (irq << 2) + 2;
2002 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2003 if (rc != X86EMUL_CONTINUE)
2006 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2007 if (rc != X86EMUL_CONTINUE)
2010 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2011 if (rc != X86EMUL_CONTINUE)
2019 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2023 invalidate_registers(ctxt);
2024 rc = __emulate_int_real(ctxt, irq);
2025 if (rc == X86EMUL_CONTINUE)
2026 writeback_registers(ctxt);
2030 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2032 switch(ctxt->mode) {
2033 case X86EMUL_MODE_REAL:
2034 return __emulate_int_real(ctxt, irq);
2035 case X86EMUL_MODE_VM86:
2036 case X86EMUL_MODE_PROT16:
2037 case X86EMUL_MODE_PROT32:
2038 case X86EMUL_MODE_PROT64:
2040 /* Protected-mode interrupts are not implemented yet */
2041 return X86EMUL_UNHANDLEABLE;
2045 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2047 int rc = X86EMUL_CONTINUE;
2048 unsigned long temp_eip = 0;
2049 unsigned long temp_eflags = 0;
2050 unsigned long cs = 0;
2051 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2052 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2053 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2054 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2055 X86_EFLAGS_AC | X86_EFLAGS_ID |
2057 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2060 /* TODO: Add stack limit check */
2062 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2064 if (rc != X86EMUL_CONTINUE)
2067 if (temp_eip & ~0xffff)
2068 return emulate_gp(ctxt, 0);
2070 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2072 if (rc != X86EMUL_CONTINUE)
2075 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2077 if (rc != X86EMUL_CONTINUE)
2080 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2082 if (rc != X86EMUL_CONTINUE)
2085 ctxt->_eip = temp_eip;
2087 if (ctxt->op_bytes == 4)
2088 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2089 else if (ctxt->op_bytes == 2) {
2090 ctxt->eflags &= ~0xffff;
2091 ctxt->eflags |= temp_eflags;
2094 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2095 ctxt->eflags |= X86_EFLAGS_FIXED;
2096 ctxt->ops->set_nmi_mask(ctxt, false);
2101 static int em_iret(struct x86_emulate_ctxt *ctxt)
2103 switch(ctxt->mode) {
2104 case X86EMUL_MODE_REAL:
2105 return emulate_iret_real(ctxt);
2106 case X86EMUL_MODE_VM86:
2107 case X86EMUL_MODE_PROT16:
2108 case X86EMUL_MODE_PROT32:
2109 case X86EMUL_MODE_PROT64:
2111 /* iret from protected mode is not implemented yet */
2112 return X86EMUL_UNHANDLEABLE;
2116 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2120 struct desc_struct new_desc;
2121 u8 cpl = ctxt->ops->cpl(ctxt);
2123 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2125 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2126 X86_TRANSFER_CALL_JMP,
2128 if (rc != X86EMUL_CONTINUE)
2131 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2132 /* Error handling is not implemented. */
2133 if (rc != X86EMUL_CONTINUE)
2134 return X86EMUL_UNHANDLEABLE;
2139 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2141 return assign_eip_near(ctxt, ctxt->src.val);
2144 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2149 old_eip = ctxt->_eip;
2150 rc = assign_eip_near(ctxt, ctxt->src.val);
2151 if (rc != X86EMUL_CONTINUE)
2153 ctxt->src.val = old_eip;
2158 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2160 u64 old = ctxt->dst.orig_val64;
2162 if (ctxt->dst.bytes == 16)
2163 return X86EMUL_UNHANDLEABLE;
2165 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2166 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2167 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2168 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2169 ctxt->eflags &= ~X86_EFLAGS_ZF;
2171 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2172 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2174 ctxt->eflags |= X86_EFLAGS_ZF;
2176 return X86EMUL_CONTINUE;
2179 static int em_ret(struct x86_emulate_ctxt *ctxt)
2184 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2185 if (rc != X86EMUL_CONTINUE)
2188 return assign_eip_near(ctxt, eip);
2191 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2194 unsigned long eip, cs;
2195 int cpl = ctxt->ops->cpl(ctxt);
2196 struct desc_struct new_desc;
2198 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2199 if (rc != X86EMUL_CONTINUE)
2201 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2202 if (rc != X86EMUL_CONTINUE)
2204 /* Outer-privilege level return is not implemented */
2205 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2206 return X86EMUL_UNHANDLEABLE;
2207 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2210 if (rc != X86EMUL_CONTINUE)
2212 rc = assign_eip_far(ctxt, eip, &new_desc);
2213 /* Error handling is not implemented. */
2214 if (rc != X86EMUL_CONTINUE)
2215 return X86EMUL_UNHANDLEABLE;
2220 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2224 rc = em_ret_far(ctxt);
2225 if (rc != X86EMUL_CONTINUE)
2227 rsp_increment(ctxt, ctxt->src.val);
2228 return X86EMUL_CONTINUE;
2231 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2233 /* Save real source value, then compare EAX against destination. */
2234 ctxt->dst.orig_val = ctxt->dst.val;
2235 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2236 ctxt->src.orig_val = ctxt->src.val;
2237 ctxt->src.val = ctxt->dst.orig_val;
2238 fastop(ctxt, em_cmp);
2240 if (ctxt->eflags & X86_EFLAGS_ZF) {
2241 /* Success: write back to memory; no update of EAX */
2242 ctxt->src.type = OP_NONE;
2243 ctxt->dst.val = ctxt->src.orig_val;
2245 /* Failure: write the value we saw to EAX. */
2246 ctxt->src.type = OP_REG;
2247 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2248 ctxt->src.val = ctxt->dst.orig_val;
2249 /* Create write-cycle to dest by writing the same value */
2250 ctxt->dst.val = ctxt->dst.orig_val;
2252 return X86EMUL_CONTINUE;
2255 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2257 int seg = ctxt->src2.val;
2261 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2263 rc = load_segment_descriptor(ctxt, sel, seg);
2264 if (rc != X86EMUL_CONTINUE)
2267 ctxt->dst.val = ctxt->src.val;
2271 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2273 #ifdef CONFIG_X86_64
2274 return ctxt->ops->guest_has_long_mode(ctxt);
2280 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2282 desc->g = (flags >> 23) & 1;
2283 desc->d = (flags >> 22) & 1;
2284 desc->l = (flags >> 21) & 1;
2285 desc->avl = (flags >> 20) & 1;
2286 desc->p = (flags >> 15) & 1;
2287 desc->dpl = (flags >> 13) & 3;
2288 desc->s = (flags >> 12) & 1;
2289 desc->type = (flags >> 8) & 15;
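/*
 * The bit positions above mirror bits 8-23 of a segment descriptor's high
 * dword, which is how segment attributes are laid out in the SMRAM state-save
 * area; the 64-bit loader below shifts its 16-bit attribute field left by 8
 * for the same reason.
 */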
2292 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2295 struct desc_struct desc;
2299 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2302 offset = 0x7f84 + n * 12;
2304 offset = 0x7f2c + (n - 3) * 12;
2306 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2307 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2308 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2309 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2310 return X86EMUL_CONTINUE;
2313 #ifdef CONFIG_X86_64
2314 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2317 struct desc_struct desc;
2322 offset = 0x7e00 + n * 16;
2324 selector = GET_SMSTATE(u16, smstate, offset);
2325 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2326 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2327 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2328 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2330 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2331 return X86EMUL_CONTINUE;
2335 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2336 u64 cr0, u64 cr3, u64 cr4)
2341 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2343 if (cr4 & X86_CR4_PCIDE) {
2348 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2350 return X86EMUL_UNHANDLEABLE;
2353 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2354 * Then enable protected mode. However, PCID cannot be enabled
2355 * if EFER.LMA=0, so set it separately.
2357 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2359 return X86EMUL_UNHANDLEABLE;
2361 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2363 return X86EMUL_UNHANDLEABLE;
2365 if (cr4 & X86_CR4_PCIDE) {
2366 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2368 return X86EMUL_UNHANDLEABLE;
2370 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2372 return X86EMUL_UNHANDLEABLE;
2377 return X86EMUL_CONTINUE;
2380 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2381 const char *smstate)
2383 struct desc_struct desc;
2386 u32 val, cr0, cr3, cr4;
2389 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2390 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2391 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2392 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2394 for (i = 0; i < 8; i++)
2395 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2397 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2399 if (ctxt->ops->set_dr(ctxt, 6, val))
2400 return X86EMUL_UNHANDLEABLE;
2402 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2404 if (ctxt->ops->set_dr(ctxt, 7, val))
2405 return X86EMUL_UNHANDLEABLE;
2407 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2408 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2409 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2410 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2411 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2413 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2414 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2415 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2416 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2417 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2419 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2420 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2421 ctxt->ops->set_gdt(ctxt, &dt);
2423 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2424 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2425 ctxt->ops->set_idt(ctxt, &dt);
2427 for (i = 0; i < 6; i++) {
2428 int r = rsm_load_seg_32(ctxt, smstate, i);
2429 if (r != X86EMUL_CONTINUE)
2433 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2435 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2437 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2440 #ifdef CONFIG_X86_64
2441 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2442 const char *smstate)
2444 struct desc_struct desc;
2446 u64 val, cr0, cr3, cr4;
2451 for (i = 0; i < 16; i++)
2452 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2454 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2455 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2457 val = GET_SMSTATE(u64, smstate, 0x7f68);
2459 if (ctxt->ops->set_dr(ctxt, 6, val))
2460 return X86EMUL_UNHANDLEABLE;
2462 val = GET_SMSTATE(u64, smstate, 0x7f60);
2464 if (ctxt->ops->set_dr(ctxt, 7, val))
2465 return X86EMUL_UNHANDLEABLE;
2467 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2468 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2469 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2470 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2471 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2473 if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
2474 return X86EMUL_UNHANDLEABLE;
2476 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2477 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2478 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2479 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2480 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2481 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2483 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2484 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2485 ctxt->ops->set_idt(ctxt, &dt);
2487 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2488 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2489 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2490 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2491 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2492 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2494 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2495 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2496 ctxt->ops->set_gdt(ctxt, &dt);
2498 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2499 if (r != X86EMUL_CONTINUE)
2502 for (i = 0; i < 6; i++) {
2503 r = rsm_load_seg_64(ctxt, smstate, i);
2504 if (r != X86EMUL_CONTINUE)
2508 return X86EMUL_CONTINUE;
2512 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2514 unsigned long cr0, cr4, efer;
	char buf[512];
	u64 smbase;
	int ret;
2519 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2520 return emulate_ud(ctxt);
2522 smbase = ctxt->ops->get_smbase(ctxt);
2524 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2525 if (ret != X86EMUL_CONTINUE)
2526 return X86EMUL_UNHANDLEABLE;
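	/*
	 * The 512-byte state-save area sits at the top of SMRAM
	 * (smbase + 0xfe00 .. smbase + 0xffff).  The GET_SMSTATE() offsets
	 * used by rsm_load_state_32/64() follow the conventional SMRAM map,
	 * i.e. offsets relative to smbase + 0x8000, hence the 0x7xxx values.
	 */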
2528 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2529 ctxt->ops->set_nmi_mask(ctxt, false);
2531 ctxt->ops->exiting_smm(ctxt);
2534 * Get back to real mode, to prepare a safe state in which to load
2535 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2536 * supports long mode.
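 *
 * Roughly: CR4.PCIDE is cleared while paging is still enabled, a 32-bit
 * CS is installed so that clearing CR0.PG also clears EFER.LMA, and only
 * then are CR4.PAE and EFER.LME dropped.
 */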
2538 if (emulator_has_longmode(ctxt)) {
2539 struct desc_struct cs_desc;
2541 /* Zero CR4.PCIDE before CR0.PG. */
2542 cr4 = ctxt->ops->get_cr(ctxt, 4);
2543 if (cr4 & X86_CR4_PCIDE)
2544 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2546 /* A 32-bit code segment is required to clear EFER.LMA. */
2547 memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
2549 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2550 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2553 /* For the 64-bit case, this will clear EFER.LMA. */
2554 cr0 = ctxt->ops->get_cr(ctxt, 0);
2555 if (cr0 & X86_CR0_PE)
2556 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2558 if (emulator_has_longmode(ctxt)) {
2559 /* Clear CR4.PAE before clearing EFER.LME. */
2560 cr4 = ctxt->ops->get_cr(ctxt, 4);
2561 if (cr4 & X86_CR4_PAE)
2562 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2564 /* And finally go back to 32-bit mode. */
		efer = 0;
2566 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2570 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
2571 * state (e.g. enter guest mode) before loading state from the SMM
 * state-save area.
 */
2574 if (ctxt->ops->leave_smm(ctxt, buf))
2575 goto emulate_shutdown;
2577 #ifdef CONFIG_X86_64
2578 if (emulator_has_longmode(ctxt))
2579 ret = rsm_load_state_64(ctxt, buf);
2582 ret = rsm_load_state_32(ctxt, buf);
2584 if (ret != X86EMUL_CONTINUE)
2585 goto emulate_shutdown;
2588 * Note, the ctxt->ops callbacks are responsible for handling side
2589 * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
2590 * runtime updates, etc... If that changes, e.g. this flow is moved
2591 * out of the emulator to make it look more like enter_smm(), then
2592 * those side effects need to be explicitly handled for both success
 * and failure.
 */
2595 return X86EMUL_CONTINUE;
2598 ctxt->ops->triple_fault(ctxt);
2599 return X86EMUL_CONTINUE;
2603 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2604 struct desc_struct *cs, struct desc_struct *ss)
2606 cs->l = 0; /* will be adjusted later */
2607 set_desc_base(cs, 0); /* flat segment */
2608 cs->g = 1; /* 4kb granularity */
2609 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2610 cs->type = 0x0b; /* Read, Execute, Accessed */
2612 cs->dpl = 0; /* will be adjusted later */
2617 set_desc_base(ss, 0); /* flat segment */
2618 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2619 ss->g = 1; /* 4kb granularity */
2621 ss->type = 0x03; /* Read/Write, Accessed */
2622 ss->d = 1; /* 32bit stack segment */
2629 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2631 u32 eax, ebx, ecx, edx;
2634 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2635 return is_guest_vendor_intel(ebx, ecx, edx);
2638 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2640 const struct x86_emulate_ops *ops = ctxt->ops;
2641 u32 eax, ebx, ecx, edx;
2644 * syscall should always be enabled in longmode - so only become
2645 * vendor specific (cpuid) if other modes are active...
2647 if (ctxt->mode == X86EMUL_MODE_PROT64)
2652 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2654 * remark: Intel CPUs only support "syscall" in 64bit longmode. Also a
2655 * 64bit guest with a 32bit compat-app running will #UD !! While this
2656 * behaviour can be fixed (by emulating) into AMD response - CPUs of
2657 * AMD can't behave like Intel.
2659 if (is_guest_vendor_intel(ebx, ecx, edx))
2662 if (is_guest_vendor_amd(ebx, ecx, edx) ||
2663 is_guest_vendor_hygon(ebx, ecx, edx))
2667 * default: (not Intel, not AMD, not Hygon), apply Intel's
 * stricter rules...
 */
	return false;
}
2673 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2675 const struct x86_emulate_ops *ops = ctxt->ops;
2676 struct desc_struct cs, ss;
2681 /* syscall is not available in real mode */
2682 if (ctxt->mode == X86EMUL_MODE_REAL ||
2683 ctxt->mode == X86EMUL_MODE_VM86)
2684 return emulate_ud(ctxt);
2686 if (!(em_syscall_is_enabled(ctxt)))
2687 return emulate_ud(ctxt);
2689 ops->get_msr(ctxt, MSR_EFER, &efer);
2690 if (!(efer & EFER_SCE))
2691 return emulate_ud(ctxt);
2693 setup_syscalls_segments(ctxt, &cs, &ss);
2694 ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
2696 cs_sel = (u16)(msr_data & 0xfffc);
2697 ss_sel = (u16)(msr_data + 8);
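	/*
	 * Per the SDM/APM, SYSCALL takes its CS selector from STAR[47:32]
	 * (hence the shift above) and uses the next GDT slot,
	 * STAR[47:32] + 8, for SS.
	 */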
2699 if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
2703 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2704 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2706 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2707 if (efer & EFER_LMA) {
2708 #ifdef CONFIG_X86_64
2709 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
		ops->get_msr(ctxt,
2712 	     ctxt->mode == X86EMUL_MODE_PROT64 ?
2713 MSR_LSTAR : MSR_CSTAR, &msr_data);
2714 ctxt->_eip = msr_data;
2716 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2717 ctxt->eflags &= ~msr_data;
2718 ctxt->eflags |= X86_EFLAGS_FIXED;
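	/*
	 * The legacy (non-LMA) path below takes EIP from STAR[31:0] and only
	 * clears VM and IF; no SYSCALL_MASK is applied outside long mode.
	 */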
2722 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2723 ctxt->_eip = (u32)msr_data;
2725 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2728 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2729 return X86EMUL_CONTINUE;
2732 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2734 const struct x86_emulate_ops *ops = ctxt->ops;
2735 struct desc_struct cs, ss;
2740 ops->get_msr(ctxt, MSR_EFER, &efer);
2741 /* inject #GP if in real mode */
2742 if (ctxt->mode == X86EMUL_MODE_REAL)
2743 return emulate_gp(ctxt, 0);
2746 * Not recognized on AMD in compat mode (but is recognized in legacy
 * mode).
 */
2749 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2750 && !vendor_intel(ctxt))
2751 return emulate_ud(ctxt);
2753 /* sysenter/sysexit have not been tested in 64bit mode. */
2754 if (ctxt->mode == X86EMUL_MODE_PROT64)
2755 return X86EMUL_UNHANDLEABLE;
2757 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2758 if ((msr_data & 0xfffc) == 0x0)
2759 return emulate_gp(ctxt, 0);
2761 setup_syscalls_segments(ctxt, &cs, &ss);
2762 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2763 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2764 ss_sel = cs_sel + 8;
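	/*
	 * SYSENTER: CS comes from IA32_SYSENTER_CS with the RPL forced to 0,
	 * SS is the following GDT entry (CS + 8); EIP and ESP are taken from
	 * the SYSENTER_EIP/ESP MSRs below.
	 */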
2765 if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
2770 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2771 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2773 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2774 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2776 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2777 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							(u32)msr_data;
2779 if (efer & EFER_LMA)
2780 ctxt->mode = X86EMUL_MODE_PROT64;
2782 return X86EMUL_CONTINUE;
2785 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2787 const struct x86_emulate_ops *ops = ctxt->ops;
2788 struct desc_struct cs, ss;
2789 u64 msr_data, rcx, rdx;
2791 u16 cs_sel = 0, ss_sel = 0;
2793 /* inject #GP if in real mode or Virtual 8086 mode */
2794 if (ctxt->mode == X86EMUL_MODE_REAL ||
2795 ctxt->mode == X86EMUL_MODE_VM86)
2796 return emulate_gp(ctxt, 0);
2798 setup_syscalls_segments(ctxt, &cs, &ss);
2800 if ((ctxt->rex_prefix & 0x8) != 0x0)
2801 usermode = X86EMUL_MODE_PROT64;
2803 usermode = X86EMUL_MODE_PROT32;
2805 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2806 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2810 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
2812 case X86EMUL_MODE_PROT32:
2813 cs_sel = (u16)(msr_data + 16);
2814 if ((msr_data & 0xfffc) == 0x0)
2815 return emulate_gp(ctxt, 0);
2816 ss_sel = (u16)(msr_data + 24);
2820 case X86EMUL_MODE_PROT64:
2821 cs_sel = (u16)(msr_data + 32);
2822 if (msr_data == 0x0)
2823 return emulate_gp(ctxt, 0);
2824 ss_sel = cs_sel + 8;
2827 if (emul_is_noncanonical_address(rcx, ctxt) ||
2828 emul_is_noncanonical_address(rdx, ctxt))
2829 return emulate_gp(ctxt, 0);
2832 cs_sel |= SEGMENT_RPL_MASK;
2833 ss_sel |= SEGMENT_RPL_MASK;
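	/*
	 * SYSEXIT returns to CPL 3, so both selectors get RPL = 3.  The
	 * selectors themselves are derived from IA32_SYSENTER_CS above:
	 * +16/+24 for a 32-bit return, +32/+40 for a 64-bit one.
	 */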
2835 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2836 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
	ctxt->_eip = rdx;
2839 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2841 return X86EMUL_CONTINUE;
2844 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2847 if (ctxt->mode == X86EMUL_MODE_REAL)
2849 if (ctxt->mode == X86EMUL_MODE_VM86)
2851 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2852 return ctxt->ops->cpl(ctxt) > iopl;
2855 #define VMWARE_PORT_VMPORT (0x5658)
2856 #define VMWARE_PORT_VMRPC (0x5659)
2858 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2861 const struct x86_emulate_ops *ops = ctxt->ops;
2862 struct desc_struct tr_seg;
2865 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2866 unsigned mask = (1 << len) - 1;
2870 * VMware allows access to these ports even if denied
2871 * by TSS I/O permission bitmap. Mimic behavior.
2873 if (enable_vmware_backdoor &&
2874 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2877 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
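	/*
	 * Walk the TSS I/O permission bitmap: the TSS must be at least 104
	 * bytes (limit >= 103), the 16-bit I/O map base lives at offset 102,
	 * and two bytes of the bitmap are read so that an access straddling
	 * a byte boundary is still covered.  Any set bit denies the access.
	 */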
2880 if (desc_limit_scaled(&tr_seg) < 103)
2882 base = get_desc_base(&tr_seg);
2883 #ifdef CONFIG_X86_64
2884 base |= ((u64)base3) << 32;
2886 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2887 if (r != X86EMUL_CONTINUE)
2889 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2891 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2892 if (r != X86EMUL_CONTINUE)
2894 if ((perm >> bit_idx) & mask)
2899 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2905 if (emulator_bad_iopl(ctxt))
2906 if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;
2909 ctxt->perm_ok = true;
2914 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2917 * Intel CPUs mask the counter and pointers in quite strange
2918 * manner when ECX is zero due to REP-string optimizations.
2920 #ifdef CONFIG_X86_64
2921 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2924 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2927 case 0xa4: /* movsb */
2928 case 0xa5: /* movsd/w */
2929 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2931 case 0xaa: /* stosb */
2932 case 0xab: /* stosd/w */
2933 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2938 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2939 struct tss_segment_16 *tss)
2941 tss->ip = ctxt->_eip;
2942 tss->flag = ctxt->eflags;
2943 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2944 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2945 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2946 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2947 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2948 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2949 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2950 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2952 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2953 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2954 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2955 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2956 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2959 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2960 struct tss_segment_16 *tss)
2965 ctxt->_eip = tss->ip;
2966 ctxt->eflags = tss->flag | 2;
2967 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2968 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2969 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2970 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2971 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2972 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2973 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2974 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2977 * SDM says that segment selectors are loaded before segment
 * descriptors.
 */
2980 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2981 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2982 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2983 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2984 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2989 * Now load segment descriptors. If a fault happens at this stage,
2990 * it is handled in the context of the new task.
 */
2992 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2993 X86_TRANSFER_TASK_SWITCH, NULL);
2994 if (ret != X86EMUL_CONTINUE)
2996 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2997 X86_TRANSFER_TASK_SWITCH, NULL);
2998 if (ret != X86EMUL_CONTINUE)
3000 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3001 X86_TRANSFER_TASK_SWITCH, NULL);
3002 if (ret != X86EMUL_CONTINUE)
3004 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3005 X86_TRANSFER_TASK_SWITCH, NULL);
3006 if (ret != X86EMUL_CONTINUE)
3008 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3009 X86_TRANSFER_TASK_SWITCH, NULL);
3010 if (ret != X86EMUL_CONTINUE)
3013 return X86EMUL_CONTINUE;
3016 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3017 u16 tss_selector, u16 old_tss_sel,
3018 ulong old_tss_base, struct desc_struct *new_desc)
3020 struct tss_segment_16 tss_seg;
3022 u32 new_tss_base = get_desc_base(new_desc);
3024 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3025 if (ret != X86EMUL_CONTINUE)
3028 save_state_to_tss16(ctxt, &tss_seg);
3030 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3031 if (ret != X86EMUL_CONTINUE)
3034 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3035 if (ret != X86EMUL_CONTINUE)
3038 if (old_tss_sel != 0xffff) {
3039 tss_seg.prev_task_link = old_tss_sel;
3041 ret = linear_write_system(ctxt, new_tss_base,
3042 &tss_seg.prev_task_link,
3043 sizeof(tss_seg.prev_task_link));
3044 if (ret != X86EMUL_CONTINUE)
3048 return load_state_from_tss16(ctxt, &tss_seg);
3051 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3052 struct tss_segment_32 *tss)
3054 /* CR3 and ldt selector are not saved intentionally */
3055 tss->eip = ctxt->_eip;
3056 tss->eflags = ctxt->eflags;
3057 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3058 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3059 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3060 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3061 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3062 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3063 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3064 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3066 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3067 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3068 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3069 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3070 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3071 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3074 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3075 struct tss_segment_32 *tss)
3080 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3081 return emulate_gp(ctxt, 0);
3082 ctxt->_eip = tss->eip;
3083 ctxt->eflags = tss->eflags | 2;
3085 /* General purpose registers */
3086 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3087 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3088 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3089 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3090 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3091 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3092 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3093 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3096 * SDM says that segment selectors are loaded before segment
3097 * descriptors. This is important because CPL checks will
 * use CS.RPL.
 */
3100 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3101 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3102 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3103 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3104 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3105 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3106 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3109 * If we're switching between Protected Mode and VM86, we need to make
3110 * sure to update the mode before loading the segment descriptors so
3111 * that the selectors are interpreted correctly.
3113 if (ctxt->eflags & X86_EFLAGS_VM) {
3114 ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
3117 ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}
3122 * Now load segment descriptors. If a fault happens at this stage,
3123 * it is handled in the context of the new task.
 */
3125 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3126 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3127 if (ret != X86EMUL_CONTINUE)
3129 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3130 X86_TRANSFER_TASK_SWITCH, NULL);
3131 if (ret != X86EMUL_CONTINUE)
3133 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3134 X86_TRANSFER_TASK_SWITCH, NULL);
3135 if (ret != X86EMUL_CONTINUE)
3137 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3138 X86_TRANSFER_TASK_SWITCH, NULL);
3139 if (ret != X86EMUL_CONTINUE)
3141 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3142 X86_TRANSFER_TASK_SWITCH, NULL);
3143 if (ret != X86EMUL_CONTINUE)
3145 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3146 X86_TRANSFER_TASK_SWITCH, NULL);
3147 if (ret != X86EMUL_CONTINUE)
3149 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3150 X86_TRANSFER_TASK_SWITCH, NULL);
3155 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3156 u16 tss_selector, u16 old_tss_sel,
3157 ulong old_tss_base, struct desc_struct *new_desc)
3159 struct tss_segment_32 tss_seg;
3161 u32 new_tss_base = get_desc_base(new_desc);
3162 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3163 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
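	/*
	 * The eip..ldt_selector window spans exactly the fields a task
	 * switch updates (registers, eflags and segment selectors); CR3 and
	 * the static fields of the outgoing TSS are deliberately not
	 * rewritten.
	 */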
3165 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3166 if (ret != X86EMUL_CONTINUE)
3169 save_state_to_tss32(ctxt, &tss_seg);
3171 /* Only GP registers and segment selectors are saved */
3172 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3173 ldt_sel_offset - eip_offset);
3174 if (ret != X86EMUL_CONTINUE)
3177 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3178 if (ret != X86EMUL_CONTINUE)
3181 if (old_tss_sel != 0xffff) {
3182 tss_seg.prev_task_link = old_tss_sel;
3184 ret = linear_write_system(ctxt, new_tss_base,
3185 &tss_seg.prev_task_link,
3186 sizeof(tss_seg.prev_task_link));
3187 if (ret != X86EMUL_CONTINUE)
3191 return load_state_from_tss32(ctxt, &tss_seg);
3194 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3195 u16 tss_selector, int idt_index, int reason,
3196 bool has_error_code, u32 error_code)
3198 const struct x86_emulate_ops *ops = ctxt->ops;
3199 struct desc_struct curr_tss_desc, next_tss_desc;
3201 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3202 ulong old_tss_base =
3203 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3205 ulong desc_addr, dr7;
3207 /* FIXME: old_tss_base == ~0 ? */
3209 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3210 if (ret != X86EMUL_CONTINUE)
3212 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3213 if (ret != X86EMUL_CONTINUE)
3216 /* FIXME: check that next_tss_desc is tss */
3219 * Check privileges. The three cases are task switch caused by...
3221 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3222 * 2. Exception/IRQ/iret: No check is performed
3223 * 3. jmp/call to TSS/task-gate: No check is performed since the
3224 * hardware checks it before exiting.
3226 if (reason == TASK_SWITCH_GATE) {
3227 if (idt_index != -1) {
3228 /* Software interrupts */
3229 struct desc_struct task_gate_desc;
3232 ret = read_interrupt_descriptor(ctxt, idt_index,
3234 if (ret != X86EMUL_CONTINUE)
3237 dpl = task_gate_desc.dpl;
3238 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3239 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3243 desc_limit = desc_limit_scaled(&next_tss_desc);
3244 if (!next_tss_desc.p ||
3245 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3246 desc_limit < 0x2b)) {
3247 return emulate_ts(ctxt, tss_selector & 0xfffc);
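	/*
	 * The limit check above enforces the architectural minimum TSS size:
	 * a limit of at least 0x67 (104 bytes) for a 32-bit TSS (type bit 3
	 * set), 0x2b (44 bytes) for a 16-bit one; otherwise #TS is raised
	 * with the selector as the error code.
	 */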
3250 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3251 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3252 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3255 if (reason == TASK_SWITCH_IRET)
3256 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3258 /* set back link to prev task only if NT bit is set in eflags
3259 note that old_tss_sel is not used after this point */
3260 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3261 old_tss_sel = 0xffff;
3263 if (next_tss_desc.type & 8)
3264 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3265 old_tss_base, &next_tss_desc);
3267 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3268 old_tss_base, &next_tss_desc);
3269 if (ret != X86EMUL_CONTINUE)
3272 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3273 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3275 if (reason != TASK_SWITCH_IRET) {
3276 next_tss_desc.type |= (1 << 1); /* set busy flag */
3277 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3280 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3281 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3283 if (has_error_code) {
3284 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3285 ctxt->lock_prefix = 0;
3286 ctxt->src.val = (unsigned long) error_code;
3287 ret = em_push(ctxt);
3290 ops->get_dr(ctxt, 7, &dr7);
3291 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3296 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3297 u16 tss_selector, int idt_index, int reason,
3298 bool has_error_code, u32 error_code)
3302 invalidate_registers(ctxt);
3303 ctxt->_eip = ctxt->eip;
3304 ctxt->dst.type = OP_NONE;
3306 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3307 has_error_code, error_code);
3309 if (rc == X86EMUL_CONTINUE) {
3310 ctxt->eip = ctxt->_eip;
3311 writeback_registers(ctxt);
3314 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3317 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3320 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3322 register_address_increment(ctxt, reg, df * op->bytes);
3323 op->addr.mem.ea = register_address(ctxt, reg);
3326 static int em_das(struct x86_emulate_ctxt *ctxt)
3329 bool af, cf, old_cf;
3331 cf = ctxt->eflags & X86_EFLAGS_CF;
3337 af = ctxt->eflags & X86_EFLAGS_AF;
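	/*
	 * DAS, decimal adjust AL after subtraction.  For example, with
	 * AL = 0x2d and CF = AF = 0: the low nibble 0xd is > 9, so 6 is
	 * subtracted (AL = 0x27, AF = 1); 0x2d <= 0x99 and the old CF was
	 * clear, so no 0x60 adjustment is made and CF stays 0.
	 */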
3338 if ((al & 0x0f) > 9 || af) {
		al -= 6;
3340 cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
3345 if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}
	ctxt->dst.val = al;
3351 /* Set PF, ZF, SF */
3352 ctxt->src.type = OP_IMM;
3354 ctxt->src.bytes = 1;
3355 fastop(ctxt, em_or);
3356 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3358 ctxt->eflags |= X86_EFLAGS_CF;
3360 ctxt->eflags |= X86_EFLAGS_AF;
3361 return X86EMUL_CONTINUE;
3364 static int em_aam(struct x86_emulate_ctxt *ctxt)
3368 if (ctxt->src.val == 0)
3369 return emulate_de(ctxt);
3371 al = ctxt->dst.val & 0xff;
3372 ah = al / ctxt->src.val;
3373 al %= ctxt->src.val;
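	/*
	 * Example: AAM with the usual base 10 and AL = 0x3f (63) yields
	 * AH = 6, AL = 3, i.e. AX = 0x0603.
	 */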
3375 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3377 /* Set PF, ZF, SF */
3378 ctxt->src.type = OP_IMM;
3380 ctxt->src.bytes = 1;
3381 fastop(ctxt, em_or);
3383 return X86EMUL_CONTINUE;
3386 static int em_aad(struct x86_emulate_ctxt *ctxt)
3388 u8 al = ctxt->dst.val & 0xff;
3389 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3391 al = (al + (ah * ctxt->src.val)) & 0xff;
3393 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3395 /* Set PF, ZF, SF */
3396 ctxt->src.type = OP_IMM;
3398 ctxt->src.bytes = 1;
3399 fastop(ctxt, em_or);
3401 return X86EMUL_CONTINUE;
3404 static int em_call(struct x86_emulate_ctxt *ctxt)
3407 long rel = ctxt->src.val;
3409 ctxt->src.val = (unsigned long)ctxt->_eip;
3410 rc = jmp_rel(ctxt, rel);
3411 if (rc != X86EMUL_CONTINUE)
3413 return em_push(ctxt);
3416 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3421 struct desc_struct old_desc, new_desc;
3422 const struct x86_emulate_ops *ops = ctxt->ops;
3423 int cpl = ctxt->ops->cpl(ctxt);
3424 enum x86emul_mode prev_mode = ctxt->mode;
3426 old_eip = ctxt->_eip;
3427 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3429 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3430 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3431 X86_TRANSFER_CALL_JMP, &new_desc);
3432 if (rc != X86EMUL_CONTINUE)
3435 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3436 if (rc != X86EMUL_CONTINUE)
3439 ctxt->src.val = old_cs;
3441 if (rc != X86EMUL_CONTINUE)
3444 ctxt->src.val = old_eip;
3446 /* If we failed, we tainted the memory, but the very least we should
	   restore registers to their original state. */
3448 if (rc != X86EMUL_CONTINUE) {
3449 pr_warn_once("faulting far call emulation tainted memory\n");
3454 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3455 ctxt->mode = prev_mode;
3460 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3465 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3466 if (rc != X86EMUL_CONTINUE)
3468 rc = assign_eip_near(ctxt, eip);
3469 if (rc != X86EMUL_CONTINUE)
3471 rsp_increment(ctxt, ctxt->src.val);
3472 return X86EMUL_CONTINUE;
3475 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3477 /* Write back the register source. */
3478 ctxt->src.val = ctxt->dst.val;
3479 write_register_operand(&ctxt->src);
3481 /* Write back the memory destination with implicit LOCK prefix. */
3482 ctxt->dst.val = ctxt->src.orig_val;
3483 ctxt->lock_prefix = 1;
3484 return X86EMUL_CONTINUE;
3487 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3489 ctxt->dst.val = ctxt->src2.val;
3490 return fastop(ctxt, em_imul);
3493 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3495 ctxt->dst.type = OP_REG;
3496 ctxt->dst.bytes = ctxt->src.bytes;
3497 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3498 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
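	/*
	 * Shifting the sign bit down leaves 0 or 1; ~(x - 1) maps 1 to
	 * all-ones and 0 to 0, i.e. the sign extension of the accumulator
	 * that CWD/CDQ/CQO places in DX/EDX/RDX.
	 */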
3500 return X86EMUL_CONTINUE;
3503 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3507 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3508 return emulate_ud(ctxt);
3509 ctxt->dst.val = tsc_aux;
3510 return X86EMUL_CONTINUE;
3513 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3517 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3518 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3519 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3520 return X86EMUL_CONTINUE;
3523 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3527 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3528 return emulate_gp(ctxt, 0);
3529 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3530 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3531 return X86EMUL_CONTINUE;
3534 static int em_mov(struct x86_emulate_ctxt *ctxt)
3536 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3537 return X86EMUL_CONTINUE;
3540 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3544 if (!ctxt->ops->guest_has_movbe(ctxt))
3545 return emulate_ud(ctxt);
3547 switch (ctxt->op_bytes) {
3550 * From MOVBE definition: "...When the operand size is 16 bits,
3551 * the upper word of the destination register remains unchanged
3554 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3555 * rules so we have to do the operation almost per hand.
3557 tmp = (u16)ctxt->src.val;
3558 ctxt->dst.val &= ~0xffffUL;
3559 ctxt->dst.val |= (unsigned long)swab16(tmp);
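		/*
		 * E.g. a 16-bit MOVBE of 0x1234 stores 0x3412, while the
		 * masking above keeps bits 63:16 of the destination intact.
		 */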
3562 ctxt->dst.val = swab32((u32)ctxt->src.val);
3565 ctxt->dst.val = swab64(ctxt->src.val);
3570 return X86EMUL_CONTINUE;
3573 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3575 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3576 return emulate_gp(ctxt, 0);
3578 /* Disable writeback. */
3579 ctxt->dst.type = OP_NONE;
3580 return X86EMUL_CONTINUE;
3583 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3587 if (ctxt->mode == X86EMUL_MODE_PROT64)
3588 val = ctxt->src.val & ~0ULL;
3590 val = ctxt->src.val & ~0U;
3592 /* #UD condition is already handled. */
3593 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3594 return emulate_gp(ctxt, 0);
3596 /* Disable writeback. */
3597 ctxt->dst.type = OP_NONE;
3598 return X86EMUL_CONTINUE;
3601 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3603 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3607 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3608 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3609 r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
3611 if (r == X86EMUL_IO_NEEDED)
		return r;

	if (r > 0)
3615 return emulate_gp(ctxt, 0);
3617 return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
3620 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3622 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3626 r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
3628 if (r == X86EMUL_IO_NEEDED)
		return r;

	if (r)
3632 return emulate_gp(ctxt, 0);
3634 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3635 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3636 return X86EMUL_CONTINUE;
3639 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3641 if (segment > VCPU_SREG_GS &&
3642 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3643 ctxt->ops->cpl(ctxt) > 0)
3644 return emulate_gp(ctxt, 0);
3646 ctxt->dst.val = get_segment_selector(ctxt, segment);
3647 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3648 ctxt->dst.bytes = 2;
3649 return X86EMUL_CONTINUE;
3652 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3654 if (ctxt->modrm_reg > VCPU_SREG_GS)
3655 return emulate_ud(ctxt);
3657 return em_store_sreg(ctxt, ctxt->modrm_reg);
3660 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3662 u16 sel = ctxt->src.val;
3664 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3665 return emulate_ud(ctxt);
3667 if (ctxt->modrm_reg == VCPU_SREG_SS)
3668 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3670 /* Disable writeback. */
3671 ctxt->dst.type = OP_NONE;
3672 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3675 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3677 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3680 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3682 u16 sel = ctxt->src.val;
3684 /* Disable writeback. */
3685 ctxt->dst.type = OP_NONE;
3686 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3689 static int em_str(struct x86_emulate_ctxt *ctxt)
3691 return em_store_sreg(ctxt, VCPU_SREG_TR);
3694 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3696 u16 sel = ctxt->src.val;
3698 /* Disable writeback. */
3699 ctxt->dst.type = OP_NONE;
3700 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3703 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3708 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3709 if (rc == X86EMUL_CONTINUE)
3710 ctxt->ops->invlpg(ctxt, linear);
3711 /* Disable writeback. */
3712 ctxt->dst.type = OP_NONE;
3713 return X86EMUL_CONTINUE;
3716 static int em_clts(struct x86_emulate_ctxt *ctxt)
3720 cr0 = ctxt->ops->get_cr(ctxt, 0);
3722 ctxt->ops->set_cr(ctxt, 0, cr0);
3723 return X86EMUL_CONTINUE;
3726 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3728 int rc = ctxt->ops->fix_hypercall(ctxt);
3730 if (rc != X86EMUL_CONTINUE)
3733 /* Let the processor re-execute the fixed hypercall */
3734 ctxt->_eip = ctxt->eip;
3735 /* Disable writeback. */
3736 ctxt->dst.type = OP_NONE;
3737 return X86EMUL_CONTINUE;
3740 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3741 void (*get)(struct x86_emulate_ctxt *ctxt,
3742 struct desc_ptr *ptr))
3744 struct desc_ptr desc_ptr;
3746 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3747 ctxt->ops->cpl(ctxt) > 0)
3748 return emulate_gp(ctxt, 0);
3750 if (ctxt->mode == X86EMUL_MODE_PROT64)
3752 get(ctxt, &desc_ptr);
3753 if (ctxt->op_bytes == 2) {
3755 desc_ptr.address &= 0x00ffffff;
3757 /* Disable writeback. */
3758 ctxt->dst.type = OP_NONE;
3759 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3760 &desc_ptr, 2 + ctxt->op_bytes);
3763 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3765 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3768 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3770 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3773 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3775 struct desc_ptr desc_ptr;
3778 if (ctxt->mode == X86EMUL_MODE_PROT64)
3780 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3781 &desc_ptr.size, &desc_ptr.address,
3783 if (rc != X86EMUL_CONTINUE)
3785 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3786 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3787 return emulate_gp(ctxt, 0);
3789 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3791 ctxt->ops->set_idt(ctxt, &desc_ptr);
3792 /* Disable writeback. */
3793 ctxt->dst.type = OP_NONE;
3794 return X86EMUL_CONTINUE;
3797 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3799 return em_lgdt_lidt(ctxt, true);
3802 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3804 return em_lgdt_lidt(ctxt, false);
3807 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3809 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3810 ctxt->ops->cpl(ctxt) > 0)
3811 return emulate_gp(ctxt, 0);
3813 if (ctxt->dst.type == OP_MEM)
3814 ctxt->dst.bytes = 2;
3815 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3816 return X86EMUL_CONTINUE;
3819 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3821 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3822 | (ctxt->src.val & 0x0f));
3823 ctxt->dst.type = OP_NONE;
3824 return X86EMUL_CONTINUE;
3827 static int em_loop(struct x86_emulate_ctxt *ctxt)
3829 int rc = X86EMUL_CONTINUE;
3831 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3832 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3833 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3834 rc = jmp_rel(ctxt, ctxt->src.val);
3839 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3841 int rc = X86EMUL_CONTINUE;
3843 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3844 rc = jmp_rel(ctxt, ctxt->src.val);
3849 static int em_in(struct x86_emulate_ctxt *ctxt)
3851 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
3853 return X86EMUL_IO_NEEDED;
3855 return X86EMUL_CONTINUE;
3858 static int em_out(struct x86_emulate_ctxt *ctxt)
3860 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
3862 /* Disable writeback. */
3863 ctxt->dst.type = OP_NONE;
3864 return X86EMUL_CONTINUE;
3867 static int em_cli(struct x86_emulate_ctxt *ctxt)
3869 if (emulator_bad_iopl(ctxt))
3870 return emulate_gp(ctxt, 0);
3872 ctxt->eflags &= ~X86_EFLAGS_IF;
3873 return X86EMUL_CONTINUE;
3876 static int em_sti(struct x86_emulate_ctxt *ctxt)
3878 if (emulator_bad_iopl(ctxt))
3879 return emulate_gp(ctxt, 0);
3881 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3882 ctxt->eflags |= X86_EFLAGS_IF;
3883 return X86EMUL_CONTINUE;
3886 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3888 u32 eax, ebx, ecx, edx;
3891 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3892 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3893 ctxt->ops->cpl(ctxt)) {
3894 return emulate_gp(ctxt, 0);
3897 eax = reg_read(ctxt, VCPU_REGS_RAX);
3898 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3899 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3900 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3901 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3902 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3903 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3904 return X86EMUL_CONTINUE;
3907 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3911 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
3913 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3915 ctxt->eflags &= ~0xffUL;
3916 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3917 return X86EMUL_CONTINUE;
3920 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3922 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3923 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3924 return X86EMUL_CONTINUE;
3927 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3929 switch (ctxt->op_bytes) {
3930 #ifdef CONFIG_X86_64
	case 8:
3932 asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
3936 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
3939 return X86EMUL_CONTINUE;
3942 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3944 /* emulating clflush regardless of cpuid */
3945 return X86EMUL_CONTINUE;
3948 static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3950 /* emulating clflushopt regardless of cpuid */
3951 return X86EMUL_CONTINUE;
3954 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3956 ctxt->dst.val = (s32) ctxt->src.val;
3957 return X86EMUL_CONTINUE;
3960 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3962 if (!ctxt->ops->guest_has_fxsr(ctxt))
3963 return emulate_ud(ctxt);
3965 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3966 return emulate_nm(ctxt);
3969 * Don't emulate a case that should never be hit, instead of working
3970 * around a lack of fxsave64/fxrstor64 on old compilers.
3972 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3973 return X86EMUL_UNHANDLEABLE;
3975 return X86EMUL_CONTINUE;
3979 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3980 * and restore MXCSR.
3982 static size_t __fxstate_size(int nregs)
3984 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
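/*
 * With the standard fxregs_state layout the legacy area up to xmm_space is
 * 160 bytes, so this works out to 160, 288 or 416 bytes for 0, 8 or 16 XMM
 * registers respectively.
 */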
3987 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3990 if (ctxt->mode == X86EMUL_MODE_PROT64)
3991 return __fxstate_size(16);
3993 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3994 return __fxstate_size(cr4_osfxsr ? 8 : 0);
3998 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
4001 * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
4002 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       FPU state.
4004 * 3) 64-bit mode without REX.W prefix
4005 * - like (2), but XMM 8-15 are being saved and restored
4006 * 4) 64-bit mode with REX.W prefix
4007 * - like (3), but FIP and FDP are 64 bit
4009 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4010 * desired result. (4) is not emulated.
4012 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4013 * and FPU DS) should match.
4015 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4017 struct fxregs_state fx_state;
4020 rc = check_fxsr(ctxt);
4021 if (rc != X86EMUL_CONTINUE)
4026 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4030 if (rc != X86EMUL_CONTINUE)
4033 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4034 fxstate_size(ctxt));
4038 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4039 * in the host registers (via FXSAVE) instead, so they won't be modified.
4040 * (preemption has to stay disabled until FXRSTOR).
4042 * Use noinline to keep the stack for other functions called by callers small.
4044 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4045 const size_t used_size)
4047 struct fxregs_state fx_tmp;
4050 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4051 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4052 __fxstate_size(16) - used_size);
4057 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4059 struct fxregs_state fx_state;
4063 rc = check_fxsr(ctxt);
4064 if (rc != X86EMUL_CONTINUE)
4067 size = fxstate_size(ctxt);
4068 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4069 if (rc != X86EMUL_CONTINUE)
4074 if (size < __fxstate_size(16)) {
4075 rc = fxregs_fixup(&fx_state, size);
4076 if (rc != X86EMUL_CONTINUE)
4080 if (fx_state.mxcsr >> 16) {
4081 rc = emulate_gp(ctxt, 0);
4085 if (rc == X86EMUL_CONTINUE)
4086 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4094 static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4098 eax = reg_read(ctxt, VCPU_REGS_RAX);
4099 edx = reg_read(ctxt, VCPU_REGS_RDX);
4100 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4102 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4103 return emulate_gp(ctxt, 0);
4105 return X86EMUL_CONTINUE;
4108 static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}
4120 static int check_cr_access(struct x86_emulate_ctxt *ctxt)
4122 if (!valid_cr(ctxt->modrm_reg))
4123 return emulate_ud(ctxt);
4125 return X86EMUL_CONTINUE;
4128 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4132 ctxt->ops->get_dr(ctxt, 7, &dr7);
4134 /* Check if DR7.Global_Enable is set */
4135 return dr7 & (1 << 13);
4138 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4140 int dr = ctxt->modrm_reg;
	if (dr > 7)
4144 return emulate_ud(ctxt);
4146 cr4 = ctxt->ops->get_cr(ctxt, 4);
4147 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4148 return emulate_ud(ctxt);
4150 if (check_dr7_gd(ctxt)) {
4153 ctxt->ops->get_dr(ctxt, 6, &dr6);
4154 dr6 &= ~DR_TRAP_BITS;
4155 dr6 |= DR6_BD | DR6_ACTIVE_LOW;
4156 ctxt->ops->set_dr(ctxt, 6, dr6);
4157 return emulate_db(ctxt);
4160 return X86EMUL_CONTINUE;
4163 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4165 u64 new_val = ctxt->src.val64;
4166 int dr = ctxt->modrm_reg;
4168 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4169 return emulate_gp(ctxt, 0);
4171 return check_dr_read(ctxt);
4174 static int check_svme(struct x86_emulate_ctxt *ctxt)
4178 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4180 if (!(efer & EFER_SVME))
4181 return emulate_ud(ctxt);
4183 return X86EMUL_CONTINUE;
4186 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4188 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4190 /* Valid physical address? */
4191 if (rax & 0xffff000000000000ULL)
4192 return emulate_gp(ctxt, 0);
4194 return check_svme(ctxt);
4197 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4199 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4201 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4202 return emulate_gp(ctxt, 0);
4204 return X86EMUL_CONTINUE;
4207 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4209 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4210 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4213 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
4214 * in Ring3 when CR4.PCE=0.
4216 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4217 return X86EMUL_CONTINUE;
4220 * If CR4.PCE is set, the SDM requires CPL=0 or CR0.PE=0. The CR0.PE
4221 * check however is unnecessary because CPL is always 0 outside
 * protected mode.
 */
4224 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4225 ctxt->ops->check_pmc(ctxt, rcx))
4226 return emulate_gp(ctxt, 0);
4228 return X86EMUL_CONTINUE;
4231 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4233 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4234 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4235 return emulate_gp(ctxt, 0);
4237 return X86EMUL_CONTINUE;
4240 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4242 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4243 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4244 return emulate_gp(ctxt, 0);
4246 return X86EMUL_CONTINUE;
4249 #define D(_y) { .flags = (_y) }
4250 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4251 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4252 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4253 #define N D(NotImpl)
4254 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4255 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4256 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4257 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4258 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4259 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4260 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4261 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4262 #define II(_f, _e, _i) \
4263 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4264 #define IIP(_f, _e, _i, _p) \
4265 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4266 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4267 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4269 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4270 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4271 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4272 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4273 #define I2bvIP(_f, _e, _i, _p) \
4274 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4276 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4277 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4278 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
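/*
 * F6ALU() expands to the six classic ALU encodings of an opcode row:
 * r/m8,r8 and r/m,r (ModRM destination), r8,r/m8 and r,r/m (register
 * destination, Lock dropped), and AL,imm8 / eAX,imm (accumulator forms).
 */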
4280 static const struct opcode group7_rm0[] = {
4282 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4286 static const struct opcode group7_rm1[] = {
4287 DI(SrcNone | Priv, monitor),
4288 DI(SrcNone | Priv, mwait),
4292 static const struct opcode group7_rm2[] = {
4294 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4298 static const struct opcode group7_rm3[] = {
4299 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4300 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4301 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4302 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4303 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4304 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4305 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4306 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4309 static const struct opcode group7_rm7[] = {
4311 DIP(SrcNone, rdtscp, check_rdtsc),
4315 static const struct opcode group1[] = {
4317 F(Lock | PageTable, em_or),
4320 F(Lock | PageTable, em_and),
4326 static const struct opcode group1A[] = {
4327 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4330 static const struct opcode group2[] = {
4331 F(DstMem | ModRM, em_rol),
4332 F(DstMem | ModRM, em_ror),
4333 F(DstMem | ModRM, em_rcl),
4334 F(DstMem | ModRM, em_rcr),
4335 F(DstMem | ModRM, em_shl),
4336 F(DstMem | ModRM, em_shr),
4337 F(DstMem | ModRM, em_shl),
4338 F(DstMem | ModRM, em_sar),
4341 static const struct opcode group3[] = {
4342 F(DstMem | SrcImm | NoWrite, em_test),
4343 F(DstMem | SrcImm | NoWrite, em_test),
4344 F(DstMem | SrcNone | Lock, em_not),
4345 F(DstMem | SrcNone | Lock, em_neg),
4346 F(DstXacc | Src2Mem, em_mul_ex),
4347 F(DstXacc | Src2Mem, em_imul_ex),
4348 F(DstXacc | Src2Mem, em_div_ex),
4349 F(DstXacc | Src2Mem, em_idiv_ex),
4352 static const struct opcode group4[] = {
4353 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4354 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4358 static const struct opcode group5[] = {
4359 F(DstMem | SrcNone | Lock, em_inc),
4360 F(DstMem | SrcNone | Lock, em_dec),
4361 I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
4362 I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
4363 I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
4364 I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
4365 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4368 static const struct opcode group6[] = {
4369 II(Prot | DstMem, em_sldt, sldt),
4370 II(Prot | DstMem, em_str, str),
4371 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4372 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4376 static const struct group_dual group7 = { {
4377 II(Mov | DstMem, em_sgdt, sgdt),
4378 II(Mov | DstMem, em_sidt, sidt),
4379 II(SrcMem | Priv, em_lgdt, lgdt),
4380 II(SrcMem | Priv, em_lidt, lidt),
4381 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4382 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4383 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4389 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4390 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4394 static const struct opcode group8[] = {
4396 F(DstMem | SrcImmByte | NoWrite, em_bt),
4397 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4398 F(DstMem | SrcImmByte | Lock, em_btr),
4399 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4403 * The "memory" destination is actually always a register, since we come
4404 * from the register case of group9.
4406 static const struct gprefix pfx_0f_c7_7 = {
4407 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4411 static const struct group_dual group9 = { {
4412 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4414 N, N, N, N, N, N, N,
4415 GP(0, &pfx_0f_c7_7),
4418 static const struct opcode group11[] = {
4419 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4423 static const struct gprefix pfx_0f_ae_7 = {
4424 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4427 static const struct group_dual group15 = { {
4428 I(ModRM | Aligned16, em_fxsave),
4429 I(ModRM | Aligned16, em_fxrstor),
4430 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4432 N, N, N, N, N, N, N, N,
4435 static const struct gprefix pfx_0f_6f_0f_7f = {
4436 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4439 static const struct instr_dual instr_dual_0f_2b = {
4443 static const struct gprefix pfx_0f_2b = {
4444 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4447 static const struct gprefix pfx_0f_10_0f_11 = {
4448 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4451 static const struct gprefix pfx_0f_28_0f_29 = {
4452 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4455 static const struct gprefix pfx_0f_e7 = {
4456 N, I(Sse, em_mov), N, N,
4459 static const struct escape escape_d9 = { {
4460 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4463 N, N, N, N, N, N, N, N,
4465 N, N, N, N, N, N, N, N,
4467 N, N, N, N, N, N, N, N,
4469 N, N, N, N, N, N, N, N,
4471 N, N, N, N, N, N, N, N,
4473 N, N, N, N, N, N, N, N,
4475 N, N, N, N, N, N, N, N,
4477 N, N, N, N, N, N, N, N,
4480 static const struct escape escape_db = { {
4481 N, N, N, N, N, N, N, N,
4484 N, N, N, N, N, N, N, N,
4486 N, N, N, N, N, N, N, N,
4488 N, N, N, N, N, N, N, N,
4490 N, N, N, N, N, N, N, N,
4492 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4494 N, N, N, N, N, N, N, N,
4496 N, N, N, N, N, N, N, N,
4498 N, N, N, N, N, N, N, N,
4501 static const struct escape escape_dd = { {
4502 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4505 N, N, N, N, N, N, N, N,
4507 N, N, N, N, N, N, N, N,
4509 N, N, N, N, N, N, N, N,
4511 N, N, N, N, N, N, N, N,
4513 N, N, N, N, N, N, N, N,
4515 N, N, N, N, N, N, N, N,
4517 N, N, N, N, N, N, N, N,
4519 N, N, N, N, N, N, N, N,
4522 static const struct instr_dual instr_dual_0f_c3 = {
4523 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4526 static const struct mode_dual mode_dual_63 = {
4527 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4530 static const struct opcode opcode_table[256] = {
4532 F6ALU(Lock, em_add),
4533 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4534 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4536 F6ALU(Lock | PageTable, em_or),
4537 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4540 F6ALU(Lock, em_adc),
4541 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4542 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4544 F6ALU(Lock, em_sbb),
4545 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4546 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4548 F6ALU(Lock | PageTable, em_and), N, N,
4550 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4552 F6ALU(Lock, em_xor), N, N,
4554 F6ALU(NoWrite, em_cmp), N, N,
4556 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4558 X8(I(SrcReg | Stack, em_push)),
4560 X8(I(DstReg | Stack, em_pop)),
4562 I(ImplicitOps | Stack | No64, em_pusha),
4563 I(ImplicitOps | Stack | No64, em_popa),
4564 N, MD(ModRM, &mode_dual_63),
4567 I(SrcImm | Mov | Stack, em_push),
4568 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4569 I(SrcImmByte | Mov | Stack, em_push),
4570 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4571 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4572 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4574 X16(D(SrcImmByte | NearBranch | IsBranch)),
4576 G(ByteOp | DstMem | SrcImm, group1),
4577 G(DstMem | SrcImm, group1),
4578 G(ByteOp | DstMem | SrcImm | No64, group1),
4579 G(DstMem | SrcImmByte, group1),
4580 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4581 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4583 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4584 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4585 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4586 D(ModRM | SrcMem | NoAccess | DstReg),
4587 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4590 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4592 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4593 I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
4594 II(ImplicitOps | Stack, em_pushf, pushf),
4595 II(ImplicitOps | Stack, em_popf, popf),
4596 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4598 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4599 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4600 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4601 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4603 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4604 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4605 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4606 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4608 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4610 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4612 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4613 I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
4614 I(ImplicitOps | NearBranch | IsBranch, em_ret),
4615 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4616 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4617 G(ByteOp, group11), G(0, group11),
4619 I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
4620 I(Stack | IsBranch, em_leave),
4621 I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
4622 I(ImplicitOps | IsBranch, em_ret_far),
4623 D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
4624 D(ImplicitOps | No64 | IsBranch),
4625 II(ImplicitOps | IsBranch, em_iret, iret),
4627 G(Src2One | ByteOp, group2), G(Src2One, group2),
4628 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4629 I(DstAcc | SrcImmUByte | No64, em_aam),
4630 I(DstAcc | SrcImmUByte | No64, em_aad),
4631 F(DstAcc | ByteOp | No64, em_salc),
4632 I(DstAcc | SrcXLat | ByteOp, em_mov),
4634 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4636 X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
4637 I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
4638 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4639 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4641 I(SrcImm | NearBranch | IsBranch, em_call),
4642 D(SrcImm | ImplicitOps | NearBranch | IsBranch),
4643 I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
4644 D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
4645 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4646 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4648 N, DI(ImplicitOps, icebp), N, N,
4649 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4650 G(ByteOp, group3), G(0, group3),
4652 D(ImplicitOps), D(ImplicitOps),
4653 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4654 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4657 static const struct opcode twobyte_table[256] = {
4659 G(0, group6), GD(0, &group7), N, N,
4660 N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
4661 II(ImplicitOps | Priv, em_clts, clts), N,
4662 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4663 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4665 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4666 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4668 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4669 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4670 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4671 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4672 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4673 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4675 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4676 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4677 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4679 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4682 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4683 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4684 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4687 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4688 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4689 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4690 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4691 I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
4692 I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
4694 N, N, N, N, N, N, N, N,
4696 X16(D(DstReg | SrcMem | ModRM)),
4698 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4703 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4708 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4710 X16(D(SrcImm | NearBranch | IsBranch)),
4712 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4714 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4715 II(ImplicitOps, em_cpuid, cpuid),
4716 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4717 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4718 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4720 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4721 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4722 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4723 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4724 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4725 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4727 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4728 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4729 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4730 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4731 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4732 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4736 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4737 I(DstReg | SrcMem | ModRM, em_bsf_c),
4738 I(DstReg | SrcMem | ModRM, em_bsr_c),
4739 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4741 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4742 N, ID(0, &instr_dual_0f_c3),
4743 N, N, N, GD(0, &group9),
4745 X8(I(DstReg, em_bswap)),
4747 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4749 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4750 N, N, N, N, N, N, N, N,
4752 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4755 static const struct instr_dual instr_dual_0f_38_f0 = {
4756 I(DstReg | SrcMem | Mov, em_movbe), N
4759 static const struct instr_dual instr_dual_0f_38_f1 = {
4760 I(DstMem | SrcReg | Mov, em_movbe), N
4763 static const struct gprefix three_byte_0f_38_f0 = {
4764 ID(0, &instr_dual_0f_38_f0), N, N, N
4767 static const struct gprefix three_byte_0f_38_f1 = {
4768 ID(0, &instr_dual_0f_38_f1), N, N, N
4772 * The instructions below are indexed by the third opcode byte and then selected by the mandatory prefix.
4775 static const struct opcode opcode_map_0f_38[256] = {
4777 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4779 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4781 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4782 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
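/*
 * Example: a bare "0f 38 f0 /r" reaches the gprefix entry above with
 * simd_prefix == 0, so pfx_no selects instr_dual_0f_38_f0; only the
 * memory form (modrm mod != 3, i.e. movbe reg, mem) is emulated, while
 * the register-direct form maps to N.
 */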
4803 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4807 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4813 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4814 unsigned size, bool sign_extension)
4816 int rc = X86EMUL_CONTINUE;
4820 op->addr.mem.ea = ctxt->_eip;
4821 /* NB. Immediates are sign-extended as necessary. */
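/*
 * For example, a one-byte immediate of 0x80 is fetched as (s8)0x80 and
 * stored sign-extended (0xffffffffffffff80 in a 64-bit op->val); if the
 * caller asked for zero extension it is masked back down to 0x80 below.
 */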
4822 switch (op->bytes) {
4824 op->val = insn_fetch(s8, ctxt);
4827 op->val = insn_fetch(s16, ctxt);
4830 op->val = insn_fetch(s32, ctxt);
4833 op->val = insn_fetch(s64, ctxt);
4836 if (!sign_extension) {
4837 switch (op->bytes) {
4845 op->val &= 0xffffffff;
4853 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4856 int rc = X86EMUL_CONTINUE;
4860 decode_register_operand(ctxt, op);
4863 rc = decode_imm(ctxt, op, 1, false);
4866 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4870 if (ctxt->d & BitOp)
4871 fetch_bit_operand(ctxt);
4872 op->orig_val = op->val;
4875 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4879 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4880 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4881 fetch_register_operand(op);
4882 op->orig_val = op->val;
4886 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4887 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4888 fetch_register_operand(op);
4889 op->orig_val = op->val;
4892 if (ctxt->d & ByteOp) {
4897 op->bytes = ctxt->op_bytes;
4898 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4899 fetch_register_operand(op);
4900 op->orig_val = op->val;
4904 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4906 register_address(ctxt, VCPU_REGS_RDI);
4907 op->addr.mem.seg = VCPU_SREG_ES;
4914 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4915 fetch_register_operand(op);
4920 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4923 rc = decode_imm(ctxt, op, 1, true);
4931 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4934 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4937 ctxt->memop.bytes = 1;
4938 if (ctxt->memop.type == OP_REG) {
4939 ctxt->memop.addr.reg = decode_register(ctxt,
4940 ctxt->modrm_rm, true);
4941 fetch_register_operand(&ctxt->memop);
4945 ctxt->memop.bytes = 2;
4948 ctxt->memop.bytes = 4;
4951 rc = decode_imm(ctxt, op, 2, false);
4954 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4958 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4960 register_address(ctxt, VCPU_REGS_RSI);
4961 op->addr.mem.seg = ctxt->seg_override;
4967 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4970 reg_read(ctxt, VCPU_REGS_RBX) +
4971 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4972 op->addr.mem.seg = ctxt->seg_override;
4977 op->addr.mem.ea = ctxt->_eip;
4978 op->bytes = ctxt->op_bytes + 2;
4979 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4982 ctxt->memop.bytes = ctxt->op_bytes + 2;
4986 op->val = VCPU_SREG_ES;
4990 op->val = VCPU_SREG_CS;
4994 op->val = VCPU_SREG_SS;
4998 op->val = VCPU_SREG_DS;
5002 op->val = VCPU_SREG_FS;
5006 op->val = VCPU_SREG_GS;
5009 /* Special instructions do their own operand decoding. */
5011 op->type = OP_NONE; /* Disable writeback. */
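/*
 * Rough usage sketch (illustrative only; handle_decode_failure() and
 * keep_emulating() are hypothetical helpers, not part of this file):
 * a caller is expected to decode once and then run the instruction,
 * re-invoking the executor while string emulation asks to be restarted:
 *
 *	if (x86_decode_insn(ctxt, insn, insn_len, emulation_type) != EMULATION_OK)
 *		return handle_decode_failure(ctxt);
 *	do {
 *		rc = x86_emulate_insn(ctxt);
 *	} while (rc == EMULATION_RESTART && keep_emulating(ctxt));
 *
 * EMULATION_FAILED and EMULATION_INTERCEPTED are left to the caller.
 */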
5019 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
5021 int rc = X86EMUL_CONTINUE;
5022 int mode = ctxt->mode;
5023 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5024 bool op_prefix = false;
5025 bool has_seg_override = false;
5026 struct opcode opcode;
5028 struct desc_struct desc;
5030 ctxt->memop.type = OP_NONE;
5031 ctxt->memopp = NULL;
5032 ctxt->_eip = ctxt->eip;
5033 ctxt->fetch.ptr = ctxt->fetch.data;
5034 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5035 ctxt->opcode_len = 1;
5036 ctxt->intercept = x86_intercept_none;
5038 memcpy(ctxt->fetch.data, insn, insn_len);
5040 rc = __do_insn_fetch_bytes(ctxt, 1);
5041 if (rc != X86EMUL_CONTINUE)
5046 case X86EMUL_MODE_REAL:
5047 case X86EMUL_MODE_VM86:
5048 def_op_bytes = def_ad_bytes = 2;
5049 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5051 def_op_bytes = def_ad_bytes = 4;
5053 case X86EMUL_MODE_PROT16:
5054 def_op_bytes = def_ad_bytes = 2;
5056 case X86EMUL_MODE_PROT32:
5057 def_op_bytes = def_ad_bytes = 4;
5059 #ifdef CONFIG_X86_64
5060 case X86EMUL_MODE_PROT64:
5066 return EMULATION_FAILED;
5069 ctxt->op_bytes = def_op_bytes;
5070 ctxt->ad_bytes = def_ad_bytes;
5072 /* Legacy prefixes. */
5074 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5075 case 0x66: /* operand-size override */
5077 /* switch between 2/4 bytes */
5078 ctxt->op_bytes = def_op_bytes ^ 6;
5080 case 0x67: /* address-size override */
5081 if (mode == X86EMUL_MODE_PROT64)
5082 /* switch between 4/8 bytes */
5083 ctxt->ad_bytes = def_ad_bytes ^ 12;
5085 /* switch between 2/4 bytes */
5086 ctxt->ad_bytes = def_ad_bytes ^ 6;
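/*
 * The xor just toggles between the two legal sizes: for op_bytes,
 * 2 ^ 6 == 4 and 4 ^ 6 == 2; for ad_bytes in 64-bit mode,
 * 4 ^ 12 == 8 and 8 ^ 12 == 4.
 */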
5088 case 0x26: /* ES override */
5089 has_seg_override = true;
5090 ctxt->seg_override = VCPU_SREG_ES;
5092 case 0x2e: /* CS override */
5093 has_seg_override = true;
5094 ctxt->seg_override = VCPU_SREG_CS;
5096 case 0x36: /* SS override */
5097 has_seg_override = true;
5098 ctxt->seg_override = VCPU_SREG_SS;
5100 case 0x3e: /* DS override */
5101 has_seg_override = true;
5102 ctxt->seg_override = VCPU_SREG_DS;
5104 case 0x64: /* FS override */
5105 has_seg_override = true;
5106 ctxt->seg_override = VCPU_SREG_FS;
5108 case 0x65: /* GS override */
5109 has_seg_override = true;
5110 ctxt->seg_override = VCPU_SREG_GS;
5112 case 0x40 ... 0x4f: /* REX */
5113 if (mode != X86EMUL_MODE_PROT64)
5115 ctxt->rex_prefix = ctxt->b;
5117 case 0xf0: /* LOCK */
5118 ctxt->lock_prefix = 1;
5120 case 0xf2: /* REPNE/REPNZ */
5121 case 0xf3: /* REP/REPE/REPZ */
5122 ctxt->rep_prefix = ctxt->b;
5128 /* Any legacy prefix after a REX prefix nullifies its effect. */
5130 ctxt->rex_prefix = 0;
5136 if (ctxt->rex_prefix & 8)
5137 ctxt->op_bytes = 8; /* REX.W */
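/*
 * Example: for "48 01 d8" (add rax, rbx) REX.W is bit 3 of the 0x48
 * prefix, so op_bytes is widened to 8 here.  Had a legacy prefix
 * followed the REX byte, the prefix loop above would already have
 * cleared rex_prefix, since a REX that is not the last prefix before
 * the opcode has no effect.
 */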
5139 /* Opcode byte(s). */
5140 opcode = opcode_table[ctxt->b];
5141 /* Two-byte opcode? */
5142 if (ctxt->b == 0x0f) {
5143 ctxt->opcode_len = 2;
5144 ctxt->b = insn_fetch(u8, ctxt);
5145 opcode = twobyte_table[ctxt->b];
5147 /* 0F_38 opcode map */
5148 if (ctxt->b == 0x38) {
5149 ctxt->opcode_len = 3;
5150 ctxt->b = insn_fetch(u8, ctxt);
5151 opcode = opcode_map_0f_38[ctxt->b];
5154 ctxt->d = opcode.flags;
5156 if (ctxt->d & ModRM)
5157 ctxt->modrm = insn_fetch(u8, ctxt);
5159 /* VEX-prefixed instructions are not implemented */
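/*
 * In 32-bit code c4/c5 double as LES/LDS, which take a memory operand
 * (modrm mod != 3); the bytes can only start a VEX prefix when the modrm
 * byte has mod == 3 or when running in 64-bit mode, which is exactly the
 * condition tested below.
 */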
5160 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5161 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5165 while (ctxt->d & GroupMask) {
5166 switch (ctxt->d & GroupMask) {
5168 goffset = (ctxt->modrm >> 3) & 7;
5169 opcode = opcode.u.group[goffset];
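/*
 * Example: opcode 0xf7 is declared as G(0, group3) in the opcode table,
 * so for "f7 /3" the reg field of modrm (3) picks element 3 of group3,
 * i.e. the NEG form of the instruction.
 */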
5172 goffset = (ctxt->modrm >> 3) & 7;
5173 if ((ctxt->modrm >> 6) == 3)
5174 opcode = opcode.u.gdual->mod3[goffset];
5176 opcode = opcode.u.gdual->mod012[goffset];
5179 goffset = ctxt->modrm & 7;
5180 opcode = opcode.u.group[goffset];
5183 if (ctxt->rep_prefix && op_prefix)
5184 return EMULATION_FAILED;
5185 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5186 switch (simd_prefix) {
5187 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5188 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5189 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5190 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5194 if (ctxt->modrm > 0xbf) {
5195 size_t size = ARRAY_SIZE(opcode.u.esc->high);
5196 u32 index = array_index_nospec(
5197 ctxt->modrm - 0xc0, size);
5199 opcode = opcode.u.esc->high[index];
5201 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5205 if ((ctxt->modrm >> 6) == 3)
5206 opcode = opcode.u.idual->mod3;
5208 opcode = opcode.u.idual->mod012;
5211 if (ctxt->mode == X86EMUL_MODE_PROT64)
5212 opcode = opcode.u.mdual->mode64;
5214 opcode = opcode.u.mdual->mode32;
5217 return EMULATION_FAILED;
5220 ctxt->d &= ~(u64)GroupMask;
5221 ctxt->d |= opcode.flags;
5224 ctxt->is_branch = opcode.flags & IsBranch;
5228 return EMULATION_FAILED;
5230 ctxt->execute = opcode.u.execute;
5232 if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
5233 likely(!(ctxt->d & EmulateOnUD)))
5234 return EMULATION_FAILED;
5236 if (unlikely(ctxt->d &
5237 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5240 * These are copied unconditionally here, and checked unconditionally
5241 * in x86_emulate_insn.
5243 ctxt->check_perm = opcode.check_perm;
5244 ctxt->intercept = opcode.intercept;
5246 if (ctxt->d & NotImpl)
5247 return EMULATION_FAILED;
5249 if (mode == X86EMUL_MODE_PROT64) {
5250 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5252 else if (ctxt->d & NearBranch)
5256 if (ctxt->d & Op3264) {
5257 if (mode == X86EMUL_MODE_PROT64)
5263 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5267 ctxt->op_bytes = 16;
5268 else if (ctxt->d & Mmx)
5272 /* ModRM and SIB bytes. */
5273 if (ctxt->d & ModRM) {
5274 rc = decode_modrm(ctxt, &ctxt->memop);
5275 if (!has_seg_override) {
5276 has_seg_override = true;
5277 ctxt->seg_override = ctxt->modrm_seg;
5279 } else if (ctxt->d & MemAbs)
5280 rc = decode_abs(ctxt, &ctxt->memop);
5281 if (rc != X86EMUL_CONTINUE)
5284 if (!has_seg_override)
5285 ctxt->seg_override = VCPU_SREG_DS;
5287 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5290 * Decode and fetch the source operand: register, memory
5293 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5294 if (rc != X86EMUL_CONTINUE)
5298 * Decode and fetch the second source operand: register, memory
5301 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5302 if (rc != X86EMUL_CONTINUE)
5305 /* Decode and fetch the destination operand: register or memory. */
5306 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
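/*
 * For rip-relative operands decode_modrm() only stored the displacement;
 * the effective address is relative to the end of the instruction, so it
 * can be finalised only here, after _eip has been advanced past the last
 * fetched instruction byte.
 */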
5308 if (ctxt->rip_relative && likely(ctxt->memopp))
5309 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5310 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5313 if (rc == X86EMUL_PROPAGATE_FAULT)
5314 ctxt->have_exception = true;
5315 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5318 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5320 return ctxt->d & PageTable;
5323 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5325 /* The second termination condition applies only to REPE
5326  * and REPNE. If the repeat string operation prefix is
5327  * REPE/REPZ or REPNE/REPNZ, check the corresponding
5328  * termination condition according to:
5329  *     - if REPE/REPZ and ZF = 0 then done
5330  *     - if REPNE/REPNZ and ZF = 1 then done
 */
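/*
 * Example: "repe cmpsb" (f3 a6) keeps iterating while the compared bytes
 * are equal (ZF set); the first mismatch clears ZF, so the check below
 * reports the string operation as completed.
 */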
5332 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5333 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5334 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5335 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5336 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5337 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5343 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5348 rc = asm_safe("fwait");
5351 if (unlikely(rc != X86EMUL_CONTINUE))
5352 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5354 return X86EMUL_CONTINUE;
5357 static void fetch_possible_mmx_operand(struct operand *op)
5359 if (op->type == OP_MM)
5360 kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
5363 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5365 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
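/*
 * The fastop stubs for the four operand sizes are laid out FASTOP_SIZE
 * bytes apart (byte, word, long, quad), so e.g. a 4-byte destination
 * gives __ffs(4) == 2 below and lands on the 32-bit variant, while a
 * ByteOp instruction stays on the byte variant at offset 0.
 */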
5367 if (!(ctxt->d & ByteOp))
5368 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5370 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5371 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5372 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5373 : "c"(ctxt->src2.val));
5375 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5376 if (!fop) /* exception is returned in fop variable */
5377 return emulate_de(ctxt);
5378 return X86EMUL_CONTINUE;
5381 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5383 memset(&ctxt->rip_relative, 0,
5384 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
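/*
 * This wipes the decode-cache fields from rip_relative up to (but not
 * including) modrm, so it relies on the field layout of
 * struct x86_emulate_ctxt in kvm_emulate.h.
 */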
5386 ctxt->io_read.pos = 0;
5387 ctxt->io_read.end = 0;
5388 ctxt->mem_read.end = 0;
5391 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5393 const struct x86_emulate_ops *ops = ctxt->ops;
5394 int rc = X86EMUL_CONTINUE;
5395 int saved_dst_type = ctxt->dst.type;
5396 unsigned emul_flags;
5398 ctxt->mem_read.pos = 0;
5400 /* LOCK prefix is allowed only with some instructions */
5401 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5402 rc = emulate_ud(ctxt);
5406 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5407 rc = emulate_ud(ctxt);
5411 emul_flags = ctxt->ops->get_hflags(ctxt);
5412 if (unlikely(ctxt->d &
5413 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5414 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5415 (ctxt->d & Undefined)) {
5416 rc = emulate_ud(ctxt);
5420 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5421 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5422 rc = emulate_ud(ctxt);
5426 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5427 rc = emulate_nm(ctxt);
5431 if (ctxt->d & Mmx) {
5432 rc = flush_pending_x87_faults(ctxt);
5433 if (rc != X86EMUL_CONTINUE)
5436 * Now that we know the fpu is exception safe, we can fetch the MMX operands from it.
5439 fetch_possible_mmx_operand(&ctxt->src);
5440 fetch_possible_mmx_operand(&ctxt->src2);
5441 if (!(ctxt->d & Mov))
5442 fetch_possible_mmx_operand(&ctxt->dst);
5445 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5446 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5447 X86_ICPT_PRE_EXCEPT);
5448 if (rc != X86EMUL_CONTINUE)
5452 /* Instruction can only be executed in protected mode */
5453 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5454 rc = emulate_ud(ctxt);
5458 /* Privileged instructions can be executed only at CPL 0 */
5459 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5460 if (ctxt->d & PrivUD)
5461 rc = emulate_ud(ctxt);
5463 rc = emulate_gp(ctxt, 0);
5467 /* Do instruction specific permission checks */
5468 if (ctxt->d & CheckPerm) {
5469 rc = ctxt->check_perm(ctxt);
5470 if (rc != X86EMUL_CONTINUE)
5474 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5475 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5476 X86_ICPT_POST_EXCEPT);
5477 if (rc != X86EMUL_CONTINUE)
5481 if (ctxt->rep_prefix && (ctxt->d & String)) {
5482 /* All REP prefixes have the same first termination condition */
5483 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5484 string_registers_quirk(ctxt);
5485 ctxt->eip = ctxt->_eip;
5486 ctxt->eflags &= ~X86_EFLAGS_RF;
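/*
 * Example: "rep movsb" with (e)cx == 0 performs no iterations at all;
 * eip is simply advanced past the instruction, matching what hardware
 * does.
 */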
5492 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5493 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5494 ctxt->src.valptr, ctxt->src.bytes);
5495 if (rc != X86EMUL_CONTINUE)
5497 ctxt->src.orig_val64 = ctxt->src.val64;
5500 if (ctxt->src2.type == OP_MEM) {
5501 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5502 &ctxt->src2.val, ctxt->src2.bytes);
5503 if (rc != X86EMUL_CONTINUE)
5507 if ((ctxt->d & DstMask) == ImplicitOps)
5511 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5512 /* optimisation - avoid slow emulated read if Mov */
5513 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5514 &ctxt->dst.val, ctxt->dst.bytes);
5515 if (rc != X86EMUL_CONTINUE) {
5516 if (!(ctxt->d & NoWrite) &&
5517 rc == X86EMUL_PROPAGATE_FAULT &&
5518 ctxt->exception.vector == PF_VECTOR)
5519 ctxt->exception.error_code |= PFERR_WRITE_MASK;
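/*
 * The read above only happened because the instruction will write the
 * operand back, so a page fault here is reported to the guest as a
 * write fault.
 */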
5523 /* Copy full 64-bit value for CMPXCHG8B. */
5524 ctxt->dst.orig_val64 = ctxt->dst.val64;
5528 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5529 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5530 X86_ICPT_POST_MEMACCESS);
5531 if (rc != X86EMUL_CONTINUE)
5535 if (ctxt->rep_prefix && (ctxt->d & String))
5536 ctxt->eflags |= X86_EFLAGS_RF;
5538 ctxt->eflags &= ~X86_EFLAGS_RF;
5540 if (ctxt->execute) {
5541 if (ctxt->d & Fastop)
5542 rc = fastop(ctxt, ctxt->fop);
5544 rc = ctxt->execute(ctxt);
5545 if (rc != X86EMUL_CONTINUE)
5550 if (ctxt->opcode_len == 2)
5552 else if (ctxt->opcode_len == 3)
5553 goto threebyte_insn;
5556 case 0x70 ... 0x7f: /* jcc (short) */
5557 if (test_cc(ctxt->b, ctxt->eflags))
5558 rc = jmp_rel(ctxt, ctxt->src.val);
5560 case 0x8d: /* lea r16/r32, m */
5561 ctxt->dst.val = ctxt->src.addr.mem.ea;
5563 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5564 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5565 ctxt->dst.type = OP_NONE;
5569 case 0x98: /* cbw/cwde/cdqe */
5570 switch (ctxt->op_bytes) {
5571 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5572 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5573 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
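/*
 * e.g. cbw (16-bit operand size) sign-extends AL into AX, so 0x80
 * becomes 0xff80; cwde and cdqe do the same for EAX and RAX.
 */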
5576 case 0xcc: /* int3 */
5577 rc = emulate_int(ctxt, 3);
5579 case 0xcd: /* int n */
5580 rc = emulate_int(ctxt, ctxt->src.val);
5582 case 0xce: /* into */
5583 if (ctxt->eflags & X86_EFLAGS_OF)
5584 rc = emulate_int(ctxt, 4);
5586 case 0xe9: /* jmp rel */
5587 case 0xeb: /* jmp rel short */
5588 rc = jmp_rel(ctxt, ctxt->src.val);
5589 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5591 case 0xf4: /* hlt */
5592 ctxt->ops->halt(ctxt);
5594 case 0xf5: /* cmc */
5595 /* complement carry flag from eflags reg */
5596 ctxt->eflags ^= X86_EFLAGS_CF;
5598 case 0xf8: /* clc */
5599 ctxt->eflags &= ~X86_EFLAGS_CF;
5601 case 0xf9: /* stc */
5602 ctxt->eflags |= X86_EFLAGS_CF;
5604 case 0xfc: /* cld */
5605 ctxt->eflags &= ~X86_EFLAGS_DF;
5607 case 0xfd: /* std */
5608 ctxt->eflags |= X86_EFLAGS_DF;
5611 goto cannot_emulate;
5614 if (rc != X86EMUL_CONTINUE)
5618 if (ctxt->d & SrcWrite) {
5619 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5620 rc = writeback(ctxt, &ctxt->src);
5621 if (rc != X86EMUL_CONTINUE)
5624 if (!(ctxt->d & NoWrite)) {
5625 rc = writeback(ctxt, &ctxt->dst);
5626 if (rc != X86EMUL_CONTINUE)
5631 * restore dst type in case the decoding will be reused
5632 * (happens for string instructions)
5634 ctxt->dst.type = saved_dst_type;
5636 if ((ctxt->d & SrcMask) == SrcSI)
5637 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5639 if ((ctxt->d & DstMask) == DstDI)
5640 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5642 if (ctxt->rep_prefix && (ctxt->d & String)) {
5644 struct read_cache *r = &ctxt->io_read;
5645 if ((ctxt->d & SrcMask) == SrcSI)
5646 count = ctxt->src.count;
5648 count = ctxt->dst.count;
5649 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5651 if (!string_insn_completed(ctxt)) {
5653 * Re-enter the guest when the pio read-ahead buffer is empty
5654 * or, if it is not used, after every 1024 iterations.
5656 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5657 (r->end == 0 || r->end != r->pos)) {
5659 * Reset read cache. Usually happens before
5660 * decode, but since the instruction is restarted
5661 * we have to do it here.
5663 ctxt->mem_read.end = 0;
5664 writeback_registers(ctxt);
5665 return EMULATION_RESTART;
5667 goto done; /* skip rip writeback */
5669 ctxt->eflags &= ~X86_EFLAGS_RF;
5672 ctxt->eip = ctxt->_eip;
5673 if (ctxt->mode != X86EMUL_MODE_PROT64)
5674 ctxt->eip = (u32)ctxt->_eip;
5677 if (rc == X86EMUL_PROPAGATE_FAULT) {
5678 WARN_ON(ctxt->exception.vector > 0x1f);
5679 ctxt->have_exception = true;
5681 if (rc == X86EMUL_INTERCEPTED)
5682 return EMULATION_INTERCEPTED;
5684 if (rc == X86EMUL_CONTINUE)
5685 writeback_registers(ctxt);
5687 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5691 case 0x09: /* wbinvd */
5692 (ctxt->ops->wbinvd)(ctxt);
5694 case 0x08: /* invd */
5695 case 0x0d: /* GrpP (prefetch) */
5696 case 0x18: /* Grp16 (prefetch/nop) */
5697 case 0x1f: /* nop */
5699 case 0x20: /* mov cr, reg */
5700 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5702 case 0x21: /* mov from dr to reg */
5703 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5705 case 0x40 ... 0x4f: /* cmov */
5706 if (test_cc(ctxt->b, ctxt->eflags))
5707 ctxt->dst.val = ctxt->src.val;
5708 else if (ctxt->op_bytes != 4)
5709 ctxt->dst.type = OP_NONE; /* no writeback */
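/*
 * Writeback is suppressed only for 16- and 64-bit operands: a 32-bit
 * cmov with a false condition still writes the destination register so
 * that its upper 32 bits are zeroed, as on real hardware.
 */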
5711 case 0x80 ... 0x8f: /* jnz rel, etc*/
5712 if (test_cc(ctxt->b, ctxt->eflags))
5713 rc = jmp_rel(ctxt, ctxt->src.val);
5715 case 0x90 ... 0x9f: /* setcc r/m8 */
5716 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5718 case 0xb6 ... 0xb7: /* movzx */
5719 ctxt->dst.bytes = ctxt->op_bytes;
5720 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5721 : (u16) ctxt->src.val;
5723 case 0xbe ... 0xbf: /* movsx */
5724 ctxt->dst.bytes = ctxt->op_bytes;
5725 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5726 (s16) ctxt->src.val;
5729 goto cannot_emulate;
5734 if (rc != X86EMUL_CONTINUE)
5740 return EMULATION_FAILED;
5743 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5745 invalidate_registers(ctxt);
5748 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5750 writeback_registers(ctxt);
5753 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5755 if (ctxt->rep_prefix && (ctxt->d & String))
5758 if (ctxt->d & TwoMemOp)