/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
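
/*
 * Example: each operand code above is a 5-bit value (OpBits == 5), so a
 * single u64 flags word can pack the destination type at DstShift, the
 * source type at SrcShift and the second source at Src2Shift alongside
 * the boolean flags defined below; a decoded field is recovered with,
 * e.g., (ctxt->d >> DstShift) & OpMask.  (Illustrative summary of the
 * shift/mask macros that follow.)
 */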
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)      /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define Aligned16   ((u64)1 << 55)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
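
/*
 * Example: the X-macros above stamp out repeated initializers, so
 * X4(op) expands to "op, op, op, op"; the opcode tables later in this
 * file use this to fill runs of identical entries, e.g. sixteen
 * conditional-branch entries via X16().  (Illustrative note.)
 */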
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
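
/*
 * Sketch of the size-based dispatch this enables (see the fastop()
 * dispatcher later in this file): because each per-size stub is exactly
 * FASTOP_SIZE bytes, the caller can compute the entry point directly,
 * along the lines of
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * stepping from the byte variant to the 2/4/8-byte variant without a
 * jump table.
 */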
struct fastop;

struct opcode {
        u64 flags : 56;
        u64 intercept : 8;
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                const struct opcode *group;
                const struct group_dual *gdual;
                const struct gprefix *gprefix;
                const struct escape *esc;
                const struct instr_dual *idual;
                const struct mode_dual *mdual;
                void (*fastop)(struct fastop *fake);
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};

struct escape {
        struct opcode op[8];
        struct opcode high[64];
};

struct instr_dual {
        struct opcode mod012;
        struct opcode mod3;
};

struct mode_dual {
        struct opcode mode32;
        struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
        X86_TRANSFER_NONE,
        X86_TRANSFER_CALL_JMP,
        X86_TRANSFER_RET,
        X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        if (!(ctxt->regs_valid & (1 << nr))) {
                ctxt->regs_valid |= 1 << nr;
                ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
        }
        return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        ctxt->regs_valid |= 1 << nr;
        ctxt->regs_dirty |= 1 << nr;
        return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        reg_read(ctxt, nr);
        return reg_write(ctxt, nr);
}
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
        unsigned reg;

        for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
                ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
        ctxt->regs_dirty = 0;
        ctxt->regs_valid = 0;
}
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
                     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_FUNC(name) \
        ".align " __stringify(FASTOP_SIZE) " \n\t" \
        ".type " name ", @function \n\t" \
        name ":\n\t"

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
        extern void em_##op(struct fastop *fake); \
        asm(".pushsection .text, \"ax\" \n\t" \
            ".global em_" #op " \n\t" \
            FOP_FUNC("em_" #op)

#define FOP_END \
            ".popsection")

#define FOPNOP() \
        FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
        FOP_RET
#define FOP1E(op,  dst) \
        FOP_FUNC(#op "_" #dst) \
        "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
        FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
        FOP_START(op) \
        FOP1E(op##b, al) \
        FOP1E(op##w, ax) \
        FOP1E(op##l, eax) \
        ON64(FOP1E(op##q, rax)) \
        FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
        FOP_START(name) \
        FOP1E(op, cl) \
        FOP1E(op, cx) \
        FOP1E(op, ecx) \
        ON64(FOP1E(op, rcx)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
        FOP_START(name) \
        FOP1EEX(op, cl) \
        FOP1EEX(op, cx) \
        FOP1EEX(op, ecx) \
        ON64(FOP1EEX(op, rcx)) \
        FOP_END
#define FOP2E(op,  dst, src) \
        FOP_FUNC(#op "_" #dst "_" #src) \
        #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
        FOP_START(op) \
        FOP2E(op##b, al, dl) \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END
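
/*
 * Example: FASTOP2(add) emits an em_add symbol followed by four
 * FASTOP_SIZE-aligned stubs -- "addb %dl, %al; ret", "addw %dx, %ax; ret",
 * "addl %edx, %eax; ret" and, on 64-bit builds, "addq %rdx, %rax; ret" --
 * which is exactly the layout the size calculation described above
 * depends on.
 */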
/* 2 operand, word only */
#define FASTOP2W(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
        FOP_START(op) \
        FOP2E(op##b, al, cl) \
        FOP2E(op##w, ax, cl) \
        FOP2E(op##l, eax, cl) \
        ON64(FOP2E(op##q, rax, cl)) \
        FOP_END
/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
        FOP_START(name) \
        FOP2E(op##b, dl, al) \
        FOP2E(op##w, dx, ax) \
        FOP2E(op##l, edx, eax) \
        ON64(FOP2E(op##q, rdx, rax)) \
        FOP_END
#define FOP3E(op,  dst, src, src2) \
        FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
        #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP3E(op##w, ax, dx, cl) \
        FOP3E(op##l, eax, edx, cl) \
        ON64(FOP3E(op##q, rax, rdx, cl)) \
        FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
        ".align 4 \n\t" \
        ".type " #op ", @function \n\t" \
        #op ": \n\t" \
        #op " %al \n\t" \
        FOP_RET
asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
        int _fault = 0; \
 \
        asm volatile("1:" insn "\n" \
                     "2:\n" \
                     ".pushsection .fixup, \"ax\"\n" \
                     "3: movl $1, %[_fault]\n" \
                     "   jmp  2b\n" \
                     ".popsection\n" \
                     _ASM_EXTABLE(1b, 3b) \
                     : [_fault] "+qm"(_fault) inoutclob ); \
 \
        _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
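
/*
 * Typical use (illustrative): wrapping an FPU instruction that may
 * fault, e.g.
 *
 *	rc = asm_safe("fwait");
 *
 * yields X86EMUL_UNHANDLEABLE instead of an unhandled host fault if the
 * instruction traps.
 */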
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->rep_prefix,
                .modrm_mod  = ctxt->modrm_mod,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
                .dst_val    = ctxt->dst.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .ad_bytes   = ctxt->ad_bytes,
                .next_rip   = ctxt->eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
        *dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
        /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
        switch (bytes) {
        case 1:
                *(u8 *)reg = (u8)val;
                break;
        case 2:
                *(u16 *)reg = (u16)val;
                break;
        case 4:
                *reg = (u32)val;
                break;  /* 64b: zero-extend */
        case 8:
                *reg = val;
                break;
        }
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
        return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
        u16 sel;
        struct desc_struct ss;

        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return ~0UL;
        ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
        return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
        return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
        return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
        assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
        ulong *preg = reg_rmw(ctxt, reg);

        assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}
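
/*
 * Example: with the granularity bit set, the 20-bit raw limit counts
 * 4KiB pages, so a raw limit of 0xfffff scales to (0xfffff << 12) | 0xfff
 * == 0xffffffff, i.e. a 4GiB segment.
 */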
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        WARN_ON(vec > 0x1f);
        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
        u16 selector;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
        return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
                                 unsigned seg)
{
        u16 dummy;
        u32 base3;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
        if (likely(size < 16))
                return 1;

        if (ctxt->d & Aligned)
                return size;
        else if (ctxt->d & Unaligned)
                return 1;
        else if (ctxt->d & Avx)
                return 1;
        else if (ctxt->d & Aligned16)
                return 16;
        else
                return size;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                                       struct segmented_address addr,
                                       unsigned *max_size, unsigned size,
                                       bool write, bool fetch,
                                       enum x86emul_mode mode, ulong *linear)
{
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;

        la = seg_base(ctxt, addr.seg) + addr.ea;
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
                *linear = la;
                if (is_noncanonical_address(la))
                        goto bad;

                *max_size = min_t(u64, ~0u, (1ull << 48) - la);
                if (size > *max_size)
                        goto bad;
                break;
        default:
                *linear = la = (u32)la;
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
                        goto bad;
                /* code segment in protected mode or read-only data segment */
                if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
                                        || !(desc.type & 2)) && write)
                        goto bad;
                /* unreadable code segment */
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if (!(desc.type & 8) && (desc.type & 4)) {
                        /* expand-down segment */
                        if (addr.ea <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                }
                if (addr.ea > lim)
                        goto bad;
                if (lim == 0xffffffff)
                        *max_size = ~0u;
                else {
                        *max_size = (u64)lim + 1 - addr.ea;
                        if (size > *max_size)
                                goto bad;
                }
                break;
        }
        if (la & (insn_alignment(ctxt, size) - 1))
                return emulate_gp(ctxt, 0);
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, 0);
        else
                return emulate_gp(ctxt, 0);
}
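
/*
 * Expand-down example: for a data segment with the expand-down type bit
 * set and limit 0x1000, offsets 0x0000-0x1000 are *invalid* (addr.ea <=
 * lim above) and the valid range runs from lim+1 up to 0xffff or
 * 0xffffffff depending on desc.d -- the opposite of a normal segment.
 */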
static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        unsigned max_size;

        return __linearize(ctxt, addr, &max_size, size, write, false,
                           ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
                             enum x86emul_mode mode)
{
        ulong linear;
        int rc;
        unsigned max_size;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                           .ea = dst };

        if (ctxt->op_bytes != sizeof(unsigned long))
                addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
        rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
        if (rc == X86EMUL_CONTINUE)
                ctxt->_eip = addr.ea;
        return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        return assign_eip(ctxt, dst, ctxt->mode);
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
                          const struct desc_struct *cs_desc)
{
        enum x86emul_mode mode = ctxt->mode;
        int rc;

#ifdef CONFIG_X86_64
        if (ctxt->mode >= X86EMUL_MODE_PROT16) {
                if (cs_desc->l) {
                        u64 efer = 0;

                        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                        if (efer & EFER_LMA)
                                mode = X86EMUL_MODE_PROT64;
                } else
                        mode = X86EMUL_MODE_PROT32; /* temporary value */
        }
#endif
        if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
                mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
        rc = assign_eip(ctxt, dst, mode);
        if (rc == X86EMUL_CONTINUE)
                ctxt->mode = mode;
        return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
        return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
                              void *data, unsigned size)
{
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
                               ulong linear, void *data,
                               unsigned int size)
{
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
                               struct segmented_address addr,
                               void *data,
                               unsigned int size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
        int rc;
        unsigned size, max_size;
        unsigned long linear;
        int cur_size = ctxt->fetch.end - ctxt->fetch.data;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                           .ea = ctxt->eip + cur_size };

        /*
         * We do not know exactly how many bytes will be needed, and
         * __linearize is expensive, so fetch as much as possible.  We
         * just have to avoid going beyond the 15 byte limit, the end
         * of the segment, or the end of the page.
         *
         * __linearize is called with size 0 so that it does not do any
         * boundary check itself.  Instead, we use max_size to check
         * against op_size.
         */
        rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
                         &linear);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;

        /* 15UL ^ cur_size == 15 - cur_size here, since 0 <= cur_size <= 15 */
        size = min_t(unsigned, 15UL ^ cur_size, max_size);
        size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

        /*
         * One instruction can only straddle two pages,
         * and one has been loaded at the beginning of
         * x86_decode_insn.  So, if not enough bytes
         * still, we must have hit the 15-byte boundary.
         */
        if (unlikely(size < op_size))
                return emulate_gp(ctxt, 0);

        rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
                              size, &ctxt->exception);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
        ctxt->fetch.end += size;
        return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
                                               unsigned size)
{
        unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

        if (unlikely(done_size < size))
                return __do_insn_fetch_bytes(ctxt, size - done_size);
        else
                return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)                                        \
({      _type _x;                                                       \
                                                                        \
        rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));                 \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
        ctxt->_eip += sizeof(_type);                                    \
        _x = *(_type __aligned(1) *) ctxt->fetch.ptr;                   \
        ctxt->fetch.ptr += sizeof(_type);                               \
        _x;                                                             \
})
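
/*
 * Usage sketch: the decode path pulls opcode bytes and immediates through
 * this macro, e.g.
 *
 *	ctxt->b = insn_fetch(u8, ctxt);
 *
 * Note that the statement expression assigns to a local "rc" and jumps to
 * a local "done" label on fetch failure, so it is only usable where both
 * are in scope.
 */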
#define insn_fetch_arr(_arr, _size, _ctxt)                              \
({                                                                      \
        rc = do_insn_fetch_bytes(_ctxt, _size);                         \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
        ctxt->_eip += (_size);                                          \
        memcpy(_arr, ctxt->fetch.ptr, _size);                           \
        ctxt->fetch.ptr += (_size);                                     \
})
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
                             int byteop)
{
        void *p;
        int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
        else
                p = reg_rmw(ctxt, modrm_reg);
        return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
        /* If src is zero, do not writeback, but update flags */
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
        /* If src is zero, do not writeback, but update flags */
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return fastop(ctxt, em_bsr);
}
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
        u8 rc;
        void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

        flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
        asm("push %[flags]; popf; " CALL_NOSPEC
            : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
        return rc;
}
static void fetch_register_operand(struct operand *op)
{
        switch (op->bytes) {
        case 1:
                op->val = *(u8 *)op->addr.reg;
                break;
        case 2:
                op->val = *(u16 *)op->addr.reg;
                break;
        case 4:
                op->val = *(u32 *)op->addr.reg;
                break;
        case 8:
                op->val = *(u64 *)op->addr.reg;
                break;
        }
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
        case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
        case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
        case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
        case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
        case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
        case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
        case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
        case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
        case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
        case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
        case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
        case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
        case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
        case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
                          int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
        case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
        case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
        case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
        case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
        case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
        case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
        case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
        case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
        case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
        case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
        case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
        case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
        case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
        case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
        case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
        case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
        case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
        case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
        case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
        case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
        case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
        case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
        case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
        case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
        case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
        case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
        case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
        case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fninit");
        ctxt->ops->put_fpu(ctxt);
        return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
        u16 fcw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstcw %0": "+m"(fcw));
        ctxt->ops->put_fpu(ctxt);

        ctxt->dst.val = fcw;

        return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
        u16 fsw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstsw %0": "+m"(fsw));
        ctxt->ops->put_fpu(ctxt);

        ctxt->dst.val = fsw;

        return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op)
{
        unsigned reg = ctxt->modrm_reg;

        if (!(ctxt->d & ModRM))
                reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

        if (ctxt->d & Sse) {
                op->type = OP_XMM;
                op->bytes = 16;
                op->addr.xmm = reg;
                read_sse_reg(ctxt, &op->vec_val, reg);
                return;
        }
        if (ctxt->d & Mmx) {
                reg &= 7;
                op->type = OP_MM;
                op->bytes = 8;
                op->addr.mm = reg;
                return;
        }

        op->type = OP_REG;
        op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

        fetch_register_operand(op);
        op->orig_val = op->val;
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
        if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
                ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        struct operand *op)
{
        u8 sib;
        int index_reg, base_reg, scale;
        int rc = X86EMUL_CONTINUE;
        ulong modrm_ea = 0;

        ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
        index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
        base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

        ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
        ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
        ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
        ctxt->modrm_seg = VCPU_SREG_DS;

        if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
                                               ctxt->d & ByteOp);
                if (ctxt->d & Sse) {
                        op->type = OP_XMM;
                        op->bytes = 16;
                        op->addr.xmm = ctxt->modrm_rm;
                        read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
                        return rc;
                }
                if (ctxt->d & Mmx) {
                        op->type = OP_MM;
                        op->bytes = 8;
                        op->addr.mm = ctxt->modrm_rm & 7;
                        return rc;
                }
                fetch_register_operand(op);
                return rc;
        }

        op->type = OP_MEM;

        if (ctxt->ad_bytes == 2) {
                unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
                unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
                unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
                unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

                /* 16-bit ModR/M decode. */
                switch (ctxt->modrm_mod) {
                case 0:
                        if (ctxt->modrm_rm == 6)
                                modrm_ea += insn_fetch(u16, ctxt);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(u16, ctxt);
                        break;
                }
                switch (ctxt->modrm_rm) {
                case 0:
                        modrm_ea += bx + si;
                        break;
                case 1:
                        modrm_ea += bx + di;
                        break;
                case 2:
                        modrm_ea += bp + si;
                        break;
                case 3:
                        modrm_ea += bp + di;
                        break;
                case 4:
                        modrm_ea += si;
                        break;
                case 5:
                        modrm_ea += di;
                        break;
                case 6:
                        if (ctxt->modrm_mod != 0)
                                modrm_ea += bp;
                        break;
                case 7:
                        modrm_ea += bx;
                        break;
                }
                if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
                    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
                        ctxt->modrm_seg = VCPU_SREG_SS;
                modrm_ea = (u16)modrm_ea;
        } else {
                /* 32/64-bit ModR/M decode. */
                if ((ctxt->modrm_rm & 7) == 4) {
                        sib = insn_fetch(u8, ctxt);
                        index_reg |= (sib >> 3) & 7;
                        base_reg |= sib & 7;
                        scale = sib >> 6;

                        if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
                                modrm_ea += insn_fetch(s32, ctxt);
                        else {
                                modrm_ea += reg_read(ctxt, base_reg);
                                adjust_modrm_seg(ctxt, base_reg);
                                /* Increment ESP on POP [ESP] */
                                if ((ctxt->d & IncSP) &&
                                    base_reg == VCPU_REGS_RSP)
                                        modrm_ea += ctxt->op_bytes;
                        }
                        if (index_reg != 4)
                                modrm_ea += reg_read(ctxt, index_reg) << scale;
                } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
                        modrm_ea += insn_fetch(s32, ctxt);
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                ctxt->rip_relative = 1;
                } else {
                        base_reg = ctxt->modrm_rm;
                        modrm_ea += reg_read(ctxt, base_reg);
                        adjust_modrm_seg(ctxt, base_reg);
                }
                switch (ctxt->modrm_mod) {
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(s32, ctxt);
                        break;
                }
        }
        op->addr.mem.ea = modrm_ea;
        if (ctxt->ad_bytes != 8)
                ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
        return rc;
}
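
/*
 * Worked example (32-bit address size): ModRM byte 0x44 decodes as
 * mod=01, reg=000, rm=100; rm 4 means a SIB byte follows and mod 1 adds
 * an 8-bit displacement, so the sequence "0x44 0x24 0x08" addresses
 * [esp + 8].
 */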
static int decode_abs(struct x86_emulate_ctxt *ctxt,
                      struct operand *op)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_MEM;
        switch (ctxt->ad_bytes) {
        case 2:
                op->addr.mem.ea = insn_fetch(u16, ctxt);
                break;
        case 4:
                op->addr.mem.ea = insn_fetch(u32, ctxt);
                break;
        case 8:
                op->addr.mem.ea = insn_fetch(u64, ctxt);
                break;
        }
done:
        return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
        long sv = 0, mask;

        if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
                mask = ~((long)ctxt->dst.bytes * 8 - 1);

                if (ctxt->src.bytes == 2)
                        sv = (s16)ctxt->src.val & (s16)mask;
                else if (ctxt->src.bytes == 4)
                        sv = (s32)ctxt->src.val & (s32)mask;
                else
                        sv = (s64)ctxt->src.val & (s64)mask;

                ctxt->dst.addr.mem.ea = address_mask(ctxt,
                                        ctxt->dst.addr.mem.ea + (sv >> 3));
        }

        /* only subword offset */
        ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *dest, unsigned size)
{
        int rc;
        struct read_cache *mc = &ctxt->mem_read;

        if (mc->pos < mc->end)
                goto read_cached;

        WARN_ON((mc->end + size) >= sizeof(mc->data));

        rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
                                      &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        mc->end += size;

read_cached:
        memcpy(dest, mc->data + mc->pos, size);
        mc->pos += size;
        return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
                          struct segmented_address addr,
                          void *data,
                          unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           const void *data,
                           unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_emulated(ctxt, linear, data, size,
                                         &ctxt->exception);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
                             struct segmented_address addr,
                             const void *orig_data, const void *data,
                             unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
                                           size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                           unsigned int size, unsigned short port,
                           void *dest)
{
        struct read_cache *rc = &ctxt->io_read;

        if (rc->pos == rc->end) { /* refill pio read ahead */
                unsigned int in_page, n;
                unsigned int count = ctxt->rep_prefix ?
                        address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
                in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
                        offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
                        PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
                n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
                if (n == 0)
                        n = 1;
                rc->pos = rc->end = 0;
                if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
                        return 0;
                rc->end = n * size;
        }

        if (ctxt->rep_prefix && (ctxt->d & String) &&
            !(ctxt->eflags & X86_EFLAGS_DF)) {
                ctxt->dst.data = rc->data + rc->pos;
                ctxt->dst.type = OP_MEM_STR;
                ctxt->dst.count = (rc->end - rc->pos) / size;
                rc->pos = rc->end;
        } else {
                memcpy(dest, rc->data + rc->pos, size);
                rc->pos += size;
        }
        return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 index, struct desc_struct *desc)
{
        struct desc_ptr dt;
        ulong addr;

        ctxt->ops->get_idt(ctxt, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, index << 3 | 0x2);

        addr = dt.address + index * 8;
        return linear_read_system(ctxt, addr, desc, sizeof *desc);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, struct desc_ptr *dt)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        u32 base3 = 0;

        if (selector & 1 << 2) {
                struct desc_struct desc;
                u16 sel;

                memset(dt, 0, sizeof *dt);
                if (!ops->get_segment(ctxt, &sel, &desc, &base3,
                                      VCPU_SREG_LDTR))
                        return;

                dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
                dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
        } else
                ops->get_gdt(ctxt, dt);
}
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
                              u16 selector, ulong *desc_addr_p)
{
        struct desc_ptr dt;
        u16 index = selector >> 3;
        ulong addr;

        get_descriptor_table_ptr(ctxt, selector, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, selector & 0xfffc);

        addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
        if (addr >> 32 != 0) {
                u64 efer;

                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                if (!(efer & EFER_LMA))
                        addr &= (u32)-1;
        }
#endif

        *desc_addr_p = addr;
        return X86EMUL_CONTINUE;
}
/* allowed just for 8 byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, struct desc_struct *desc,
                                   ulong *desc_addr_p)
{
        int rc;

        rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8 byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                    u16 selector, struct desc_struct *desc)
{
        ulong addr;
        int rc;

        rc = get_descriptor_ptr(ctxt, selector, &addr);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return linear_write_system(ctxt, addr, desc, sizeof *desc);
}
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, int seg, u8 cpl,
                                     enum x86_transfer_type transfer,
                                     struct desc_struct *desc)
{
        struct desc_struct seg_desc, old_desc;
        u8 dpl, rpl;
        unsigned err_vec = GP_VECTOR;
        u32 err_code = 0;
        bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
        ulong desc_addr;
        int ret;
        u16 dummy;
        u32 base3 = 0;

        memset(&seg_desc, 0, sizeof seg_desc);

        if (ctxt->mode == X86EMUL_MODE_REAL) {
                /* set real mode segment descriptor (keep limit etc. for
                 * unreal mode) */
                ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
                set_desc_base(&seg_desc, selector << 4);
                goto load;
        } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
                /* VM86 needs a clean new segment descriptor */
                set_desc_base(&seg_desc, selector << 4);
                set_desc_limit(&seg_desc, 0xffff);
                seg_desc.type = 3;
                seg_desc.p = 1;
                seg_desc.s = 1;
                seg_desc.dpl = 3;
                goto load;
        }

        rpl = selector & 3;

        /* TR should be in GDT only */
        if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
                goto exception;

        /* NULL selector is not valid for TR, CS and (except for long mode) SS */
        if (null_selector) {
                if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
                        goto exception;

                if (seg == VCPU_SREG_SS) {
                        if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
                                goto exception;

                        /*
                         * ctxt->ops->set_segment expects the CPL to be in
                         * SS.DPL, so fake an expand-up 32-bit data segment.
                         */
                        seg_desc.type = 3;
                        seg_desc.p = 1;
                        seg_desc.s = 1;
                        seg_desc.dpl = cpl;
                        seg_desc.d = 1;
                        seg_desc.g = 1;
                }

                /* Skip all following checks */
                goto load;
        }

        ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        err_code = selector & 0xfffc;
        err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
                                                           GP_VECTOR;

        /* can't load system descriptor into segment selector */
        if (seg <= VCPU_SREG_GS && !seg_desc.s) {
                if (transfer == X86_TRANSFER_CALL_JMP)
                        return X86EMUL_UNHANDLEABLE;
                goto exception;
        }

        if (!seg_desc.p) {
                err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
                goto exception;
        }

        dpl = seg_desc.dpl;

        switch (seg) {
        case VCPU_SREG_SS:
                /*
                 * segment is not a writable data segment or segment
                 * selector's RPL != CPL or segment descriptor's DPL != CPL
                 */
                if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
                        goto exception;
                break;
        case VCPU_SREG_CS:
                if (!(seg_desc.type & 8))
                        goto exception;
                if (seg_desc.type & 4) {
                        /* conforming */
                        if (dpl > cpl)
                                goto exception;
                } else {
                        /* nonconforming */
                        if (rpl > cpl || dpl != cpl)
                                goto exception;
                }
                /* in long-mode d/b must be clear if l is set */
                if (seg_desc.d && seg_desc.l) {
                        u64 efer = 0;

                        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                        if (efer & EFER_LMA)
                                goto exception;
                }
                /* CS(RPL) <- CPL */
                selector = (selector & 0xfffc) | cpl;
                break;
        case VCPU_SREG_TR:
                if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
                        goto exception;
                old_desc = seg_desc;
                seg_desc.type |= 2; /* busy */
                ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
                                                  sizeof(seg_desc), &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                break;
        case VCPU_SREG_LDTR:
                if (seg_desc.s || seg_desc.type != 2)
                        goto exception;
                break;
        default: /* DS, ES, FS, or GS */
                /*
                 * segment is not a data or readable code segment or
                 * ((segment is a data or nonconforming code segment)
                 * and (both RPL and CPL > DPL))
                 */
                if ((seg_desc.type & 0xa) == 0x8 ||
                    (((seg_desc.type & 0xc) != 0xc) &&
                     (rpl > dpl && cpl > dpl)))
                        goto exception;
                break;
        }

        if (seg_desc.s) {
                /* mark segment as accessed */
                if (!(seg_desc.type & 1)) {
                        seg_desc.type |= 1;
                        ret = write_segment_descriptor(ctxt, selector,
                                                       &seg_desc);
                        if (ret != X86EMUL_CONTINUE)
                                return ret;
                }
        } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
                ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                if (is_noncanonical_address(get_desc_base(&seg_desc) |
                                            ((u64)base3 << 32)))
                        return emulate_gp(ctxt, 0);
        }
load:
        ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
        if (desc)
                *desc = seg_desc;
        return X86EMUL_CONTINUE;
exception:
        return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, int seg)
{
        u8 cpl = ctxt->ops->cpl(ctxt);

        /*
         * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
         * they can load it at CPL<3 (Intel's manual says only LSS can,
         * but it's wrong).
         *
         * However, the Intel manual says that putting IST=1/DPL=3 in
         * an interrupt gate will result in SS=3 (the AMD manual instead
         * says it doesn't), so allow SS=3 in __load_segment_descriptor
         * and only forbid it here.
         */
        if (seg == VCPU_SREG_SS && selector == 3 &&
            ctxt->mode == X86EMUL_MODE_PROT64)
                return emulate_exception(ctxt, GP_VECTOR, 0, true);

        return __load_segment_descriptor(ctxt, selector, seg, cpl,
                                         X86_TRANSFER_NONE, NULL);
}
static void write_register_operand(struct operand *op)
{
        return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
        switch (op->type) {
        case OP_REG:
                write_register_operand(op);
                break;
        case OP_MEM:
                if (ctxt->lock_prefix)
                        return segmented_cmpxchg(ctxt,
                                                 op->addr.mem,
                                                 &op->orig_val,
                                                 &op->val,
                                                 op->bytes);
                else
                        return segmented_write(ctxt,
                                               op->addr.mem,
                                               &op->val,
                                               op->bytes);
                break;
        case OP_MEM_STR:
                return segmented_write(ctxt,
                                       op->addr.mem,
                                       op->data,
                                       op->bytes * op->count);
                break;
        case OP_XMM:
                write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
                break;
        case OP_MM:
                write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
                break;
        case OP_NONE:
                /* no writeback */
                break;
        default:
                break;
        }
        return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
        struct segmented_address addr;

        rsp_increment(ctxt, -bytes);
        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;

        return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
        return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
                       void *dest, int len)
{
        int rc;
        struct segmented_address addr;

        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;
        rc = segmented_read(ctxt, addr, dest, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rsp_increment(ctxt, len);
        return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
        return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
                        void *dest, int len)
{
        int rc;
        unsigned long val, change_mask;
        int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
        int cpl = ctxt->ops->cpl(ctxt);

        rc = emulate_pop(ctxt, &val, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
                      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
                      X86_EFLAGS_AC | X86_EFLAGS_ID;

        switch(ctxt->mode) {
        case X86EMUL_MODE_PROT64:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT16:
                if (cpl == 0)
                        change_mask |= X86_EFLAGS_IOPL;
                if (cpl <= iopl)
                        change_mask |= X86_EFLAGS_IF;
                break;
        case X86EMUL_MODE_VM86:
                if (iopl < 3)
                        return emulate_gp(ctxt, 0);
                change_mask |= X86_EFLAGS_IF;
                break;
        default: /* real mode */
                change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
                break;
        }

        *(unsigned long *)dest =
                (ctxt->eflags & ~change_mask) | (val & change_mask);

        return rc;
}
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
        ctxt->dst.type = OP_REG;
        ctxt->dst.addr.reg = &ctxt->eflags;
        ctxt->dst.bytes = ctxt->op_bytes;
        return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned frame_size = ctxt->src.val;
        unsigned nesting_level = ctxt->src2.val & 31;
        ulong rbp;

        if (nesting_level)
                return X86EMUL_UNHANDLEABLE;

        rbp = reg_read(ctxt, VCPU_REGS_RBP);
        rc = push(ctxt, &rbp, stack_size(ctxt));
        if (rc != X86EMUL_CONTINUE)
                return rc;
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
                      stack_mask(ctxt));
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
                      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
                      stack_mask(ctxt));
        return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
                      stack_mask(ctxt));
        return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;

        ctxt->src.val = get_segment_selector(ctxt, seg);
        if (ctxt->op_bytes == 4) {
                rsp_increment(ctxt, -2);
                ctxt->op_bytes = 2;
        }

        return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;
        unsigned long selector;
        int rc;

        rc = emulate_pop(ctxt, &selector, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        if (ctxt->modrm_reg == VCPU_SREG_SS)
                ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
        if (ctxt->op_bytes > 2)
                rsp_increment(ctxt, ctxt->op_bytes - 2);

        rc = load_segment_descriptor(ctxt, (u16)selector, seg);
        return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
        unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RAX;

        while (reg <= VCPU_REGS_RDI) {
                ctxt->src.val = (reg == VCPU_REGS_RSP) ?
                                old_esp : reg_read(ctxt, reg);

                rc = em_push(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        return rc;

                ++reg;
        }

        return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
        ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
        return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RDI;
        u32 val;

        while (reg >= VCPU_REGS_RAX) {
                if (reg == VCPU_REGS_RSP) {
                        rsp_increment(ctxt, ctxt->op_bytes);
                        --reg;
                        continue;
                }

                rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
                if (rc != X86EMUL_CONTINUE)
                        break;
                assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
                --reg;
        }
        return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        int rc;
        struct desc_ptr dt;
        gva_t cs_addr;
        gva_t eip_addr;
        u16 cs, eip;

        /* TODO: Add limit checks */
        ctxt->src.val = ctxt->eflags;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

        ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->src.val = ctxt->_eip;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ops->get_idt(ctxt, &dt);

        eip_addr = dt.address + (irq << 2);
        cs_addr = dt.address + (irq << 2) + 2;

        rc = linear_read_system(ctxt, cs_addr, &cs, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = linear_read_system(ctxt, eip_addr, &eip, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->_eip = eip;

        return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
        int rc;

        invalidate_registers(ctxt);
        rc = __emulate_int_real(ctxt, irq);
        if (rc == X86EMUL_CONTINUE)
                writeback_registers(ctxt);
        return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
        switch(ctxt->mode) {
        case X86EMUL_MODE_REAL:
                return __emulate_int_real(ctxt, irq);
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT64:
        default:
                /* Protected mode interrupts unimplemented yet */
                return X86EMUL_UNHANDLEABLE;
        }
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
        int rc = X86EMUL_CONTINUE;
        unsigned long temp_eip = 0;
        unsigned long temp_eflags = 0;
        unsigned long cs = 0;
        unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
                             X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
                             X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
                             X86_EFLAGS_AC | X86_EFLAGS_ID |
                             X86_EFLAGS_FIXED;
        unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
                                  X86_EFLAGS_VIP;

        /* TODO: Add stack limit check */

        rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        if (temp_eip & ~0xffff)
                return emulate_gp(ctxt, 0);

        rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->_eip = temp_eip;

        if (ctxt->op_bytes == 4)
                ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
        else if (ctxt->op_bytes == 2) {
                ctxt->eflags &= ~0xffff;
                ctxt->eflags |= temp_eflags;
        }

        ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
        ctxt->eflags |= X86_EFLAGS_FIXED;
        ctxt->ops->set_nmi_mask(ctxt, false);

        return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
        switch(ctxt->mode) {
        case X86EMUL_MODE_REAL:
                return emulate_iret_real(ctxt);
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT64:
        default:
                /* iret from protected mode unimplemented yet */
                return X86EMUL_UNHANDLEABLE;
        }
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned short sel;
        struct desc_struct new_desc;
        u8 cpl = ctxt->ops->cpl(ctxt);

        memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

        rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
                                       X86_TRANSFER_CALL_JMP,
                                       &new_desc);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
        /* Error handling is not implemented. */
        if (rc != X86EMUL_CONTINUE)
                return X86EMUL_UNHANDLEABLE;

        return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
        return assign_eip_near(ctxt, ctxt->src.val);
}
static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        long int old_eip;

        old_eip = ctxt->_eip;
        rc = assign_eip_near(ctxt, ctxt->src.val);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        ctxt->src.val = old_eip;
        rc = em_push(ctxt);
        return rc;
}
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
        u64 old = ctxt->dst.orig_val64;

        if (ctxt->dst.bytes == 16)
                return X86EMUL_UNHANDLEABLE;

        if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
            ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
                *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
                *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
                ctxt->eflags &= ~X86_EFLAGS_ZF;
        } else {
                ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
                        (u32) reg_read(ctxt, VCPU_REGS_RBX);

                ctxt->eflags |= X86_EFLAGS_ZF;
        }
        return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned long eip;

        rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned long eip, cs;
        int cpl = ctxt->ops->cpl(ctxt);
        struct desc_struct new_desc;

        rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        /* Outer-privilege level return is not implemented */
        if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
                return X86EMUL_UNHANDLEABLE;
        rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
                                       X86_TRANSFER_RET,
                                       &new_desc);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rc = assign_eip_far(ctxt, eip, &new_desc);
        /* Error handling is not implemented. */
        if (rc != X86EMUL_CONTINUE)
                return X86EMUL_UNHANDLEABLE;

        return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
        int rc;

        rc = em_ret_far(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rsp_increment(ctxt, ctxt->src.val);
        return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
        /* Save real source value, then compare EAX against destination. */
        ctxt->dst.orig_val = ctxt->dst.val;
        ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
        ctxt->src.orig_val = ctxt->src.val;
        ctxt->src.val = ctxt->dst.orig_val;
        fastop(ctxt, em_cmp);

        if (ctxt->eflags & X86_EFLAGS_ZF) {
                /* Success: write back to memory; no update of EAX */
                ctxt->src.type = OP_NONE;
                ctxt->dst.val = ctxt->src.orig_val;
        } else {
                /* Failure: write the value we saw to EAX. */
                ctxt->src.type = OP_REG;
                ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                ctxt->src.val = ctxt->dst.orig_val;
                /* Create write-cycle to dest by writing the same value */
                ctxt->dst.val = ctxt->dst.orig_val;
        }
        return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;
        unsigned short sel;
        int rc;

        memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

        rc = load_segment_descriptor(ctxt, sel, seg);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->dst.val = ctxt->src.val;
        return rc;
}
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
        u32 eax, ebx, ecx, edx;

        eax = 0x80000001;
        ecx = 0;
        ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
        return edx & bit(X86_FEATURE_LM);
}
#define GET_SMSTATE(type, smbase, offset)                                 \
        ({                                                                \
         type __val;                                                      \
         int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
                                      sizeof(__val));                     \
         if (r != X86EMUL_CONTINUE)                                       \
                 return X86EMUL_UNHANDLEABLE;                             \
         __val;                                                           \
        })
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
        desc->g    = (flags >> 23) & 1;
        desc->d    = (flags >> 22) & 1;
        desc->l    = (flags >> 21) & 1;
        desc->avl  = (flags >> 20) & 1;
        desc->p    = (flags >> 15) & 1;
        desc->dpl  = (flags >> 13) & 3;
        desc->s    = (flags >> 12) & 1;
        desc->type = (flags >>  8) & 15;
}
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
        struct desc_struct desc;
        int offset;
        u16 selector;

        selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);

        if (n < 3)
                offset = 0x7f84 + n * 12;
        else
                offset = 0x7f2c + (n - 3) * 12;

        set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
        set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
        rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
        return X86EMUL_CONTINUE;
}
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
        struct desc_struct desc;
        int offset;
        u16 selector;
        u32 base3;

        offset = 0x7e00 + n * 16;

        selector =                GET_SMSTATE(u16, smbase, offset);
        rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
        set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
        set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
        base3 =                   GET_SMSTATE(u32, smbase, offset + 12);

        ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
        return X86EMUL_CONTINUE;
}
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                                    u64 cr0, u64 cr3, u64 cr4)
{
        int bad;
        u64 pcid;

        /* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
        pcid = 0;
        if (cr4 & X86_CR4_PCIDE) {
                pcid = cr3 & 0xfff;
                cr3 &= ~0xfff;
        }

        bad = ctxt->ops->set_cr(ctxt, 3, cr3);
        if (bad)
                return X86EMUL_UNHANDLEABLE;

        /*
         * First enable PAE, long mode needs it before CR0.PG = 1 is set.
         * Then enable protected mode.  However, PCID cannot be enabled
         * if EFER.LMA=0, so set it separately.
         */
        bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
        if (bad)
                return X86EMUL_UNHANDLEABLE;

        bad = ctxt->ops->set_cr(ctxt, 0, cr0);
        if (bad)
                return X86EMUL_UNHANDLEABLE;

        if (cr4 & X86_CR4_PCIDE) {
                bad = ctxt->ops->set_cr(ctxt, 4, cr4);
                if (bad)
                        return X86EMUL_UNHANDLEABLE;
                if (pcid) {
                        bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
                        if (bad)
                                return X86EMUL_UNHANDLEABLE;
                }
        }

        return X86EMUL_CONTINUE;
}
2452 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2454 struct desc_struct desc;
2457 u32 val, cr0, cr3, cr4;
2460 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2461 cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
2462 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2463 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2465 for (i = 0; i < 8; i++)
2466 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2468 val = GET_SMSTATE(u32, smbase, 0x7fcc);
2469 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2470 val = GET_SMSTATE(u32, smbase, 0x7fc8);
2471 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2473 selector = GET_SMSTATE(u32, smbase, 0x7fc4);
2474 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
2475 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
2476 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
2477 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2479 selector = GET_SMSTATE(u32, smbase, 0x7fc0);
2480 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
2481 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
2482 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
2483 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2485 dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
2486 dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
2487 ctxt->ops->set_gdt(ctxt, &dt);
2489 dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
2490 dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
2491 ctxt->ops->set_idt(ctxt, &dt);
2493 for (i = 0; i < 6; i++) {
2494 int r = rsm_load_seg_32(ctxt, smbase, i);
2495 if (r != X86EMUL_CONTINUE)
2496 return r;
2499 cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2501 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2503 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
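/*
 * For reference, the 32-bit save-state offsets consumed above, relative
 * to smbase + 0x8000 (cf. em_rsm()): CR0 0x7ffc, CR3 0x7ff8, EFLAGS
 * 0x7ff4, EIP 0x7ff0, GPRs from 0x7fd0, DR6 0x7fcc, DR7 0x7fc8, TR
 * 0x7fc4, LDTR 0x7fc0, GDT 0x7f74/0x7f70, IDT 0x7f58/0x7f54, segment
 * selectors from 0x7fa8, CR4 0x7f14 and SMBASE 0x7ef8.
 */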
2506 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2508 struct desc_struct desc;
2510 u64 val, cr0, cr3, cr4;
2515 for (i = 0; i < 16; i++)
2516 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2518 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2519 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2521 val = GET_SMSTATE(u32, smbase, 0x7f68);
2522 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2523 val = GET_SMSTATE(u32, smbase, 0x7f60);
2524 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2526 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2527 cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
2528 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2529 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2530 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2531 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2533 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2534 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2535 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2536 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2537 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2538 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2540 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2541 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2542 ctxt->ops->set_idt(ctxt, &dt);
2544 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2545 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2546 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2547 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2548 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2549 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2551 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2552 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2553 ctxt->ops->set_gdt(ctxt, &dt);
2555 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2556 if (r != X86EMUL_CONTINUE)
2557 return r;
2559 for (i = 0; i < 6; i++) {
2560 r = rsm_load_seg_64(ctxt, smbase, i);
2561 if (r != X86EMUL_CONTINUE)
2562 return r;
2565 return X86EMUL_CONTINUE;
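/*
 * Likewise for the 64-bit layout read above: GPRs descending from
 * 0x7ff8, RIP 0x7f78, RFLAGS 0x7f70, DR6 0x7f68, DR7 0x7f60, CR0/CR3/
 * CR4 at 0x7f58/0x7f50/0x7f48, SMBASE 0x7f00, EFER 0x7ed0, TR 0x7e90,
 * IDT 0x7e84, LDTR 0x7e70, GDT 0x7e64 and the segments at 0x7e00 + n * 16.
 */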
2568 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2570 unsigned long cr0, cr4, efer;
2574 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2575 return emulate_ud(ctxt);
2578 * Get back to real mode, to prepare a safe state in which to load
2579 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2580 * supports long mode.
2582 if (emulator_has_longmode(ctxt)) {
2583 struct desc_struct cs_desc;
2585 /* Zero CR4.PCIDE before CR0.PG. */
2586 cr4 = ctxt->ops->get_cr(ctxt, 4);
2587 if (cr4 & X86_CR4_PCIDE)
2588 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2590 /* A 32-bit code segment is required to clear EFER.LMA. */
2591 memset(&cs_desc, 0, sizeof(cs_desc));
2592 cs_desc.type = 0xb;
2593 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2594 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2597 /* For the 64-bit case, this will clear EFER.LMA. */
2598 cr0 = ctxt->ops->get_cr(ctxt, 0);
2599 if (cr0 & X86_CR0_PE)
2600 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2602 if (emulator_has_longmode(ctxt)) {
2603 /* Clear CR4.PAE before clearing EFER.LME. */
2604 cr4 = ctxt->ops->get_cr(ctxt, 4);
2605 if (cr4 & X86_CR4_PAE)
2606 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2608 /* And finally go back to 32-bit mode. */
2609 efer = 0;
2610 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2613 smbase = ctxt->ops->get_smbase(ctxt);
2614 if (emulator_has_longmode(ctxt))
2615 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2616 else
2617 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2619 if (ret != X86EMUL_CONTINUE) {
2620 /* FIXME: should triple fault */
2621 return X86EMUL_UNHANDLEABLE;
2624 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2625 ctxt->ops->set_nmi_mask(ctxt, false);
2627 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2628 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2629 return X86EMUL_CONTINUE;
2633 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2634 struct desc_struct *cs, struct desc_struct *ss)
2636 cs->l = 0; /* will be adjusted later */
2637 set_desc_base(cs, 0); /* flat segment */
2638 cs->g = 1; /* 4kb granularity */
2639 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2640 cs->type = 0x0b; /* Read, Execute, Accessed */
2642 cs->dpl = 0; /* will be adjusted later */
2647 set_desc_base(ss, 0); /* flat segment */
2648 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2649 ss->g = 1; /* 4kb granularity */
2651 ss->type = 0x03; /* Read/Write, Accessed */
2652 ss->d = 1; /* 32bit stack segment */
2659 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2661 u32 eax, ebx, ecx, edx;
2664 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2665 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2666 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2667 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2670 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2672 const struct x86_emulate_ops *ops = ctxt->ops;
2673 u32 eax, ebx, ecx, edx;
2676 * syscall should always be enabled in long mode, so only apply the
2677 * vendor-specific (CPUID-based) check when another mode is active...
2679 if (ctxt->mode == X86EMUL_MODE_PROT64)
2684 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2686 * Intel ("GenuineIntel")
2687 * remark: Intel CPUs only support "syscall" in 64bit
2688 * longmode. Also an 64bit guest with a
2689 * 32bit compat-app running will #UD !! While this
2690 * behaviour can be fixed (by emulating) into AMD
2691 * response - CPUs of AMD can't behave like Intel.
2693 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2694 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2695 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2698 /* AMD ("AuthenticAMD") */
2699 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2700 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2701 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2704 /* AMD ("AMDisbetter!") */
2705 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2706 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2707 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2710 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2714 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2716 const struct x86_emulate_ops *ops = ctxt->ops;
2717 struct desc_struct cs, ss;
2722 /* syscall is not available in real mode */
2723 if (ctxt->mode == X86EMUL_MODE_REAL ||
2724 ctxt->mode == X86EMUL_MODE_VM86)
2725 return emulate_ud(ctxt);
2727 if (!(em_syscall_is_enabled(ctxt)))
2728 return emulate_ud(ctxt);
2730 ops->get_msr(ctxt, MSR_EFER, &efer);
2731 setup_syscalls_segments(ctxt, &cs, &ss);
2733 if (!(efer & EFER_SCE))
2734 return emulate_ud(ctxt);
2736 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2738 cs_sel = (u16)(msr_data & 0xfffc);
2739 ss_sel = (u16)(msr_data + 8);
2741 if (efer & EFER_LMA) {
2742 cs.d = 0;
2743 cs.l = 1;
2744 }
2745 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2746 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2748 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2749 if (efer & EFER_LMA) {
2750 #ifdef CONFIG_X86_64
2751 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2753 ops->get_msr(ctxt,
2754 ctxt->mode == X86EMUL_MODE_PROT64 ?
2755 MSR_LSTAR : MSR_CSTAR, &msr_data);
2756 ctxt->_eip = msr_data;
2758 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2759 ctxt->eflags &= ~msr_data;
2760 ctxt->eflags |= X86_EFLAGS_FIXED;
2761 #endif
2762 } else {
2763 /* legacy mode */
2764 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2765 ctxt->_eip = (u32)msr_data;
2767 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2770 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2771 return X86EMUL_CONTINUE;
2774 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2776 const struct x86_emulate_ops *ops = ctxt->ops;
2777 struct desc_struct cs, ss;
2782 ops->get_msr(ctxt, MSR_EFER, &efer);
2783 /* inject #GP if in real mode */
2784 if (ctxt->mode == X86EMUL_MODE_REAL)
2785 return emulate_gp(ctxt, 0);
2788 * Not recognized on AMD in compat mode (but is recognized in legacy
2789 * mode).
2791 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2792 && !vendor_intel(ctxt))
2793 return emulate_ud(ctxt);
2795 /* sysenter/sysexit have not been tested in 64bit mode. */
2796 if (ctxt->mode == X86EMUL_MODE_PROT64)
2797 return X86EMUL_UNHANDLEABLE;
2799 setup_syscalls_segments(ctxt, &cs, &ss);
2801 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2802 if ((msr_data & 0xfffc) == 0x0)
2803 return emulate_gp(ctxt, 0);
2805 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2806 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2807 ss_sel = cs_sel + 8;
2808 if (efer & EFER_LMA) {
2809 cs.d = 0;
2810 cs.l = 1;
2811 }
2813 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2814 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2816 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2817 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2819 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2820 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2821 (u32)msr_data;
2823 return X86EMUL_CONTINUE;
2826 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2828 const struct x86_emulate_ops *ops = ctxt->ops;
2829 struct desc_struct cs, ss;
2830 u64 msr_data, rcx, rdx;
2832 u16 cs_sel = 0, ss_sel = 0;
2834 /* inject #GP if in real mode or Virtual 8086 mode */
2835 if (ctxt->mode == X86EMUL_MODE_REAL ||
2836 ctxt->mode == X86EMUL_MODE_VM86)
2837 return emulate_gp(ctxt, 0);
2839 setup_syscalls_segments(ctxt, &cs, &ss);
2841 if ((ctxt->rex_prefix & 0x8) != 0x0)
2842 usermode = X86EMUL_MODE_PROT64;
2843 else
2844 usermode = X86EMUL_MODE_PROT32;
2846 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2847 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2849 cs.dpl = 3;
2850 ss.dpl = 3;
2851 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2852 switch (usermode) {
2853 case X86EMUL_MODE_PROT32:
2854 cs_sel = (u16)(msr_data + 16);
2855 if ((msr_data & 0xfffc) == 0x0)
2856 return emulate_gp(ctxt, 0);
2857 ss_sel = (u16)(msr_data + 24);
2858 rcx = (u32)rcx;
2859 rdx = (u32)rdx;
2860 break;
2861 case X86EMUL_MODE_PROT64:
2862 cs_sel = (u16)(msr_data + 32);
2863 if (msr_data == 0x0)
2864 return emulate_gp(ctxt, 0);
2865 ss_sel = cs_sel + 8;
2866 cs.d = 0;
2867 cs.l = 1;
2868 if (is_noncanonical_address(rcx) ||
2869 is_noncanonical_address(rdx))
2870 return emulate_gp(ctxt, 0);
2871 break;
2872 }
2873 cs_sel |= SEGMENT_RPL_MASK;
2874 ss_sel |= SEGMENT_RPL_MASK;
2876 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2877 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2879 ctxt->_eip = rdx;
2880 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2882 return X86EMUL_CONTINUE;
2885 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2888 if (ctxt->mode == X86EMUL_MODE_REAL)
2889 return false;
2890 if (ctxt->mode == X86EMUL_MODE_VM86)
2891 return true;
2892 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2893 return ctxt->ops->cpl(ctxt) > iopl;
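/*
 * Note that a "bad" IOPL does not fault by itself: emulator_io_permited()
 * below still consults the TSS I/O permission bitmap, which may grant
 * access to individual ports.
 */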
2896 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2899 const struct x86_emulate_ops *ops = ctxt->ops;
2900 struct desc_struct tr_seg;
2903 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2904 unsigned mask = (1 << len) - 1;
2907 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2908 if (!tr_seg.p)
2909 return false;
2910 if (desc_limit_scaled(&tr_seg) < 103)
2911 return false;
2912 base = get_desc_base(&tr_seg);
2913 #ifdef CONFIG_X86_64
2914 base |= ((u64)base3) << 32;
2915 #endif
2916 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2917 if (r != X86EMUL_CONTINUE)
2918 return false;
2919 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2920 return false;
2921 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2922 if (r != X86EMUL_CONTINUE)
2923 return false;
2924 if ((perm >> bit_idx) & mask)
2925 return false;
2927 return true;
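/*
 * Worked example: port = 0x3f8 with len = 2 gives bit_idx = 0 and
 * mask = 3, so the u16 read at base + io_bitmap_ptr + 0x7f must have
 * both of its low two bits clear for the access to be allowed.
 */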
2929 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2932 if (ctxt->perm_ok)
2933 return true;
2935 if (emulator_bad_iopl(ctxt))
2936 if (!emulator_io_port_access_allowed(ctxt, port, len))
2937 return false;
2939 ctxt->perm_ok = true;
2941 return true;
2944 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2947 * Intel CPUs mask the counter and pointers in a quite strange
2948 * manner when ECX is zero due to REP-string optimizations.
2950 #ifdef CONFIG_X86_64
2951 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2952 return;
2954 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2956 switch (ctxt->b) {
2957 case 0xa4: /* movsb */
2958 case 0xa5: /* movsd/w */
2959 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2960 /* fall through */
2961 case 0xaa: /* stosb */
2962 case 0xab: /* stosd/w */
2963 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2968 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2969 struct tss_segment_16 *tss)
2971 tss->ip = ctxt->_eip;
2972 tss->flag = ctxt->eflags;
2973 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2974 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2975 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2976 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2977 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2978 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2979 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2980 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2982 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2983 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2984 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2985 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2986 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2989 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2990 struct tss_segment_16 *tss)
2995 ctxt->_eip = tss->ip;
2996 ctxt->eflags = tss->flag | 2;
2997 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2998 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2999 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3000 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3001 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3002 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3003 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3004 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3007 * SDM says that segment selectors are loaded before segment
3008 * descriptors.
3010 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3011 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3012 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3013 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3014 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3016 cpl = tss->cs & 3;
3019 * Now load segment descriptors. If a fault happens at this stage,
3020 * it is handled in the context of the new task.
3022 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3023 X86_TRANSFER_TASK_SWITCH, NULL);
3024 if (ret != X86EMUL_CONTINUE)
3025 return ret;
3026 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3027 X86_TRANSFER_TASK_SWITCH, NULL);
3028 if (ret != X86EMUL_CONTINUE)
3029 return ret;
3030 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3031 X86_TRANSFER_TASK_SWITCH, NULL);
3032 if (ret != X86EMUL_CONTINUE)
3033 return ret;
3034 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3035 X86_TRANSFER_TASK_SWITCH, NULL);
3036 if (ret != X86EMUL_CONTINUE)
3037 return ret;
3038 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3039 X86_TRANSFER_TASK_SWITCH, NULL);
3040 if (ret != X86EMUL_CONTINUE)
3041 return ret;
3043 return X86EMUL_CONTINUE;
3046 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3047 u16 tss_selector, u16 old_tss_sel,
3048 ulong old_tss_base, struct desc_struct *new_desc)
3050 struct tss_segment_16 tss_seg;
3052 u32 new_tss_base = get_desc_base(new_desc);
3054 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3055 if (ret != X86EMUL_CONTINUE)
3056 return ret;
3058 save_state_to_tss16(ctxt, &tss_seg);
3060 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3061 if (ret != X86EMUL_CONTINUE)
3062 return ret;
3064 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
3065 if (ret != X86EMUL_CONTINUE)
3066 return ret;
3068 if (old_tss_sel != 0xffff) {
3069 tss_seg.prev_task_link = old_tss_sel;
3071 ret = linear_write_system(ctxt, new_tss_base,
3072 &tss_seg.prev_task_link,
3073 sizeof tss_seg.prev_task_link);
3074 if (ret != X86EMUL_CONTINUE)
3075 return ret;
3078 return load_state_from_tss16(ctxt, &tss_seg);
3081 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3082 struct tss_segment_32 *tss)
3084 /* CR3 and ldt selector are not saved intentionally */
3085 tss->eip = ctxt->_eip;
3086 tss->eflags = ctxt->eflags;
3087 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3088 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3089 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3090 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3091 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3092 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3093 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3094 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3096 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3097 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3098 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3099 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3100 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3101 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3104 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3105 struct tss_segment_32 *tss)
3110 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3111 return emulate_gp(ctxt, 0);
3112 ctxt->_eip = tss->eip;
3113 ctxt->eflags = tss->eflags | 2;
3115 /* General purpose registers */
3116 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3117 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3118 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3119 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3120 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3121 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3122 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3123 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3126 * SDM says that segment selectors are loaded before segment
3127 * descriptors. This is important because CPL checks will
3128 * use CS.RPL.
3130 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3131 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3132 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3133 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3134 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3135 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3136 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3139 * If we're switching between Protected Mode and VM86, we need to make
3140 * sure to update the mode before loading the segment descriptors so
3141 * that the selectors are interpreted correctly.
3143 if (ctxt->eflags & X86_EFLAGS_VM) {
3144 ctxt->mode = X86EMUL_MODE_VM86;
3145 cpl = 3;
3146 } else {
3147 ctxt->mode = X86EMUL_MODE_PROT32;
3148 cpl = tss->cs & 3;
3149 }
3152 * Now load segment descriptors. If a fault happens at this stage,
3153 * it is handled in the context of the new task.
3155 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3156 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3157 if (ret != X86EMUL_CONTINUE)
3158 return ret;
3159 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3160 X86_TRANSFER_TASK_SWITCH, NULL);
3161 if (ret != X86EMUL_CONTINUE)
3162 return ret;
3163 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3164 X86_TRANSFER_TASK_SWITCH, NULL);
3165 if (ret != X86EMUL_CONTINUE)
3166 return ret;
3167 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3168 X86_TRANSFER_TASK_SWITCH, NULL);
3169 if (ret != X86EMUL_CONTINUE)
3170 return ret;
3171 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3172 X86_TRANSFER_TASK_SWITCH, NULL);
3173 if (ret != X86EMUL_CONTINUE)
3174 return ret;
3175 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3176 X86_TRANSFER_TASK_SWITCH, NULL);
3177 if (ret != X86EMUL_CONTINUE)
3178 return ret;
3179 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3180 X86_TRANSFER_TASK_SWITCH, NULL);
3182 return ret;
3185 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3186 u16 tss_selector, u16 old_tss_sel,
3187 ulong old_tss_base, struct desc_struct *new_desc)
3189 struct tss_segment_32 tss_seg;
3191 u32 new_tss_base = get_desc_base(new_desc);
3192 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3193 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3195 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3196 if (ret != X86EMUL_CONTINUE)
3199 save_state_to_tss32(ctxt, &tss_seg);
3201 /* Only GP registers and segment selectors are saved */
3202 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3203 ldt_sel_offset - eip_offset);
3204 if (ret != X86EMUL_CONTINUE)
3207 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
3208 if (ret != X86EMUL_CONTINUE)
3211 if (old_tss_sel != 0xffff) {
3212 tss_seg.prev_task_link = old_tss_sel;
3214 ret = linear_write_system(ctxt, new_tss_base,
3215 &tss_seg.prev_task_link,
3216 sizeof tss_seg.prev_task_link);
3217 if (ret != X86EMUL_CONTINUE)
3221 return load_state_from_tss32(ctxt, &tss_seg);
3224 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3225 u16 tss_selector, int idt_index, int reason,
3226 bool has_error_code, u32 error_code)
3228 const struct x86_emulate_ops *ops = ctxt->ops;
3229 struct desc_struct curr_tss_desc, next_tss_desc;
3231 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3232 ulong old_tss_base =
3233 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3235 ulong desc_addr, dr7;
3237 /* FIXME: old_tss_base == ~0 ? */
3239 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3240 if (ret != X86EMUL_CONTINUE)
3241 return ret;
3242 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3243 if (ret != X86EMUL_CONTINUE)
3244 return ret;
3246 /* FIXME: check that next_tss_desc is tss */
3249 * Check privileges. The three cases are task switch caused by...
3251 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3252 * 2. Exception/IRQ/iret: No check is performed
3253 * 3. jmp/call to TSS/task-gate: No check is performed since the
3254 * hardware checks it before exiting.
3256 if (reason == TASK_SWITCH_GATE) {
3257 if (idt_index != -1) {
3258 /* Software interrupts */
3259 struct desc_struct task_gate_desc;
3262 ret = read_interrupt_descriptor(ctxt, idt_index,
3263 &task_gate_desc);
3264 if (ret != X86EMUL_CONTINUE)
3265 return ret;
3267 dpl = task_gate_desc.dpl;
3268 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3269 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3273 desc_limit = desc_limit_scaled(&next_tss_desc);
3274 if (!next_tss_desc.p ||
3275 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3276 desc_limit < 0x2b)) {
3277 return emulate_ts(ctxt, tss_selector & 0xfffc);
3280 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3281 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3282 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3285 if (reason == TASK_SWITCH_IRET)
3286 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3288 /* Set the back link to the previous task only if the NT bit is set
3289 in eflags; note that old_tss_sel is not used after this point. */
3290 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3291 old_tss_sel = 0xffff;
3293 if (next_tss_desc.type & 8)
3294 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3295 old_tss_base, &next_tss_desc);
3297 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3298 old_tss_base, &next_tss_desc);
3299 if (ret != X86EMUL_CONTINUE)
3300 return ret;
3302 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3303 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3305 if (reason != TASK_SWITCH_IRET) {
3306 next_tss_desc.type |= (1 << 1); /* set busy flag */
3307 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3310 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3311 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3313 if (has_error_code) {
3314 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3315 ctxt->lock_prefix = 0;
3316 ctxt->src.val = (unsigned long) error_code;
3317 ret = em_push(ctxt);
3320 ops->get_dr(ctxt, 7, &dr7);
3321 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3323 return ret;
3326 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3327 u16 tss_selector, int idt_index, int reason,
3328 bool has_error_code, u32 error_code)
3332 invalidate_registers(ctxt);
3333 ctxt->_eip = ctxt->eip;
3334 ctxt->dst.type = OP_NONE;
3336 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3337 has_error_code, error_code);
3339 if (rc == X86EMUL_CONTINUE) {
3340 ctxt->eip = ctxt->_eip;
3341 writeback_registers(ctxt);
3344 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3347 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3350 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3352 register_address_increment(ctxt, reg, df * op->bytes);
3353 op->addr.mem.ea = register_address(ctxt, reg);
3356 static int em_das(struct x86_emulate_ctxt *ctxt)
3359 bool af, cf, old_cf;
3361 cf = ctxt->eflags & X86_EFLAGS_CF;
3362 al = ctxt->dst.val;
3364 old_al = al;
3365 old_cf = cf;
3366 cf = false;
3367 af = ctxt->eflags & X86_EFLAGS_AF;
3368 if ((al & 0x0f) > 9 || af) {
3369 al -= 6;
3370 cf = old_cf | (al >= 250);
3371 af = true;
3372 } else {
3373 af = false;
3374 }
3375 if (old_al > 0x99 || old_cf) {
3376 al -= 0x60;
3377 cf = true;
3378 }
3380 ctxt->dst.val = al;
3381 /* Set PF, ZF, SF */
3382 ctxt->src.type = OP_IMM;
3383 ctxt->src.val = 0;
3384 ctxt->src.bytes = 1;
3385 fastop(ctxt, em_or);
3386 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3387 if (cf)
3388 ctxt->eflags |= X86_EFLAGS_CF;
3389 if (af)
3390 ctxt->eflags |= X86_EFLAGS_AF;
3391 return X86EMUL_CONTINUE;
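/*
 * Worked example, following the SDM's DAS description: AL = 0x9b with
 * AF = CF = 0. The low nibble 0xb > 9, so AL -= 6 (AL = 0x95, AF = 1);
 * the original AL 0x9b > 0x99, so AL -= 0x60 (AL = 0x35) and CF = 1.
 */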
3394 static int em_aam(struct x86_emulate_ctxt *ctxt)
3398 if (ctxt->src.val == 0)
3399 return emulate_de(ctxt);
3401 al = ctxt->dst.val & 0xff;
3402 ah = al / ctxt->src.val;
3403 al %= ctxt->src.val;
3405 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3407 /* Set PF, ZF, SF */
3408 ctxt->src.type = OP_IMM;
3409 ctxt->src.val = 0;
3410 ctxt->src.bytes = 1;
3411 fastop(ctxt, em_or);
3413 return X86EMUL_CONTINUE;
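/*
 * Example: AAM with the default immediate 10 and AL = 123 yields
 * AH = 123 / 10 = 12 and AL = 123 % 10 = 3, i.e. AX = 0x0c03.
 */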
3416 static int em_aad(struct x86_emulate_ctxt *ctxt)
3418 u8 al = ctxt->dst.val & 0xff;
3419 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3421 al = (al + (ah * ctxt->src.val)) & 0xff;
3423 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3425 /* Set PF, ZF, SF */
3426 ctxt->src.type = OP_IMM;
3427 ctxt->src.val = 0;
3428 ctxt->src.bytes = 1;
3429 fastop(ctxt, em_or);
3431 return X86EMUL_CONTINUE;
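/*
 * Example: AAD with immediate 10, AH = 0x0c and AL = 0x03 yields
 * AL = (3 + 12 * 10) & 0xff = 0x7b and clears AH.
 */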
3434 static int em_call(struct x86_emulate_ctxt *ctxt)
3437 long rel = ctxt->src.val;
3439 ctxt->src.val = (unsigned long)ctxt->_eip;
3440 rc = jmp_rel(ctxt, rel);
3441 if (rc != X86EMUL_CONTINUE)
3442 return rc;
3443 return em_push(ctxt);
3446 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3451 struct desc_struct old_desc, new_desc;
3452 const struct x86_emulate_ops *ops = ctxt->ops;
3453 int cpl = ctxt->ops->cpl(ctxt);
3454 enum x86emul_mode prev_mode = ctxt->mode;
3456 old_eip = ctxt->_eip;
3457 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3459 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3460 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3461 X86_TRANSFER_CALL_JMP, &new_desc);
3462 if (rc != X86EMUL_CONTINUE)
3463 return rc;
3465 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3466 if (rc != X86EMUL_CONTINUE)
3467 goto fail;
3469 ctxt->src.val = old_cs;
3470 rc = em_push(ctxt);
3471 if (rc != X86EMUL_CONTINUE)
3472 goto fail;
3474 ctxt->src.val = old_eip;
3475 rc = em_push(ctxt);
3476 /* If we failed, we tainted the memory, but at the very least we should
3477 restore cs */
3478 if (rc != X86EMUL_CONTINUE) {
3479 pr_warn_once("faulting far call emulation tainted memory\n");
3480 goto fail;
3481 }
3482 return rc;
3483 fail:
3484 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3485 ctxt->mode = prev_mode;
3487 return rc;
3490 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3495 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3496 if (rc != X86EMUL_CONTINUE)
3497 return rc;
3498 rc = assign_eip_near(ctxt, eip);
3499 if (rc != X86EMUL_CONTINUE)
3500 return rc;
3501 rsp_increment(ctxt, ctxt->src.val);
3502 return X86EMUL_CONTINUE;
3505 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3507 /* Write back the register source. */
3508 ctxt->src.val = ctxt->dst.val;
3509 write_register_operand(&ctxt->src);
3511 /* Write back the memory destination with implicit LOCK prefix. */
3512 ctxt->dst.val = ctxt->src.orig_val;
3513 ctxt->lock_prefix = 1;
3514 return X86EMUL_CONTINUE;
3517 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3519 ctxt->dst.val = ctxt->src2.val;
3520 return fastop(ctxt, em_imul);
3523 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3525 ctxt->dst.type = OP_REG;
3526 ctxt->dst.bytes = ctxt->src.bytes;
3527 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3528 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3530 return X86EMUL_CONTINUE;
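/*
 * E.g. a 16-bit CWD with AX = 0x8000: the sign bit is set, so
 * ~((1) - 1) = ~0 and DX becomes 0xffff; for AX = 0x1234, DX = 0.
 */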
3533 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3537 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3538 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3539 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3540 return X86EMUL_CONTINUE;
3543 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3547 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3548 return emulate_gp(ctxt, 0);
3549 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3550 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3551 return X86EMUL_CONTINUE;
3554 static int em_mov(struct x86_emulate_ctxt *ctxt)
3556 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3557 return X86EMUL_CONTINUE;
3560 #define FFL(x) bit(X86_FEATURE_##x)
3562 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3564 u32 ebx, ecx, edx, eax = 1;
3568 * Check MOVBE is set in the guest-visible CPUID leaf.
3570 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3571 if (!(ecx & FFL(MOVBE)))
3572 return emulate_ud(ctxt);
3574 switch (ctxt->op_bytes) {
3575 case 2:
3576 /*
3577 * From MOVBE definition: "...When the operand size is 16 bits,
3578 * the upper word of the destination register remains unchanged
3579 * ..."
3580 *
3581 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3582 * rules so we have to do the operation almost by hand.
3583 */
3584 tmp = (u16)ctxt->src.val;
3585 ctxt->dst.val &= ~0xffffUL;
3586 ctxt->dst.val |= (unsigned long)swab16(tmp);
3587 break;
3588 case 4:
3589 ctxt->dst.val = swab32((u32)ctxt->src.val);
3590 break;
3591 case 8:
3592 ctxt->dst.val = swab64(ctxt->src.val);
3593 break;
3594 default:
3595 BUG();
3596 }
3597 return X86EMUL_CONTINUE;
3600 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3602 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3603 return emulate_gp(ctxt, 0);
3605 /* Disable writeback. */
3606 ctxt->dst.type = OP_NONE;
3607 return X86EMUL_CONTINUE;
3610 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3614 if (ctxt->mode == X86EMUL_MODE_PROT64)
3615 val = ctxt->src.val & ~0ULL;
3616 else
3617 val = ctxt->src.val & ~0U;
3619 /* #UD condition is already handled. */
3620 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3621 return emulate_gp(ctxt, 0);
3623 /* Disable writeback. */
3624 ctxt->dst.type = OP_NONE;
3625 return X86EMUL_CONTINUE;
3628 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3632 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3633 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3634 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3635 return emulate_gp(ctxt, 0);
3637 return X86EMUL_CONTINUE;
3640 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3644 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3645 return emulate_gp(ctxt, 0);
3647 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3648 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3649 return X86EMUL_CONTINUE;
3652 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3654 if (ctxt->modrm_reg > VCPU_SREG_GS)
3655 return emulate_ud(ctxt);
3657 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3658 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3659 ctxt->dst.bytes = 2;
3660 return X86EMUL_CONTINUE;
3663 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3665 u16 sel = ctxt->src.val;
3667 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3668 return emulate_ud(ctxt);
3670 if (ctxt->modrm_reg == VCPU_SREG_SS)
3671 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3673 /* Disable writeback. */
3674 ctxt->dst.type = OP_NONE;
3675 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3678 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3680 u16 sel = ctxt->src.val;
3682 /* Disable writeback. */
3683 ctxt->dst.type = OP_NONE;
3684 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3687 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3689 u16 sel = ctxt->src.val;
3691 /* Disable writeback. */
3692 ctxt->dst.type = OP_NONE;
3693 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3696 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3701 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3702 if (rc == X86EMUL_CONTINUE)
3703 ctxt->ops->invlpg(ctxt, linear);
3704 /* Disable writeback. */
3705 ctxt->dst.type = OP_NONE;
3706 return X86EMUL_CONTINUE;
3709 static int em_clts(struct x86_emulate_ctxt *ctxt)
3713 cr0 = ctxt->ops->get_cr(ctxt, 0);
3714 cr0 &= ~X86_CR0_TS;
3715 ctxt->ops->set_cr(ctxt, 0, cr0);
3716 return X86EMUL_CONTINUE;
3719 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3721 int rc = ctxt->ops->fix_hypercall(ctxt);
3723 if (rc != X86EMUL_CONTINUE)
3724 return rc;
3726 /* Let the processor re-execute the fixed hypercall */
3727 ctxt->_eip = ctxt->eip;
3728 /* Disable writeback. */
3729 ctxt->dst.type = OP_NONE;
3730 return X86EMUL_CONTINUE;
3733 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3734 void (*get)(struct x86_emulate_ctxt *ctxt,
3735 struct desc_ptr *ptr))
3737 struct desc_ptr desc_ptr;
3739 if (ctxt->mode == X86EMUL_MODE_PROT64)
3740 ctxt->op_bytes = 8;
3741 get(ctxt, &desc_ptr);
3742 if (ctxt->op_bytes == 2) {
3743 ctxt->op_bytes = 4;
3744 desc_ptr.address &= 0x00ffffff;
3745 }
3746 /* Disable writeback. */
3747 ctxt->dst.type = OP_NONE;
3748 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3749 &desc_ptr, 2 + ctxt->op_bytes);
3752 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3754 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3757 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3759 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3762 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3764 struct desc_ptr desc_ptr;
3767 if (ctxt->mode == X86EMUL_MODE_PROT64)
3768 ctxt->op_bytes = 8;
3769 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3770 &desc_ptr.size, &desc_ptr.address,
3771 ctxt->op_bytes);
3772 if (rc != X86EMUL_CONTINUE)
3773 return rc;
3774 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3775 is_noncanonical_address(desc_ptr.address))
3776 return emulate_gp(ctxt, 0);
3777 if (lgdt)
3778 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3779 else
3780 ctxt->ops->set_idt(ctxt, &desc_ptr);
3781 /* Disable writeback. */
3782 ctxt->dst.type = OP_NONE;
3783 return X86EMUL_CONTINUE;
3786 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3788 return em_lgdt_lidt(ctxt, true);
3791 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3793 return em_lgdt_lidt(ctxt, false);
3796 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3798 if (ctxt->dst.type == OP_MEM)
3799 ctxt->dst.bytes = 2;
3800 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3801 return X86EMUL_CONTINUE;
3804 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3806 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3807 | (ctxt->src.val & 0x0f));
3808 ctxt->dst.type = OP_NONE;
3809 return X86EMUL_CONTINUE;
3812 static int em_loop(struct x86_emulate_ctxt *ctxt)
3814 int rc = X86EMUL_CONTINUE;
3816 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3817 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3818 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3819 rc = jmp_rel(ctxt, ctxt->src.val);
3821 return rc;
3824 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3826 int rc = X86EMUL_CONTINUE;
3828 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3829 rc = jmp_rel(ctxt, ctxt->src.val);
3831 return rc;
3834 static int em_in(struct x86_emulate_ctxt *ctxt)
3836 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3837 &ctxt->dst.val))
3838 return X86EMUL_IO_NEEDED;
3840 return X86EMUL_CONTINUE;
3843 static int em_out(struct x86_emulate_ctxt *ctxt)
3845 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3846 &ctxt->src.val, 1);
3847 /* Disable writeback. */
3848 ctxt->dst.type = OP_NONE;
3849 return X86EMUL_CONTINUE;
3852 static int em_cli(struct x86_emulate_ctxt *ctxt)
3854 if (emulator_bad_iopl(ctxt))
3855 return emulate_gp(ctxt, 0);
3857 ctxt->eflags &= ~X86_EFLAGS_IF;
3858 return X86EMUL_CONTINUE;
3861 static int em_sti(struct x86_emulate_ctxt *ctxt)
3863 if (emulator_bad_iopl(ctxt))
3864 return emulate_gp(ctxt, 0);
3866 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3867 ctxt->eflags |= X86_EFLAGS_IF;
3868 return X86EMUL_CONTINUE;
3871 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3873 u32 eax, ebx, ecx, edx;
3875 eax = reg_read(ctxt, VCPU_REGS_RAX);
3876 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3877 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3878 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3879 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3880 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3881 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3882 return X86EMUL_CONTINUE;
3885 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3889 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3890 X86_EFLAGS_SF;
3891 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3893 ctxt->eflags &= ~0xffUL;
3894 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3895 return X86EMUL_CONTINUE;
3898 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3900 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3901 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3902 return X86EMUL_CONTINUE;
3905 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3907 switch (ctxt->op_bytes) {
3908 #ifdef CONFIG_X86_64
3909 case 8:
3910 asm("bswap %0" : "+r"(ctxt->dst.val));
3911 break;
3912 #endif
3913 default:
3914 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3915 break;
3916 }
3917 return X86EMUL_CONTINUE;
3920 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3922 /* emulating clflush regardless of cpuid */
3923 return X86EMUL_CONTINUE;
3926 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3928 ctxt->dst.val = (s32) ctxt->src.val;
3929 return X86EMUL_CONTINUE;
3932 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3934 u32 eax = 1, ebx, ecx = 0, edx;
3936 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3937 if (!(edx & FFL(FXSR)))
3938 return emulate_ud(ctxt);
3940 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3941 return emulate_nm(ctxt);
3944 * Rather than work around the lack of fxsave64/fxrstor64 on old
3945 * compilers, simply don't emulate a case that should never be hit.
3947 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3948 return X86EMUL_UNHANDLEABLE;
3950 return X86EMUL_CONTINUE;
3954 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3955 * 1) 16 bit mode
3956 * 2) 32 bit mode
3957 * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
3958 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
3959 * this part.
3960 * 3) 64-bit mode with REX.W prefix
3961 * - like (2), but XMM 8-15 are being saved and restored
3962 * 4) 64-bit mode without REX.W prefix
3963 * - like (3), but FIP and FDP are 64 bit
3965 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3966 * desired result. (4) is not emulated.
3968 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3969 * and FPU DS) should match.
3971 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3973 struct fxregs_state fx_state;
3977 rc = check_fxsr(ctxt);
3978 if (rc != X86EMUL_CONTINUE)
3979 return rc;
3981 ctxt->ops->get_fpu(ctxt);
3983 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3985 ctxt->ops->put_fpu(ctxt);
3987 if (rc != X86EMUL_CONTINUE)
3988 return rc;
3990 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
3991 size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
3993 size = offsetof(struct fxregs_state, xmm_space[0]);
3995 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3998 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
3999 struct fxregs_state *new)
4001 int rc = X86EMUL_CONTINUE;
4002 struct fxregs_state old;
4004 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
4005 if (rc != X86EMUL_CONTINUE)
4006 return rc;
4009 * 64 bit host will restore XMM 8-15, which is not correct on non-64
4010 * bit guests. Load the current values in order to preserve 64 bit
4011 * XMMs after fxrstor.
4013 #ifdef CONFIG_X86_64
4014 /* XXX: accessing XMM 8-15 very awkwardly */
4015 memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
4016 #endif
4019 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
4020 * does save and restore MXCSR.
4022 if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
4023 memcpy(new->xmm_space, old.xmm_space, 8 * 16);
4025 return rc;
4028 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4030 struct fxregs_state fx_state;
4033 rc = check_fxsr(ctxt);
4034 if (rc != X86EMUL_CONTINUE)
4035 return rc;
4037 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
4038 if (rc != X86EMUL_CONTINUE)
4039 return rc;
4041 if (fx_state.mxcsr >> 16)
4042 return emulate_gp(ctxt, 0);
4044 ctxt->ops->get_fpu(ctxt);
4046 if (ctxt->mode < X86EMUL_MODE_PROT64)
4047 rc = fxrstor_fixup(ctxt, &fx_state);
4049 if (rc == X86EMUL_CONTINUE)
4050 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4052 ctxt->ops->put_fpu(ctxt);
4057 static bool valid_cr(int nr)
4059 switch (nr) {
4060 case 0:
4061 case 2 ... 4:
4062 case 8:
4063 return true;
4064 default:
4065 return false;
4066 }
4069 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4071 if (!valid_cr(ctxt->modrm_reg))
4072 return emulate_ud(ctxt);
4074 return X86EMUL_CONTINUE;
4077 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4079 u64 new_val = ctxt->src.val64;
4080 int cr = ctxt->modrm_reg;
4083 static u64 cr_reserved_bits[] = {
4084 0xffffffff00000000ULL,
4085 0, 0, 0, /* CR3 checked later */
4086 CR4_RESERVED_BITS,
4087 0, 0, 0, /* CR8 checked later */
4088 };
4091 if (!valid_cr(cr))
4092 return emulate_ud(ctxt);
4094 if (new_val & cr_reserved_bits[cr])
4095 return emulate_gp(ctxt, 0);
4100 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4101 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4102 return emulate_gp(ctxt, 0);
4104 cr4 = ctxt->ops->get_cr(ctxt, 4);
4105 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4107 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4108 !(cr4 & X86_CR4_PAE))
4109 return emulate_gp(ctxt, 0);
4116 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4117 if (efer & EFER_LMA)
4118 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
4120 if (new_val & rsvd)
4121 return emulate_gp(ctxt, 0);
4126 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4128 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4129 return emulate_gp(ctxt, 0);
4135 return X86EMUL_CONTINUE;
4138 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4142 ctxt->ops->get_dr(ctxt, 7, &dr7);
4144 /* Check if DR7.GD (General Detect, bit 13) is set */
4145 return dr7 & (1 << 13);
4148 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4150 int dr = ctxt->modrm_reg;
4153 if (dr > 7)
4154 return emulate_ud(ctxt);
4156 cr4 = ctxt->ops->get_cr(ctxt, 4);
4157 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4158 return emulate_ud(ctxt);
4160 if (check_dr7_gd(ctxt)) {
4163 ctxt->ops->get_dr(ctxt, 6, &dr6);
4165 dr6 |= DR6_BD | DR6_RTM;
4166 ctxt->ops->set_dr(ctxt, 6, dr6);
4167 return emulate_db(ctxt);
4170 return X86EMUL_CONTINUE;
4173 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4175 u64 new_val = ctxt->src.val64;
4176 int dr = ctxt->modrm_reg;
4178 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4179 return emulate_gp(ctxt, 0);
4181 return check_dr_read(ctxt);
4184 static int check_svme(struct x86_emulate_ctxt *ctxt)
4188 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4190 if (!(efer & EFER_SVME))
4191 return emulate_ud(ctxt);
4193 return X86EMUL_CONTINUE;
4196 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4198 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4200 /* Valid physical address? */
4201 if (rax & 0xffff000000000000ULL)
4202 return emulate_gp(ctxt, 0);
4204 return check_svme(ctxt);
4207 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4209 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4211 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4212 return emulate_ud(ctxt);
4214 return X86EMUL_CONTINUE;
4217 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4219 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4220 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4222 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4223 ctxt->ops->check_pmc(ctxt, rcx))
4224 return emulate_gp(ctxt, 0);
4226 return X86EMUL_CONTINUE;
4229 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4231 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4232 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4233 return emulate_gp(ctxt, 0);
4235 return X86EMUL_CONTINUE;
4238 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4240 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4241 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4242 return emulate_gp(ctxt, 0);
4244 return X86EMUL_CONTINUE;
4247 #define D(_y) { .flags = (_y) }
4248 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4249 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4250 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4251 #define N D(NotImpl)
4252 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4253 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4254 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4255 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4256 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4257 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4258 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4259 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4260 #define II(_f, _e, _i) \
4261 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4262 #define IIP(_f, _e, _i, _p) \
4263 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4264 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4265 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4267 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4268 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4269 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4270 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4271 #define I2bvIP(_f, _e, _i, _p) \
4272 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4274 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4275 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4276 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
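/*
 * Illustration (a mechanical expansion, not extra table content): an
 * entry such as F6ALU(Lock, em_add) below covers the six classic ALU
 * encodings of an instruction, here ADD:
 *	00 /r  add r/m8, r8    (ByteOp | DstMem | SrcReg | ModRM | Lock)
 *	01 /r  add r/m, r      (DstMem | SrcReg | ModRM | Lock)
 *	02 /r  add r8, r/m8    (ByteOp | DstReg | SrcMem | ModRM)
 *	03 /r  add r, r/m      (DstReg | SrcMem | ModRM)
 *	04 ib  add al, imm8    (ByteOp | DstAcc | SrcImm)
 *	05 iz  add rAX, imm    (DstAcc | SrcImm)
 */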
4278 static const struct opcode group7_rm0[] = {
4280 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4284 static const struct opcode group7_rm1[] = {
4285 DI(SrcNone | Priv, monitor),
4286 DI(SrcNone | Priv, mwait),
4290 static const struct opcode group7_rm3[] = {
4291 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4292 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4293 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4294 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4295 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4296 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4297 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4298 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4301 static const struct opcode group7_rm7[] = {
4303 DIP(SrcNone, rdtscp, check_rdtsc),
4307 static const struct opcode group1[] = {
4309 F(Lock | PageTable, em_or),
4312 F(Lock | PageTable, em_and),
4318 static const struct opcode group1A[] = {
4319 I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
4322 static const struct opcode group2[] = {
4323 F(DstMem | ModRM, em_rol),
4324 F(DstMem | ModRM, em_ror),
4325 F(DstMem | ModRM, em_rcl),
4326 F(DstMem | ModRM, em_rcr),
4327 F(DstMem | ModRM, em_shl),
4328 F(DstMem | ModRM, em_shr),
4329 F(DstMem | ModRM, em_shl),
4330 F(DstMem | ModRM, em_sar),
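/*
 * The second em_shl entry is not a typo: ModRM /6 in group 2 is the
 * undocumented alias of SHL (/4), and real hardware executes it as such.
 */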
4333 static const struct opcode group3[] = {
4334 F(DstMem | SrcImm | NoWrite, em_test),
4335 F(DstMem | SrcImm | NoWrite, em_test),
4336 F(DstMem | SrcNone | Lock, em_not),
4337 F(DstMem | SrcNone | Lock, em_neg),
4338 F(DstXacc | Src2Mem, em_mul_ex),
4339 F(DstXacc | Src2Mem, em_imul_ex),
4340 F(DstXacc | Src2Mem, em_div_ex),
4341 F(DstXacc | Src2Mem, em_idiv_ex),
4344 static const struct opcode group4[] = {
4345 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4346 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4350 static const struct opcode group5[] = {
4351 F(DstMem | SrcNone | Lock, em_inc),
4352 F(DstMem | SrcNone | Lock, em_dec),
4353 I(SrcMem | NearBranch, em_call_near_abs),
4354 I(SrcMemFAddr | ImplicitOps, em_call_far),
4355 I(SrcMem | NearBranch, em_jmp_abs),
4356 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4357 I(SrcMem | Stack, em_push), D(Undefined),
4360 static const struct opcode group6[] = {
4361 DI(Prot | DstMem, sldt),
4362 DI(Prot | DstMem, str),
4363 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4364 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4368 static const struct group_dual group7 = { {
4369 II(Mov | DstMem, em_sgdt, sgdt),
4370 II(Mov | DstMem, em_sidt, sidt),
4371 II(SrcMem | Priv, em_lgdt, lgdt),
4372 II(SrcMem | Priv, em_lidt, lidt),
4373 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4374 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4375 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4379 N, EXT(0, group7_rm3),
4380 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4381 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4385 static const struct opcode group8[] = {
4387 F(DstMem | SrcImmByte | NoWrite, em_bt),
4388 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4389 F(DstMem | SrcImmByte | Lock, em_btr),
4390 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4393 static const struct group_dual group9 = { {
4394 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4396 N, N, N, N, N, N, N, N,
4399 static const struct opcode group11[] = {
4400 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4404 static const struct gprefix pfx_0f_ae_7 = {
4405 I(SrcMem | ByteOp, em_clflush), N, N, N,
4408 static const struct group_dual group15 = { {
4409 I(ModRM | Aligned16, em_fxsave),
4410 I(ModRM | Aligned16, em_fxrstor),
4411 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4413 N, N, N, N, N, N, N, N,
4416 static const struct gprefix pfx_0f_6f_0f_7f = {
4417 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4420 static const struct instr_dual instr_dual_0f_2b = {
4424 static const struct gprefix pfx_0f_2b = {
4425 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4428 static const struct gprefix pfx_0f_28_0f_29 = {
4429 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4432 static const struct gprefix pfx_0f_e7 = {
4433 N, I(Sse, em_mov), N, N,
4436 static const struct escape escape_d9 = { {
4437 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4440 N, N, N, N, N, N, N, N,
4442 N, N, N, N, N, N, N, N,
4444 N, N, N, N, N, N, N, N,
4446 N, N, N, N, N, N, N, N,
4448 N, N, N, N, N, N, N, N,
4450 N, N, N, N, N, N, N, N,
4452 N, N, N, N, N, N, N, N,
4454 N, N, N, N, N, N, N, N,
4457 static const struct escape escape_db = { {
4458 N, N, N, N, N, N, N, N,
4461 N, N, N, N, N, N, N, N,
4463 N, N, N, N, N, N, N, N,
4465 N, N, N, N, N, N, N, N,
4467 N, N, N, N, N, N, N, N,
4469 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4471 N, N, N, N, N, N, N, N,
4473 N, N, N, N, N, N, N, N,
4475 N, N, N, N, N, N, N, N,
4478 static const struct escape escape_dd = { {
4479 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4482 N, N, N, N, N, N, N, N,
4484 N, N, N, N, N, N, N, N,
4486 N, N, N, N, N, N, N, N,
4488 N, N, N, N, N, N, N, N,
4490 N, N, N, N, N, N, N, N,
4492 N, N, N, N, N, N, N, N,
4494 N, N, N, N, N, N, N, N,
4496 N, N, N, N, N, N, N, N,
4499 static const struct instr_dual instr_dual_0f_c3 = {
4500 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4503 static const struct mode_dual mode_dual_63 = {
4504 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4507 static const struct opcode opcode_table[256] = {
4509 F6ALU(Lock, em_add),
4510 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4511 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4513 F6ALU(Lock | PageTable, em_or),
4514 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4517 F6ALU(Lock, em_adc),
4518 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4519 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4521 F6ALU(Lock, em_sbb),
4522 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4523 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4525 F6ALU(Lock | PageTable, em_and), N, N,
4527 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4529 F6ALU(Lock, em_xor), N, N,
4531 F6ALU(NoWrite, em_cmp), N, N,
4533 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4535 X8(I(SrcReg | Stack, em_push)),
4537 X8(I(DstReg | Stack, em_pop)),
4539 I(ImplicitOps | Stack | No64, em_pusha),
4540 I(ImplicitOps | Stack | No64, em_popa),
4541 N, MD(ModRM, &mode_dual_63),
4544 I(SrcImm | Mov | Stack, em_push),
4545 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4546 I(SrcImmByte | Mov | Stack, em_push),
4547 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4548 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4549 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4551 X16(D(SrcImmByte | NearBranch)),
4553 G(ByteOp | DstMem | SrcImm, group1),
4554 G(DstMem | SrcImm, group1),
4555 G(ByteOp | DstMem | SrcImm | No64, group1),
4556 G(DstMem | SrcImmByte, group1),
4557 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4558 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4560 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4561 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4562 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4563 D(ModRM | SrcMem | NoAccess | DstReg),
4564 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4567 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4569 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4570 I(SrcImmFAddr | No64, em_call_far), N,
4571 II(ImplicitOps | Stack, em_pushf, pushf),
4572 II(ImplicitOps | Stack, em_popf, popf),
4573 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4575 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4576 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4577 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4578 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4580 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4581 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4582 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4583 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4585 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4587 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4589 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4590 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4591 I(ImplicitOps | NearBranch, em_ret),
4592 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4593 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4594 G(ByteOp, group11), G(0, group11),
4596 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4597 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4598 I(ImplicitOps, em_ret_far),
4599 D(ImplicitOps), DI(SrcImmByte, intn),
4600 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4602 G(Src2One | ByteOp, group2), G(Src2One, group2),
4603 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4604 I(DstAcc | SrcImmUByte | No64, em_aam),
4605 I(DstAcc | SrcImmUByte | No64, em_aad),
4606 F(DstAcc | ByteOp | No64, em_salc),
4607 I(DstAcc | SrcXLat | ByteOp, em_mov),
4609 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4611 X3(I(SrcImmByte | NearBranch, em_loop)),
4612 I(SrcImmByte | NearBranch, em_jcxz),
4613 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4614 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4616 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4617 I(SrcImmFAddr | No64, em_jmp_far),
4618 D(SrcImmByte | ImplicitOps | NearBranch),
4619 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4620 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4622 N, DI(ImplicitOps, icebp), N, N,
4623 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4624 G(ByteOp, group3), G(0, group3),
4626 D(ImplicitOps), D(ImplicitOps),
4627 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
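/*
 * Note: the tables above and below are indexed directly by opcode byte.
 * The D/I/F/G/GD/GP/E/ID/MD entry macros (defined earlier in this file)
 * record the decode flags and, where applicable, the emulation callback
 * for each opcode; N marks an opcode the emulator does not handle.
 */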
static const struct opcode twobyte_table[256] = {
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N, N, N, N, N, N, N,
	X16(D(DstReg | SrcMem | ModRM)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	X16(D(SrcImm | NearBranch)),
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	X8(I(DstReg, em_bswap)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
 * The instructions below are selected by the mandatory prefix (none,
 * 0x66, 0xf2 or 0xf3) and indexed by the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N),
};
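/*
 * Illustrative example of the selection chain above: for 0f 38 f0 /r
 * (movbe), decode reaches opcode_map_0f_38[0xf0], a GP entry.  The
 * Prefix step then picks pfx_no from three_byte_0f_38_f0 (only the
 * unprefixed form is handled; pfx_66/pfx_f2/pfx_f3 are N), and the
 * InstrDual step picks the form by ModRM.mod: mod != 3 selects
 * I(DstReg | SrcMem | Mov, em_movbe), mod == 3 selects N.
 */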
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1: op->val = insn_fetch(s8, ctxt); break;
	case 2: op->val = insn_fetch(s16, ctxt); break;
	case 4: op->val = insn_fetch(s32, ctxt); break;
	case 8: op->val = insn_fetch(s64, ctxt); break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1: op->val &= 0xff; break;
		case 2: op->val &= 0xffff; break;
		case 4: op->val &= 0xffffffff; break;
		}
	}
done:
	return rc;
}
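/*
 * Example of the two flavours: the group1 encoding 83 /0 ib
 * (add r/m32, imm8) decodes its immediate via decode_imm(ctxt, op, 1,
 * true), so a byte of 0xff becomes -1, while SrcImmUByte operands
 * (e.g. I/O port numbers for in/out) pass sign_extension == false and
 * stay 0xff.
 */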
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES: op->type = OP_IMM; op->val = VCPU_SREG_ES; break;
	case OpCS: op->type = OP_IMM; op->val = VCPU_SREG_CS; break;
	case OpSS: op->type = OP_IMM; op->val = VCPU_SREG_SS; break;
	case OpDS: op->type = OP_IMM; op->val = VCPU_SREG_DS; break;
	case OpFS: op->type = OP_IMM; op->val = VCPU_SREG_FS; break;
	case OpGS: op->type = OP_IMM; op->val = VCPU_SREG_GS; break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}
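/*
 * decode_operand() is called up to three times per instruction (see
 * x86_decode_insn() below), once each for src, src2 and dst, with 'd'
 * holding the operand type pulled out of ctxt->d.  All the OpMem*
 * variants share mem_common, which copies ctxt->memop as prepared by
 * the earlier ModRM/SIB or absolute-address decode.
 */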
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	u16 dummy;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;
	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
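/*
 * The XOR trick in the prefix loop above relies on each legal size pair
 * differing by a single toggle value: 2 ^ 6 == 4 and 4 ^ 6 == 2 flip
 * the operand/address size between 16 and 32 bits, while 4 ^ 12 == 8
 * and 8 ^ 12 == 4 flip the address size between 32 and 64 bits in long
 * mode.
 */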
	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}
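/*
 * The c4/c5 check above works because those bytes are inherently
 * ambiguous: in 32-bit modes they encode LES/LDS unless the ModRM mod
 * field is 3 (which is invalid for LES/LDS and so signals a VEX
 * prefix), while in 64-bit mode they can only be VEX.  Marking the
 * instruction NotImpl makes the decoder bail out instead of
 * misinterpreting AVX instructions.
 */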
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}
	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}
	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
			ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
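/*
 * To summarise x86_decode_insn(): prefixes, opcode byte(s), table
 * lookup, group/prefix/escape resolution, then ModRM/SIB and operand
 * decode.  Nothing is executed here; the result is a fully described
 * instruction in ctxt (flags in ctxt->d, callback in ctxt->execute,
 * operands in ctxt->src/src2/dst) that x86_emulate_insn() can act on.
 */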
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE/REPZ and
	 * REPNE/REPNZ.  For those prefixes, test the flag-based condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}
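/*
 * Only cmps (0xa6/0xa7) and scas (0xae/0xaf) consult ZF here; plain
 * rep movs/stos/lods terminate on RCX alone, which is checked before
 * each iteration in x86_emulate_insn().
 */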
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->ops->get_fpu(ctxt);
	rc = asm_safe("fwait");
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}
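/*
 * The fwait above forces any pending x87 exception to surface (and be
 * caught by asm_safe()) before the emulator touches MMX state,
 * mirroring the #MF a real CPU would raise when the guest executed the
 * MMX instruction.
 */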
static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
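/*
 * fastop() dispatch in a nutshell: each fastop table packs the 1/2/4/8
 * byte variants of an ALU operation at FASTOP_SIZE strides, so
 * __ffs(dst.bytes) (0, 1, 2 or 3) selects the right entry.  The inline
 * asm loads the guest flags, calls the stub with dst in rax, src in
 * rdx and src2 in rcx, then saves the resulting flags back; a stub
 * that hit an exception returns NULL in the fop register, which is
 * turned into a guest #DE here.
 */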
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}
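/*
 * Note that the memset above relies on the layout of struct
 * x86_emulate_ctxt: every per-instruction decode field from
 * rip_relative up to (but not including) modrm is cleared in one
 * sweep, so new decode fields must be placed inside that range to be
 * reset between instructions.
 */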
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}
	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}
		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}
	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the PIO read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
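/*
 * x86_emulate_insn() thus reports one of four outcomes to the caller:
 * EMULATION_OK (advance past the instruction), EMULATION_RESTART
 * (re-enter the emulator for the next string iteration),
 * EMULATION_INTERCEPTED (a nested-guest intercept fired) or
 * EMULATION_FAILED.
 */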
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}