/******************************************************************************
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
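
/*
 * Illustrative sketch (not part of the emulator): each opcode descriptor
 * packs an operand type into a 5-bit field (OpBits wide) of a u64, at one
 * of the Dst/Src/Src2 shifts defined below.  The helper names here are
 * hypothetical and only demonstrate the packing arithmetic.
 */
#if 0
#include <stdint.h>

#define OP_BITS 5
#define OP_MASK ((1ull << OP_BITS) - 1)

/* store operand type "op" in the 5-bit field at "shift" */
static uint64_t pack_op(uint64_t flags, unsigned shift, uint64_t op)
{
	return (flags & ~(OP_MASK << shift)) | (op << shift);
}

/* recover the operand type from the field at "shift" */
static unsigned unpack_op(uint64_t flags, unsigned shift)
{
	return (flags >> shift) & OP_MASK;
}
/* e.g. unpack_op(pack_op(0, 1, OpMem), 1) == OpMem */
#endif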
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
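
/*
 * Illustrative sketch (not part of the emulator): the X* macros above are
 * repetition helpers for building the opcode tables.  X16(x) pastes its
 * argument sixteen times, so a table row of sixteen identical entries can
 * be written as one X16(...) use.  The array name below is hypothetical.
 */
#if 0
static const int sixteen_zeros[] = { X16(0) };	/* expands to 0, 0, ... x16 */
/* sizeof(sixteen_zeros) / sizeof(sixteen_zeros[0]) == 16 */
#endif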
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
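
/*
 * Illustrative sketch (not part of the emulator): because every fastop
 * body is padded to FASTOP_SIZE bytes and emitted in b/w/l/q order, the
 * entry point for a given operand size can be computed rather than looked
 * up.  This is a minimal model of that dispatch arithmetic only; the real
 * fastop() below also shuffles rflags around the call.  "em_add" stands in
 * for a label emitted by FASTOP2(add).
 */
#if 0
#include <stdint.h>

#define FASTOP_SIZE 8

/* log2 of a power-of-two operand size: 1->0, 2->1, 4->2, 8->3 */
static unsigned int size_index(unsigned int bytes)
{
	unsigned int i = 0;

	while (bytes >>= 1)
		i++;
	return i;
}

static void *fastop_entry(void *em_add, unsigned int op_bytes)
{
	return (uint8_t *)em_add + size_index(op_bytes) * FASTOP_SIZE;
}
#endif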
struct opcode {
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};
/* EFLAGS bit definitions. */
#define EFLG_ID   (1<<21)
#define EFLG_VIP  (1<<20)
#define EFLG_VIF  (1<<19)
#define EFLG_AC   (1<<18)
#define EFLG_VM   (1<<17)
#define EFLG_RF   (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT   (1<<14)
#define EFLG_OF   (1<<11)
#define EFLG_DF   (1<<10)
#define EFLG_IF   (1<<9)
#define EFLG_TF   (1<<8)
#define EFLG_SF   (1<<7)
#define EFLG_ZF   (1<<6)
#define EFLG_AF   (1<<4)
#define EFLG_PF   (1<<2)
#define EFLG_CF   (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
#define FASTOP1(op) \
	ON64(FOP1E(op##q, rax)) \

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	ON64(FOP1E(op, rcx)) \

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	ON64(FOP1EEX(op, rcx)) \

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
static void assign_masked(ulong *dest, ulong src, ulong mask)
	*dest = (*dest & ~mask) | (src & mask);

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
	return (1UL << (ctxt->ad_bytes << 3)) - 1;

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */

static int stack_size(struct x86_emulate_ctxt *ctxt)
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
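
/*
 * Illustrative sketch (not part of the emulator): the expression in
 * stack_mask() turns the SS descriptor's d bit into an operand mask
 * without a branch.  A quick check of the arithmetic:
 */
#if 0
#include <assert.h>

static unsigned int mask_for_d(unsigned int d)
{
	return ~0U >> ((d ^ 1) * 16);	/* d=0: 0xffff, d=1: 0xffffffff */
}

static void check_stack_mask(void)
{
	assert(mask_for_d(0) == 0xffffU);	/* 16-bit stack segment */
	assert(mask_for_d(1) == 0xffffffffU);	/* 32-bit stack segment */
}
#endif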
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
	return address_mask(ctxt, reg);

static void masked_increment(ulong *reg, ulong mask, int inc)
	assign_masked(reg, *reg + inc, mask);

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);

static u32 desc_limit_scaled(struct desc_struct *desc)
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
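
/*
 * Illustrative sketch (not part of the emulator): with the granularity
 * bit set, the 20-bit descriptor limit counts 4K pages, so it is scaled
 * by shifting left 12 and filling the low bits, exactly as
 * desc_limit_scaled() does above.
 */
#if 0
#include <stdint.h>

static uint32_t scale_limit(uint32_t limit, int g)
{
	return g ? (limit << 12) | 0xfff : limit;
}
/* scale_limit(0xfffff, 1) == 0xffffffff: a flat 4GB segment */
#endif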
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;

static int emulate_db(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, DB_VECTOR, 0, false);

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
	return emulate_exception(ctxt, GP_VECTOR, err, true);

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
	return emulate_exception(ctxt, SS_VECTOR, err, true);

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, UD_VECTOR, 0, false);

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
	return emulate_exception(ctxt, TS_VECTOR, err, true);

static int emulate_de(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, DE_VECTOR, 0, false);

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, NM_VECTOR, 0, false);

static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 bool cs_l)
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst >> 32) != 0))
			return emulate_gp(ctxt, 0);
		break;
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
	return assign_eip_near(ctxt, ctxt->_eip + rel);
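
/*
 * Illustrative sketch (not part of the emulator): assign_eip_far() above
 * truncates the branch target to the operand size, so e.g. a 16-bit near
 * jump wraps within 64K.  A minimal model of that truncation, under the
 * assumption that the 64-bit case is validated separately:
 */
#if 0
#include <stdint.h>

static uint64_t truncate_eip(uint64_t dst, int op_bytes)
{
	switch (op_bytes) {
	case 2: return (uint16_t)dst;	/* 16-bit code segment */
	case 4: return (uint32_t)dst;	/* 32-bit code segment */
	default: return dst;		/* 64-bit: canonical check instead */
	}
}
#endif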
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       ulong *linear)
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) +
	     (fetch || ctxt->ad_bytes == 8 ? addr.ea : (u32)addr.ea);

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			return emulate_gp(ctxt, 0);

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);

		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
		    (ctxt->d & NoBigReal)) {
			/* la is between zero and 0xffff */

			*max_size = 0x10000 - la;
		} else if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */

			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		} else {
			/* expand-down segment */

			lim = desc.d ? 0xffffffff : 0xffff;

			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		}
		if (size > *max_size)
			goto bad;
		cpl = ctxt->ops->cpl(ctxt);

		/* data segment or readable code segment */

		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */

		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */

	if (ctxt->mode != X86EMUL_MODE_PROT64)
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false, linear);

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	/* 15UL ^ cur_size == 15 - cur_size, since cur_size <= 15 */
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
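
/*
 * Illustrative note (not additional emulator code): decode paths below use
 * these macros as expressions, e.g. pulling a SIB byte or a displacement
 * out of the prefetched cache while advancing _eip:
 *
 *	sib = insn_fetch(u8, ctxt);
 *	modrm_ea += insn_fetch(s32, ctxt);
 *
 * On a fetch failure the macro jumps to the enclosing function's "done"
 * label with rc already set.
 */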
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
	int rc;

	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
static void fetch_register_operand(struct operand *op)
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	ctxt->ops->put_fpu(ctxt);

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	ctxt->ops->put_fpu(ctxt);
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);
static int em_fninit(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {

	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
					       ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);

		if (ctxt->d & Mmx) {
			op->addr.mm = ctxt->modrm_rm & 7;

		fetch_register_operand(op);

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;

		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;

		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
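
/*
 * Illustrative sketch (not part of the emulator): for memory forms of
 * BT/BTS/BTR/BTC, the register bit offset selects both a byte delta
 * (added to the effective address) and a residual bit index, as
 * fetch_bit_operand() computes above.  In plain arithmetic:
 */
#if 0
#include <stdint.h>

struct bit_addr {
	int64_t byte_delta;	/* added to the memory operand's ea */
	unsigned bit;		/* bit index within the operand */
};

static struct bit_addr split_bit_offset(int64_t off, unsigned op_bytes)
{
	unsigned width = op_bytes * 8;
	int64_t sv = off & ~((int64_t)width - 1);	/* whole-operand part */
	struct bit_addr r = { sv >> 3, (unsigned)(off & (width - 1)) };

	return r;
}
#endif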
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);

	return X86EMUL_CONTINUE;

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);

		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
	}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
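
/*
 * Illustrative sketch (not part of the emulator): a segment selector is
 * index:13 | TI:1 | RPL:2, which is why the code above tests bit 2 to
 * choose the LDT over the GDT, and the descriptor readers below use
 * "selector >> 3" as the table index and "selector & 3" as the RPL.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static unsigned sel_index(uint16_t sel) { return sel >> 3; }	/* table slot */
static bool sel_in_ldt(uint16_t sel)    { return sel & (1 << 2); }	/* TI bit */
static unsigned sel_rpl(uint16_t sel)   { return sel & 3; }	/* requested PL */
#endif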
/* allowed just for 8-byte segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);

/* allowed just for 8-byte segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
	struct desc_struct seg_desc, old_desc;
	unsigned err_vec = GP_VECTOR;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {

			if (rpl > cpl || dpl != cpl)
				goto exception;

		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);

	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
static void write_register_operand(struct operand *op)
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	}
	return X86EMUL_CONTINUE;
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);

static int em_push(struct x86_emulate_ctxt *ctxt)
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;

static int em_pop(struct x86_emulate_ctxt *ctxt)
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;

static int em_popf(struct x86_emulate_ctxt *ctxt)
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);

static int em_enter(struct x86_emulate_ctxt *ctxt)
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;

static int em_leave(struct x86_emulate_ctxt *ctxt)
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
static int em_pusha(struct x86_emulate_ctxt *ctxt)
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp
						       : reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}
	return rc;

static int em_pushf(struct x86_emulate_ctxt *ctxt)
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);

static int em_popa(struct x86_emulate_ctxt *ctxt)
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;

static int em_iret(struct x86_emulate_ctxt *ctxt)
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
	return assign_eip_near(ctxt, ctxt->src.val);

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
				  (u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
static int em_ret(struct x86_emulate_ctxt *ctxt)
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;

static int em_lseg(struct x86_emulate_ctxt *ctxt)
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->dpl = 0;		/* will be adjusted later */

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
	u32 eax, ebx, ecx, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in long mode, so only become
	 * vendor specific (via cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * A 64-bit guest running a 32-bit compat application will #UD!
	 * While this behaviour could be fixed (by emulating the AMD
	 * response), AMD CPUs can't be made to behave like Intel ones.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
static int em_syscall(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode)
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
2463 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2466 if (ctxt->mode == X86EMUL_MODE_REAL)
2468 if (ctxt->mode == X86EMUL_MODE_VM86)
2470 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2471 return ctxt->ops->cpl(ctxt) > iopl;
2474 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2477 const struct x86_emulate_ops *ops = ctxt->ops;
2478 struct desc_struct tr_seg;
2481 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2482 unsigned mask = (1 << len) - 1;
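	/*
	 * Per the SDM, the I/O map base lives at offset 102 of the TSS;
	 * the bitmap it points to holds one bit per port, and any set
	 * bit in the accessed range denies access.  Two bytes are read
	 * so an access straddling a byte boundary is still covered.
	 */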
	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (desc_limit_scaled(&tr_seg) < 103)
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
	if ((perm >> bit_idx) & mask)
static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
	ctxt->perm_ok = true;
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	 * Now load segment descriptors.  If a fault happens at this
	 * stage, it is handled in the context of the new task.
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
	if (ret != X86EMUL_CONTINUE)
	return X86EMUL_CONTINUE;
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	u32 new_tss_base = get_desc_base(new_desc);
	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
	save_state_to_tss16(ctxt, &tss_seg);
	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;
		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
	return load_state_from_tss16(ctxt, &tss_seg);
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		ctxt->mode = X86EMUL_MODE_PROT32;
	 * Now load segment descriptors.  If a fault happens at this
	 * stage, it is handled in the context of the new task.
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
	if (ret != X86EMUL_CONTINUE)
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
	if (ret != X86EMUL_CONTINUE)
	return X86EMUL_CONTINUE;
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
	save_state_to_tss32(ctxt, &tss_seg);
	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;
		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
	return load_state_from_tss32(ctxt, &tss_seg);
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	/* FIXME: old_tss_base == ~0 ? */
	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
	/* FIXME: check that next_tss_desc is tss */
	 * Check privileges. The three cases are task switch caused by...
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			ret = read_interrupt_descriptor(ctxt, idt_index,
			if (ret != X86EMUL_CONTINUE)
			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
	/* Set the back link to the previous task only if the NT bit is set
	 * in EFLAGS; note that old_tss_sel is not used after this point. */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;
	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;
	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);
	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
static int em_das(struct x86_emulate_ctxt *ctxt)
	bool af, cf, old_cf;
	cf = ctxt->eflags & X86_EFLAGS_CF;
	af = ctxt->eflags & X86_EFLAGS_AF;
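	/*
	 * DAS decimal-adjusts AL after a subtraction (per the SDM):
	 * subtract 6 if the low nibble is greater than 9 or AF is set,
	 * then subtract 0x60 if the original AL exceeded 0x99 or CF
	 * was set.
	 */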
	if ((al & 0x0f) > 9 || af) {
		cf = old_cf | (al >= 250);
	if (old_al > 0x99 || old_cf) {
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
		ctxt->eflags |= X86_EFLAGS_CF;
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
static int em_aam(struct x86_emulate_ctxt *ctxt)
	if (ctxt->src.val == 0)
		return emulate_de(ctxt);
	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;
	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	return X86EMUL_CONTINUE;
static int em_aad(struct x86_emulate_ctxt *ctxt)
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;
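	/*
	 * AAD computes AL = (AL + AH * imm8) & 0xff and clears AH; the
	 * immediate is the base, 0x0a for the canonical encoding.
	 */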
	al = (al + (ah * ctxt->src.val)) & 0xff;
	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	return X86EMUL_CONTINUE;
static int em_call(struct x86_emulate_ctxt *ctxt)
	long rel = ctxt->src.val;
	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
	return em_push(ctxt);
static int em_call_far(struct x86_emulate_ctxt *ctxt)
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE)
	ctxt->src.val = old_cs;
	if (rc != X86EMUL_CONTINUE)
	ctxt->src.val = old_eip;
	/* If we failed, we tainted the memory, but the very least we
	 * should restore CS. */
	if (rc != X86EMUL_CONTINUE)
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
static int em_xchg(struct x86_emulate_ctxt *ctxt)
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);
	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
static int em_cwd(struct x86_emulate_ctxt *ctxt)
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
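	/*
	 * Replicate the source's sign bit throughout rDX: the shift
	 * isolates the sign bit, so (sign - 1) is 0 for negative values
	 * and ~0 otherwise, and the complement inverts that.  This
	 * implements CWD/CDQ/CQO.
	 */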
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
	return X86EMUL_CONTINUE;
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
static int em_mov(struct x86_emulate_ctxt *ctxt)
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
#define FFL(x) bit(X86_FEATURE_##x)
static int em_movbe(struct x86_emulate_ctxt *ctxt)
	u32 ebx, ecx, edx, eax = 1;
	 * Check that MOVBE is set in the guest-visible CPUID leaf.
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);
	switch (ctxt->op_bytes) {
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules, so we have to do the operation almost by hand.
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		ctxt->dst.val = swab64(ctxt->src.val);
	return X86EMUL_CONTINUE;
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
static int em_dr_write(struct x86_emulate_ctxt *ctxt)
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
		val = ctxt->src.val & ~0U;
	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);
	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
	u16 sel = ctxt->src.val;
	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);
	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
static int em_lldt(struct x86_emulate_ctxt *ctxt)
	u16 sel = ctxt->src.val;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
static int em_ltr(struct x86_emulate_ctxt *ctxt)
	u16 sel = ctxt->src.val;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
static int em_clts(struct x86_emulate_ctxt *ctxt)
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
static int em_vmcall(struct x86_emulate_ctxt *ctxt)
	int rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
	struct desc_ptr desc_ptr;
	if (ctxt->mode == X86EMUL_MODE_PROT64)
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		desc_ptr.address &= 0x00ffffff;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
static int em_sidt(struct x86_emulate_ctxt *ctxt)
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
	struct desc_ptr desc_ptr;
	if (ctxt->mode == X86EMUL_MODE_PROT64)
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
	if (rc != X86EMUL_CONTINUE)
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    is_noncanonical_address(desc_ptr.address))
		return emulate_gp(ctxt, 0);
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
	return em_lgdt_lidt(ctxt, true);
static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
	rc = ctxt->ops->fix_hypercall(ctxt);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
static int em_lidt(struct x86_emulate_ctxt *ctxt)
	return em_lgdt_lidt(ctxt, false);
static int em_smsw(struct x86_emulate_ctxt *ctxt)
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
static int em_loop(struct x86_emulate_ctxt *ctxt)
	int rc = X86EMUL_CONTINUE;
	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);
static int em_jcxz(struct x86_emulate_ctxt *ctxt)
	int rc = X86EMUL_CONTINUE;
	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);
static int em_in(struct x86_emulate_ctxt *ctxt)
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
		return X86EMUL_IO_NEEDED;
	return X86EMUL_CONTINUE;
static int em_out(struct x86_emulate_ctxt *ctxt)
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
static int em_cli(struct x86_emulate_ctxt *ctxt)
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);
	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
static int em_sti(struct x86_emulate_ctxt *ctxt)
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);
	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
	u32 eax, ebx, ecx, edx;
	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
static int em_sahf(struct x86_emulate_ctxt *ctxt)
	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
static int em_lahf(struct x86_emulate_ctxt *ctxt)
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
static int em_bswap(struct x86_emulate_ctxt *ctxt)
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
		asm("bswap %0" : "+r"(ctxt->dst.val));
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
	return X86EMUL_CONTINUE;
static int em_clflush(struct x86_emulate_ctxt *ctxt)
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
static bool valid_cr(int nr)
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);
	return X86EMUL_CONTINUE;
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		return emulate_ud(ctxt);
	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
			return emulate_gp(ctxt, 0);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
	ctxt->ops->get_dr(ctxt, 7, &dr7);
	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
	int dr = ctxt->modrm_reg;
		return emulate_ud(ctxt);
	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);
	if (check_dr7_gd(ctxt)) {
		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	return X86EMUL_CONTINUE;
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;
	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);
	return check_dr_read(ctxt);
static int check_svme(struct x86_emulate_ctxt *ctxt)
	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);
	return X86EMUL_CONTINUE;
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);
	return check_svme(ctxt);
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);
	return X86EMUL_CONTINUE;
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
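/*
 * F6ALU instantiates the six classic ALU encodings: r/m,reg and
 * reg,r/m and accumulator,immediate, each in byte and operand-size
 * variants.
 */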
static const struct opcode group7_rm0[] = {
	I(SrcNone | Priv | EmulateOnUD, em_vmcall),
static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
	DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | Prot | Priv, invlpga, check_svme),
static const struct opcode group7_rm7[] = {
	DIP(SrcNone, rdtscp, check_rdtsc),
static const struct opcode group1[] = {
	F(Lock | PageTable, em_or),
	F(Lock | PageTable, em_and),
static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl), /* /6 is an undocumented SHL (SAL) alias */
	F(DstMem | ModRM, em_sar),
static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock, em_inc),
	F(DstMem | SrcNone | Lock, em_dec),
	I(SrcMem | NearBranch, em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
	I(SrcMem | NearBranch, em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps, em_jmp_far),
	I(SrcMem | Stack, em_push), D(Undefined),
static const struct opcode group6[] = {
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
static const struct group_dual group7 = { {
	II(Mov | DstMem, em_sgdt, sgdt),
	II(Mov | DstMem, em_sidt, sidt),
	II(SrcMem | Priv, em_lgdt, lgdt),
	II(SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
static const struct opcode group8[] = {
	F(DstMem | SrcImmByte | NoWrite, em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
	F(DstMem | SrcImmByte | Lock, em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
static const struct group_dual group15 = { {
	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
	N, N, N, N, N, N, N, N,
static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
static const struct gprefix pfx_0f_2b = {
	I(0, em_mov), I(0, em_mov), N, N,
static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
static const struct opcode opcode_table[256] = {
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	F6ALU(Lock | PageTable, em_and), N, N,
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	F6ALU(Lock, em_xor), N, N,
	F6ALU(NoWrite, em_cmp), N, N,
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	X8(I(SrcReg | Stack, em_push)),
	X8(I(DstReg | Stack, em_pop)),
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	X16(D(SrcImmByte | NearBranch)),
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
static const struct opcode twobyte_table[256] = {
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N, N, N, N, N, N, N,
	X16(D(DstReg | SrcMem | ModRM)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	X16(D(SrcImm | NearBranch)),
	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov),
	N, N, N, GD(0, &group9),
	X8(I(DstReg, em_bswap)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
static const struct gprefix three_byte_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
static const struct gprefix three_byte_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
/*
 * Insns below are selected by the prefix, which is indexed by the third
 * opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
	int rc = X86EMUL_CONTINUE;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
		op->val = insn_fetch(s8, ctxt);
		op->val = insn_fetch(s16, ctxt);
		op->val = insn_fetch(s32, ctxt);
		op->val = insn_fetch(s64, ctxt);
	if (!sign_extension) {
		switch (op->bytes) {
			op->val &= 0xffffffff;
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
	int rc = X86EMUL_CONTINUE;
		decode_register_operand(ctxt, op);
		rc = decode_imm(ctxt, op, 1, false);
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		if (ctxt->d & ByteOp) {
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
		op->addr.mem.seg = VCPU_SREG_ES;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		rc = decode_imm(ctxt, op, 1, true);
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		ctxt->memop.bytes = 2;
		ctxt->memop.bytes = 4;
		rc = decode_imm(ctxt, op, 2, false);
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
		op->addr.mem.seg = ctxt->seg_override;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
			register_address(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		op->val = VCPU_SREG_ES;
		op->val = VCPU_SREG_CS;
		op->val = VCPU_SREG_SS;
		op->val = VCPU_SREG_DS;
		op->val = VCPU_SREG_FS;
		op->val = VCPU_SREG_GS;
		/* Special instructions do their own operand decoding. */
		op->type = OP_NONE; /* Disable writeback. */
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	memcpy(ctxt->fetch.data, insn, insn_len);
	rc = __do_insn_fetch_bytes(ctxt, 1);
	if (rc != X86EMUL_CONTINUE)
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		return EMULATION_FAILED;
	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;
	/* Legacy prefixes. */
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
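			/*
			 * Bits 4:3 of these legacy prefixes encode
			 * ES/CS/SS/DS, matching the VCPU_SREG_* numbering.
			 */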
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
			ctxt->rex_prefix = ctxt->b;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];
		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
	ctxt->d = opcode.flags;
	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);
	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
				opcode = opcode.u.gdual->mod012[goffset];
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			return EMULATION_FAILED;
		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
		return EMULATION_FAILED;
	ctxt->execute = opcode.u.execute;
	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;
	if (unlikely(ctxt->d &
		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;
		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;
		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
			else if (ctxt->d & NearBranch)
		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;
	ctxt->memop.addr.mem.seg = ctxt->seg_override;
	 * Decode and fetch the source operand: register, memory
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
	 * Decode and fetch the second source operand: register, memory
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
	if (ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea += ctxt->_eip;
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
	return ctxt->d & PageTable;
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
	/* The second termination condition applies only to REPE
	 * and REPNE.  If the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ, check the corresponding
	 * termination condition:
	 * - if REPE/REPZ and ZF = 0 then done
	 * - if REPNE/REPNZ and ZF = 1 then done
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
	ctxt->ops->get_fpu(ctxt);
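	/*
	 * Execute fwait under an exception fixup: a pending x87 fault
	 * traps into the fixup, which sets @fault so the fault can be
	 * reflected to the guest as #MF below.
	 */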
4683 asm volatile("1: fwait \n\t"
4685 ".pushsection .fixup,\"ax\" \n\t"
4687 "movb $1, %[fault] \n\t"
4690 _ASM_EXTABLE(1b, 3b)
4691 : [fault]"+qm"(fault));
4692 ctxt->ops->put_fpu(ctxt);
4694 if (unlikely(fault))
4695 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4697 return X86EMUL_CONTINUE;
4700 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4703 if (op->type == OP_MM)
4704 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4707 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4709 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
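	/*
	 * Each fastop provides size-specific entry points spaced
	 * FASTOP_SIZE bytes apart, ordered byte/word/long/quad; index
	 * by log2 of the operand size, with byte ops using the base
	 * entry.
	 */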
	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
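	/*
	 * Clear every decode-cache field between rip_relative and
	 * modrm with a single memset; this relies on the field layout
	 * of struct x86_emulate_ctxt.
	 */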
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	ctxt->mem_read.pos = 0;
	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
4775 * Now that we know the fpu is exception safe, we can fetch
4778 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4779 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4780 if (!(ctxt->d & Mov))
4781 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}
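
	/*
	 * Example (illustrative): "rep movsb" entered with the
	 * address-size-masked RCX already zero performs no iterations;
	 * the check above just moves RIP past the instruction and
	 * clears RF, as hardware would.
	 */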
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:
	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;
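
	/*
	 * RF handling above mirrors the architectural rule: a restartable
	 * REP string instruction keeps EFLAGS.RF set so an instruction
	 * breakpoint at its RIP is not re-reported on every iteration;
	 * everything else clears RF on completion.
	 */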
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
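
	/*
	 * Most opcodes are dispatched through ->execute or a fastop stub
	 * installed by the decode tables; the switch below carries only
	 * the remaining open-coded one-byte cases.
	 */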
	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
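	/*
	 * Example (illustrative): cbw with op_bytes == 2 and AL == 0x80
	 * sign-extends through the (s8) cast above, leaving AX == 0xff80;
	 * cwde and cdqe follow the same pattern at 4 and 8 bytes.
	 */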
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
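
	/*
	 * For the string forms above, RSI/RDI advance (or retreat, when
	 * EFLAGS.DF is set) by the number of bytes actually processed, so
	 * a restarted instruction resumes at the correct addresses.
	 */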
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
				-count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;
done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
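	/*
	 * Example (illustrative): with a 0xff source byte, movzx yields
	 * 0x000000ff via the (u8) cast while movsx yields 0xffffffff via
	 * the (s8) cast; 16-bit sources take the u16/s16 arms instead.
	 */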
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
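
/*
 * Hypothetical caller sketch (not part of this file): the x86.c side is
 * expected to dispatch on the result of x86_emulate_insn() roughly as
 *
 *	switch (x86_emulate_insn(ctxt)) {
 *	case EMULATION_OK:		break;			(advance the guest)
 *	case EMULATION_RESTART:		return reenter_guest();	(rerun this insn)
 *	case EMULATION_INTERCEPTED:	return handle_nested_exit();
 *	case EMULATION_FAILED:		return handle_emulation_failure();
 *	}
 *
 * where reenter_guest() and the handle_*() helpers are stand-ins for the
 * real callbacks, not functions defined here.
 */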