1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 #include <linux/stringify.h>
35 #define OpNone 0ull
36 #define OpImplicit 1ull /* No generic decode */
37 #define OpReg 2ull /* Register */
38 #define OpMem 3ull /* Memory */
39 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
40 #define OpDI 5ull /* ES:DI/EDI/RDI */
41 #define OpMem64 6ull /* Memory, 64-bit */
42 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
43 #define OpDX 8ull /* DX register */
44 #define OpCL 9ull /* CL register (for shifts) */
45 #define OpImmByte 10ull /* 8-bit sign extended immediate */
46 #define OpOne 11ull /* Implied 1 */
47 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
48 #define OpMem16 13ull /* Memory operand (16-bit). */
49 #define OpMem32 14ull /* Memory operand (32-bit). */
50 #define OpImmU 15ull /* Immediate operand, zero extended */
51 #define OpSI 16ull /* SI/ESI/RSI */
52 #define OpImmFAddr 17ull /* Immediate far address */
53 #define OpMemFAddr 18ull /* Far address in memory */
54 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
55 #define OpES 20ull /* ES */
56 #define OpCS 21ull /* CS */
57 #define OpSS 22ull /* SS */
58 #define OpDS 23ull /* DS */
59 #define OpFS 24ull /* FS */
60 #define OpGS 25ull /* GS */
61 #define OpMem8 26ull /* 8-bit zero extended memory operand */
62 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
64 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
65 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
67 #define OpBits 5 /* Width of operand field */
68 #define OpMask ((1ull << OpBits) - 1)
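/*
 * Illustration: each operand slot (Dst/Src/Src2) is an OpBits-wide field
 * packed into the per-opcode u64 flags word, ctxt->d, at the shift
 * constants defined below, so the decoder extracts them as:
 *
 *     unsigned dst_type = (ctxt->d >> DstShift) & OpMask;
 *     unsigned src_type = (ctxt->d >> SrcShift) & OpMask;
 */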
71 * Opcode effective-address decode tables.
72 * Note that we only emulate instructions that have at least one memory
73 * operand (excluding implicit stack references). We assume that stack
74 * references and instruction fetches will never occur in special memory
75 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
76 * not be handled.
77 */
79 /* Operand sizes: 8-bit operands or specified/overridden size. */
80 #define ByteOp (1<<0) /* 8-bit operands. */
81 /* Destination operand type. */
82 #define DstShift 1
83 #define ImplicitOps (OpImplicit << DstShift)
84 #define DstReg (OpReg << DstShift)
85 #define DstMem (OpMem << DstShift)
86 #define DstAcc (OpAcc << DstShift)
87 #define DstDI (OpDI << DstShift)
88 #define DstMem64 (OpMem64 << DstShift)
89 #define DstImmUByte (OpImmUByte << DstShift)
90 #define DstDX (OpDX << DstShift)
91 #define DstAccLo (OpAccLo << DstShift)
92 #define DstMask (OpMask << DstShift)
93 /* Source operand type. */
94 #define SrcShift 6
95 #define SrcNone (OpNone << SrcShift)
96 #define SrcReg (OpReg << SrcShift)
97 #define SrcMem (OpMem << SrcShift)
98 #define SrcMem16 (OpMem16 << SrcShift)
99 #define SrcMem32 (OpMem32 << SrcShift)
100 #define SrcImm (OpImm << SrcShift)
101 #define SrcImmByte (OpImmByte << SrcShift)
102 #define SrcOne (OpOne << SrcShift)
103 #define SrcImmUByte (OpImmUByte << SrcShift)
104 #define SrcImmU (OpImmU << SrcShift)
105 #define SrcSI (OpSI << SrcShift)
106 #define SrcXLat (OpXLat << SrcShift)
107 #define SrcImmFAddr (OpImmFAddr << SrcShift)
108 #define SrcMemFAddr (OpMemFAddr << SrcShift)
109 #define SrcAcc (OpAcc << SrcShift)
110 #define SrcImmU16 (OpImmU16 << SrcShift)
111 #define SrcImm64 (OpImm64 << SrcShift)
112 #define SrcDX (OpDX << SrcShift)
113 #define SrcMem8 (OpMem8 << SrcShift)
114 #define SrcAccHi (OpAccHi << SrcShift)
115 #define SrcMask (OpMask << SrcShift)
116 #define BitOp (1<<11)
117 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
118 #define String (1<<13) /* String instruction (rep capable) */
119 #define Stack (1<<14) /* Stack instruction (push/pop) */
120 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
121 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
122 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
123 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
124 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
125 #define Escape (5<<15) /* Escape to coprocessor instruction */
126 #define Sse (1<<18) /* SSE Vector instruction */
127 /* Generic ModRM decode. */
128 #define ModRM (1<<19)
129 /* Destination is only written; never read. */
130 #define Mov (1<<20)
131 /* Misc flags */
132 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
133 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
134 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
135 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
136 #define Undefined (1<<25) /* No Such Instruction */
137 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
138 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
139 #define No64 (1<<28) /* instruction is invalid in 64-bit mode */
140 #define PageTable (1 << 29) /* instruction used to write page table */
141 #define NotImpl (1 << 30) /* instruction is not implemented */
142 /* Source 2 operand type */
143 #define Src2Shift (31)
144 #define Src2None (OpNone << Src2Shift)
145 #define Src2Mem (OpMem << Src2Shift)
146 #define Src2CL (OpCL << Src2Shift)
147 #define Src2ImmByte (OpImmByte << Src2Shift)
148 #define Src2One (OpOne << Src2Shift)
149 #define Src2Imm (OpImm << Src2Shift)
150 #define Src2ES (OpES << Src2Shift)
151 #define Src2CS (OpCS << Src2Shift)
152 #define Src2SS (OpSS << Src2Shift)
153 #define Src2DS (OpDS << Src2Shift)
154 #define Src2FS (OpFS << Src2Shift)
155 #define Src2GS (OpGS << Src2Shift)
156 #define Src2Mask (OpMask << Src2Shift)
157 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
158 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
159 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
160 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
161 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
162 #define NoWrite ((u64)1 << 45) /* No writeback */
163 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
165 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
167 #define X2(x...) x, x
168 #define X3(x...) X2(x), x
169 #define X4(x...) X2(x), X2(x)
170 #define X5(x...) X4(x), x
171 #define X6(x...) X4(x), X2(x)
172 #define X7(x...) X4(x), X3(x)
173 #define X8(x...) X4(x), X4(x)
174 #define X16(x...) X8(x), X8(x)
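/*
 * Illustration: the X-macros simply replicate their argument, e.g.
 * X4(op) expands to "op, op, op, op", so an opcode-table row such as
 * X16(D(Undefined)) (D() being the table-entry wrapper used later in
 * this file) fills 16 consecutive slots with one line.
 */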
176 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
177 #define FASTOP_SIZE 8
180 * fastop functions have a special calling convention:
182 * dst: rax (in/out)
183 * src: rdx (in/out)
184 * src2: rcx (in)
185 * flags: rflags (in/out)
186 * ex: rsi (in:fastop pointer, out:zero if exception)
188 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
189 * different operand sizes can be reached by calculation, rather than a jump
190 * table (which would be bigger than the code).
192 * fastop functions are declared as taking a never-defined fastop parameter,
193 * so they can't be called from C directly.
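/*
 * Sketch of the dispatch arithmetic this enables (not a verbatim quote
 * of the fastop() body): with one FASTOP_SIZE-byte stub per operand
 * size, emitted in b/w/l/q order, the entry for a given size is
 *
 *     fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * e.g. a 4-byte operand selects the stub __ffs(4) * 8 == 16 bytes in.
 */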
202 int (*execute)(struct x86_emulate_ctxt *ctxt);
203 const struct opcode *group;
204 const struct group_dual *gdual;
205 const struct gprefix *gprefix;
206 const struct escape *esc;
207 void (*fastop)(struct fastop *fake);
209 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
213 struct opcode mod012[8];
214 struct opcode mod3[8];
218 struct opcode pfx_no;
219 struct opcode pfx_66;
220 struct opcode pfx_f2;
221 struct opcode pfx_f3;
226 struct opcode high[64];
229 /* EFLAGS bit definitions. */
230 #define EFLG_ID (1<<21)
231 #define EFLG_VIP (1<<20)
232 #define EFLG_VIF (1<<19)
233 #define EFLG_AC (1<<18)
234 #define EFLG_VM (1<<17)
235 #define EFLG_RF (1<<16)
236 #define EFLG_IOPL (3<<12)
237 #define EFLG_NT (1<<14)
238 #define EFLG_OF (1<<11)
239 #define EFLG_DF (1<<10)
240 #define EFLG_IF (1<<9)
241 #define EFLG_TF (1<<8)
242 #define EFLG_SF (1<<7)
243 #define EFLG_ZF (1<<6)
244 #define EFLG_AF (1<<4)
245 #define EFLG_PF (1<<2)
246 #define EFLG_CF (1<<0)
248 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
249 #define EFLG_RESERVED_ONE_MASK 2
251 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
253 if (!(ctxt->regs_valid & (1 << nr))) {
254 ctxt->regs_valid |= 1 << nr;
255 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
257 return ctxt->_regs[nr];
260 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
262 ctxt->regs_valid |= 1 << nr;
263 ctxt->regs_dirty |= 1 << nr;
264 return &ctxt->_regs[nr];
267 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
270 return reg_write(ctxt, nr);
273 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
277 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
278 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
281 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
283 ctxt->regs_dirty = 0;
284 ctxt->regs_valid = 0;
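/*
 * Sketch of the lazy GPR-cache protocol built from the helpers above:
 *
 *     ulong v = reg_read(ctxt, VCPU_REGS_RAX);  // fill cache, mark valid
 *     *reg_write(ctxt, VCPU_REGS_RAX) = v + 1;  // mark dirty as well
 *     writeback_registers(ctxt);                // flush only dirty GPRs
 *     invalidate_registers(ctxt);               // reset both bitmaps
 */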
288 * These EFLAGS bits are restored from saved value during emulation, and
289 * any changes are written back to the saved value after emulation.
291 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
299 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
301 #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
302 #define FOP_RET "ret \n\t"
304 #define FOP_START(op) \
305 extern void em_##op(struct fastop *fake); \
306 asm(".pushsection .text, \"ax\" \n\t" \
307 ".global em_" #op " \n\t" \
314 #define FOPNOP() FOP_ALIGN FOP_RET
316 #define FOP1E(op, dst) \
317 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
319 #define FOP1EEX(op, dst) \
320 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
322 #define FASTOP1(op) \
327 ON64(FOP1E(op##q, rax)) \
330 /* 1-operand, using src2 (for MUL/DIV r/m) */
331 #define FASTOP1SRC2(op, name) \
336 ON64(FOP1E(op, rcx)) \
339 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
340 #define FASTOP1SRC2EX(op, name) \
345 ON64(FOP1EEX(op, rcx)) \
348 #define FOP2E(op, dst, src) \
349 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
351 #define FASTOP2(op) \
353 FOP2E(op##b, al, dl) \
354 FOP2E(op##w, ax, dx) \
355 FOP2E(op##l, eax, edx) \
356 ON64(FOP2E(op##q, rax, rdx)) \
359 /* 2 operand, word only */
360 #define FASTOP2W(op) \
363 FOP2E(op##w, ax, dx) \
364 FOP2E(op##l, eax, edx) \
365 ON64(FOP2E(op##q, rax, rdx)) \
368 /* 2 operand, src is CL */
369 #define FASTOP2CL(op) \
371 FOP2E(op##b, al, cl) \
372 FOP2E(op##w, ax, cl) \
373 FOP2E(op##l, eax, cl) \
374 ON64(FOP2E(op##q, rax, cl)) \
377 #define FOP3E(op, dst, src, src2) \
378 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
380 /* 3-operand, word-only, src2=cl */
381 #define FASTOP3WCL(op) \
384 FOP3E(op##w, ax, dx, cl) \
385 FOP3E(op##l, eax, edx, cl) \
386 ON64(FOP3E(op##q, rax, rdx, cl)) \
389 /* Special case for SETcc - 1 instruction per cc */
390 #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
392 asm(".global kvm_fastop_exception \n"
393 "kvm_fastop_exception: xor %esi, %esi; ret");
414 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
417 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
418 enum x86_intercept intercept,
419 enum x86_intercept_stage stage)
421 struct x86_instruction_info info = {
422 .intercept = intercept,
423 .rep_prefix = ctxt->rep_prefix,
424 .modrm_mod = ctxt->modrm_mod,
425 .modrm_reg = ctxt->modrm_reg,
426 .modrm_rm = ctxt->modrm_rm,
427 .src_val = ctxt->src.val64,
428 .src_bytes = ctxt->src.bytes,
429 .dst_bytes = ctxt->dst.bytes,
430 .ad_bytes = ctxt->ad_bytes,
431 .next_rip = ctxt->eip,
434 return ctxt->ops->intercept(ctxt, &info, stage);
437 static void assign_masked(ulong *dest, ulong src, ulong mask)
439 *dest = (*dest & ~mask) | (src & mask);
442 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
444 return (1UL << (ctxt->ad_bytes << 3)) - 1;
447 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
450 struct desc_struct ss;
452 if (ctxt->mode == X86EMUL_MODE_PROT64)
454 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
455 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
458 static int stack_size(struct x86_emulate_ctxt *ctxt)
460 return (__fls(stack_mask(ctxt)) + 1) >> 3;
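/* e.g. ss.d == 1 gives stack_mask() == 0xffffffff, so stack_size() == 4 */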
463 /* Access/update address held in a register, based on addressing mode. */
464 static inline unsigned long
465 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
467 if (ctxt->ad_bytes == sizeof(unsigned long))
470 return reg & ad_mask(ctxt);
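/*
 * Worked example: with ad_bytes == 2, ad_mask() is (1UL << 16) - 1 ==
 * 0xffff, so address_mask() reduces 0x12345678 to 0x5678, i.e. 16-bit
 * address wrap-around.
 */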
473 static inline unsigned long
474 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
476 return address_mask(ctxt, reg);
479 static void masked_increment(ulong *reg, ulong mask, int inc)
481 assign_masked(reg, *reg + inc, mask);
485 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
489 if (ctxt->ad_bytes == sizeof(unsigned long))
492 mask = ad_mask(ctxt);
493 masked_increment(reg, mask, inc);
496 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
498 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
501 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
503 register_address_increment(ctxt, &ctxt->_eip, rel);
506 static u32 desc_limit_scaled(struct desc_struct *desc)
508 u32 limit = get_desc_limit(desc);
510 return desc->g ? (limit << 12) | 0xfff : limit;
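/*
 * Worked example: a descriptor with g == 1 and limit == 0xfffff scales
 * to (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a full 4GB segment.
 */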
513 static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
515 ctxt->has_seg_override = true;
516 ctxt->seg_override = seg;
519 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
521 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
524 return ctxt->ops->get_cached_segment_base(ctxt, seg);
527 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
529 if (!ctxt->has_seg_override)
532 return ctxt->seg_override;
535 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
536 u32 error, bool valid)
538 ctxt->exception.vector = vec;
539 ctxt->exception.error_code = error;
540 ctxt->exception.error_code_valid = valid;
541 return X86EMUL_PROPAGATE_FAULT;
544 static int emulate_db(struct x86_emulate_ctxt *ctxt)
546 return emulate_exception(ctxt, DB_VECTOR, 0, false);
549 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
551 return emulate_exception(ctxt, GP_VECTOR, err, true);
554 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
556 return emulate_exception(ctxt, SS_VECTOR, err, true);
559 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
561 return emulate_exception(ctxt, UD_VECTOR, 0, false);
564 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
566 return emulate_exception(ctxt, TS_VECTOR, err, true);
569 static int emulate_de(struct x86_emulate_ctxt *ctxt)
571 return emulate_exception(ctxt, DE_VECTOR, 0, false);
574 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
576 return emulate_exception(ctxt, NM_VECTOR, 0, false);
579 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
582 struct desc_struct desc;
584 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
588 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
593 struct desc_struct desc;
595 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
596 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
600 * x86 defines three classes of vector instructions: explicitly
601 * aligned, explicitly unaligned, and the rest, which change behaviour
602 * depending on whether they're AVX encoded or not.
604 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
605 * subject to the same check.
607 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
609 if (likely(size < 16))
612 if (ctxt->d & Aligned)
614 else if (ctxt->d & Unaligned)
616 else if (ctxt->d & Avx)
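/*
 * Sketch of the rule this function implements (its elided tail is a
 * 16-byte alignment check): for a 16-byte access,
 *
 *     movdqa 8(%rsp), %xmm0    ; Aligned: faults unless 16-byte aligned
 *     movdqu 8(%rsp), %xmm0    ; Unaligned: never alignment-checked
 *
 * while plain SSE encodings are checked only when not AVX-encoded.
 */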
622 static int __linearize(struct x86_emulate_ctxt *ctxt,
623 struct segmented_address addr,
624 unsigned size, bool write, bool fetch,
627 struct desc_struct desc;
634 la = seg_base(ctxt, addr.seg) + addr.ea;
635 switch (ctxt->mode) {
636 case X86EMUL_MODE_PROT64:
637 if (((signed long)la << 16) >> 16 != la)
638 return emulate_gp(ctxt, 0);
641 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
645 /* code segment in protected mode or read-only data segment */
646 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
647 || !(desc.type & 2)) && write)
649 /* unreadable code segment */
650 if (!fetch && (desc.type & 8) && !(desc.type & 2))
652 lim = desc_limit_scaled(&desc);
653 if ((desc.type & 8) || !(desc.type & 4)) {
654 /* expand-up segment */
655 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
658 /* expand-down segment */
659 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
661 lim = desc.d ? 0xffffffff : 0xffff;
662 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
665 cpl = ctxt->ops->cpl(ctxt);
666 if (!(desc.type & 8)) {
670 } else if ((desc.type & 8) && !(desc.type & 4)) {
671 /* nonconforming code segment */
674 } else if ((desc.type & 8) && (desc.type & 4)) {
675 /* conforming code segment */
681 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
683 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
684 return emulate_gp(ctxt, 0);
686 return X86EMUL_CONTINUE;
688 if (addr.seg == VCPU_SREG_SS)
689 return emulate_ss(ctxt, sel);
691 return emulate_gp(ctxt, sel);
694 static int linearize(struct x86_emulate_ctxt *ctxt,
695 struct segmented_address addr,
696 unsigned size, bool write,
699 return __linearize(ctxt, addr, size, write, false, linear);
703 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
704 struct segmented_address addr,
711 rc = linearize(ctxt, addr, size, false, &linear);
712 if (rc != X86EMUL_CONTINUE)
714 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
718 * Fetch the next byte of the instruction being emulated which is pointed to
719 * by ctxt->_eip, then increment ctxt->_eip.
721 * Also prefetch the remaining bytes of the instruction without crossing page
722 * boundary if they are not in fetch_cache yet.
724 static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
726 struct fetch_cache *fc = &ctxt->fetch;
730 if (ctxt->_eip == fc->end) {
731 unsigned long linear;
732 struct segmented_address addr = { .seg = VCPU_SREG_CS,
734 cur_size = fc->end - fc->start;
735 size = min(15UL - cur_size,
736 PAGE_SIZE - offset_in_page(ctxt->_eip));
737 rc = __linearize(ctxt, addr, size, false, true, &linear);
738 if (unlikely(rc != X86EMUL_CONTINUE))
740 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
741 size, &ctxt->exception);
742 if (unlikely(rc != X86EMUL_CONTINUE))
746 *dest = fc->data[ctxt->_eip - fc->start];
748 return X86EMUL_CONTINUE;
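/*
 * Worked example of the prefetch sizing above: with an empty cache
 * (cur_size == 0) and _eip three bytes below a page boundary,
 * size == min(15UL, 3) == 3, so the fetch stops exactly at the page
 * boundary, as the comment before this function promises.
 */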
751 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
752 void *dest, unsigned size)
756 /* x86 instructions are limited to 15 bytes. */
757 if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
758 return X86EMUL_UNHANDLEABLE;
759 while (size--) {
760 rc = do_insn_fetch_byte(ctxt, dest++);
761 if (rc != X86EMUL_CONTINUE)
764 return X86EMUL_CONTINUE;
767 /* Fetch next part of the instruction being emulated. */
768 #define insn_fetch(_type, _ctxt) \
769 ({ unsigned long _x; \
770 rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
771 if (rc != X86EMUL_CONTINUE) \
772 goto done; \
773 (_type)_x; \
774 })
776 #define insn_fetch_arr(_arr, _size, _ctxt) \
777 ({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \
778 if (rc != X86EMUL_CONTINUE) \
779 goto done; \
780 })
783 * Given the 'reg' portion of a ModRM byte, and a register block, return a
784 * pointer into the block that addresses the relevant register.
785 * @byteop selects byte-sized registers; AH/CH/DH/BH are decoded only
786 * when there is no REX prefix. */
787 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
791 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
793 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
794 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
796 p = reg_rmw(ctxt, modrm_reg);
800 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
801 struct segmented_address addr,
802 u16 *size, unsigned long *address, int op_bytes)
809 rc = segmented_read_std(ctxt, addr, size, 2);
810 if (rc != X86EMUL_CONTINUE)
813 rc = segmented_read_std(ctxt, addr, address, op_bytes);
827 FASTOP1SRC2(mul, mul_ex);
828 FASTOP1SRC2(imul, imul_ex);
829 FASTOP1SRC2EX(div, div_ex);
830 FASTOP1SRC2EX(idiv, idiv_ex);
859 static u8 test_cc(unsigned int condition, unsigned long flags)
862 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
864 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
865 asm("push %[flags]; popf; call *%[fastop]"
866 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
870 static void fetch_register_operand(struct operand *op)
874 op->val = *(u8 *)op->addr.reg;
877 op->val = *(u16 *)op->addr.reg;
880 op->val = *(u32 *)op->addr.reg;
883 op->val = *(u64 *)op->addr.reg;
888 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
890 ctxt->ops->get_fpu(ctxt);
892 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
893 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
894 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
895 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
896 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
897 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
898 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
899 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
901 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
902 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
903 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
904 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
905 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
906 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
907 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
908 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
912 ctxt->ops->put_fpu(ctxt);
915 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
918 ctxt->ops->get_fpu(ctxt);
920 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
921 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
922 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
923 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
924 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
925 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
926 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
927 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
929 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
930 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
931 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
932 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
933 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
934 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
935 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
936 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
940 ctxt->ops->put_fpu(ctxt);
943 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
945 ctxt->ops->get_fpu(ctxt);
947 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
948 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
949 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
950 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
951 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
952 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
953 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
954 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
957 ctxt->ops->put_fpu(ctxt);
960 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
962 ctxt->ops->get_fpu(ctxt);
964 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
965 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
966 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
967 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
968 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
969 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
970 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
971 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
974 ctxt->ops->put_fpu(ctxt);
977 static int em_fninit(struct x86_emulate_ctxt *ctxt)
979 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
980 return emulate_nm(ctxt);
982 ctxt->ops->get_fpu(ctxt);
983 asm volatile("fninit");
984 ctxt->ops->put_fpu(ctxt);
985 return X86EMUL_CONTINUE;
988 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
992 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
993 return emulate_nm(ctxt);
995 ctxt->ops->get_fpu(ctxt);
996 asm volatile("fnstcw %0": "+m"(fcw));
997 ctxt->ops->put_fpu(ctxt);
999 /* force 2 byte destination */
1000 ctxt->dst.bytes = 2;
1001 ctxt->dst.val = fcw;
1003 return X86EMUL_CONTINUE;
1006 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1010 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1011 return emulate_nm(ctxt);
1013 ctxt->ops->get_fpu(ctxt);
1014 asm volatile("fnstsw %0": "+m"(fsw));
1015 ctxt->ops->put_fpu(ctxt);
1017 /* force 2 byte destination */
1018 ctxt->dst.bytes = 2;
1019 ctxt->dst.val = fsw;
1021 return X86EMUL_CONTINUE;
1024 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1027 unsigned reg = ctxt->modrm_reg;
1029 if (!(ctxt->d & ModRM))
1030 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1032 if (ctxt->d & Sse) {
1036 read_sse_reg(ctxt, &op->vec_val, reg);
1039 if (ctxt->d & Mmx) {
1048 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1049 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1051 fetch_register_operand(op);
1052 op->orig_val = op->val;
1055 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1057 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1058 ctxt->modrm_seg = VCPU_SREG_SS;
1061 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1065 int index_reg = 0, base_reg = 0, scale;
1066 int rc = X86EMUL_CONTINUE;
1069 if (ctxt->rex_prefix) {
1070 ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */
1071 index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
1072 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
1075 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
1076 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1077 ctxt->modrm_rm |= (ctxt->modrm & 0x07);
1078 ctxt->modrm_seg = VCPU_SREG_DS;
1080 if (ctxt->modrm_mod == 3) {
1082 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1083 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1085 if (ctxt->d & Sse) {
1088 op->addr.xmm = ctxt->modrm_rm;
1089 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1092 if (ctxt->d & Mmx) {
1095 op->addr.xmm = ctxt->modrm_rm & 7;
1098 fetch_register_operand(op);
1104 if (ctxt->ad_bytes == 2) {
1105 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1106 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1107 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1108 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1110 /* 16-bit ModR/M decode. */
1111 switch (ctxt->modrm_mod) {
1113 if (ctxt->modrm_rm == 6)
1114 modrm_ea += insn_fetch(u16, ctxt);
1117 modrm_ea += insn_fetch(s8, ctxt);
1120 modrm_ea += insn_fetch(u16, ctxt);
1123 switch (ctxt->modrm_rm) {
1125 modrm_ea += bx + si;
1128 modrm_ea += bx + di;
1131 modrm_ea += bp + si;
1134 modrm_ea += bp + di;
1143 if (ctxt->modrm_mod != 0)
1150 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1151 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1152 ctxt->modrm_seg = VCPU_SREG_SS;
1153 modrm_ea = (u16)modrm_ea;
1155 /* 32/64-bit ModR/M decode. */
1156 if ((ctxt->modrm_rm & 7) == 4) {
1157 sib = insn_fetch(u8, ctxt);
1158 index_reg |= (sib >> 3) & 7;
1159 base_reg |= sib & 7;
1162 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1163 modrm_ea += insn_fetch(s32, ctxt);
1165 modrm_ea += reg_read(ctxt, base_reg);
1166 adjust_modrm_seg(ctxt, base_reg);
1169 modrm_ea += reg_read(ctxt, index_reg) << scale;
1170 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1171 if (ctxt->mode == X86EMUL_MODE_PROT64)
1172 ctxt->rip_relative = 1;
1174 base_reg = ctxt->modrm_rm;
1175 modrm_ea += reg_read(ctxt, base_reg);
1176 adjust_modrm_seg(ctxt, base_reg);
1178 switch (ctxt->modrm_mod) {
1180 if (ctxt->modrm_rm == 5)
1181 modrm_ea += insn_fetch(s32, ctxt);
1184 modrm_ea += insn_fetch(s8, ctxt);
1187 modrm_ea += insn_fetch(s32, ctxt);
1191 op->addr.mem.ea = modrm_ea;
1196 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1199 int rc = X86EMUL_CONTINUE;
1202 switch (ctxt->ad_bytes) {
1204 op->addr.mem.ea = insn_fetch(u16, ctxt);
1207 op->addr.mem.ea = insn_fetch(u32, ctxt);
1210 op->addr.mem.ea = insn_fetch(u64, ctxt);
1217 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1221 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1222 mask = ~(ctxt->dst.bytes * 8 - 1);
1224 if (ctxt->src.bytes == 2)
1225 sv = (s16)ctxt->src.val & (s16)mask;
1226 else if (ctxt->src.bytes == 4)
1227 sv = (s32)ctxt->src.val & (s32)mask;
1229 ctxt->dst.addr.mem.ea += (sv >> 3);
1232 /* only subword offset */
1233 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
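/*
 * Worked example (bt %ecx, (%rax) with ecx == 100, dst.bytes == 4):
 * mask == ~31, sv == 100 & ~31 == 96, so the effective address is
 * advanced by 96 >> 3 == 12 bytes and the remaining in-dword bit
 * offset is 100 & 31 == 4.
 */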
1236 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1237 unsigned long addr, void *dest, unsigned size)
1240 struct read_cache *mc = &ctxt->mem_read;
1242 if (mc->pos < mc->end)
1245 WARN_ON((mc->end + size) >= sizeof(mc->data));
1247 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1249 if (rc != X86EMUL_CONTINUE)
1255 memcpy(dest, mc->data + mc->pos, size);
1257 return X86EMUL_CONTINUE;
1260 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1261 struct segmented_address addr,
1268 rc = linearize(ctxt, addr, size, false, &linear);
1269 if (rc != X86EMUL_CONTINUE)
1271 return read_emulated(ctxt, linear, data, size);
1274 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1275 struct segmented_address addr,
1282 rc = linearize(ctxt, addr, size, true, &linear);
1283 if (rc != X86EMUL_CONTINUE)
1285 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1289 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1290 struct segmented_address addr,
1291 const void *orig_data, const void *data,
1297 rc = linearize(ctxt, addr, size, true, &linear);
1298 if (rc != X86EMUL_CONTINUE)
1300 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1301 size, &ctxt->exception);
1304 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1305 unsigned int size, unsigned short port,
1308 struct read_cache *rc = &ctxt->io_read;
1310 if (rc->pos == rc->end) { /* refill pio read ahead */
1311 unsigned int in_page, n;
1312 unsigned int count = ctxt->rep_prefix ?
1313 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1314 in_page = (ctxt->eflags & EFLG_DF) ?
1315 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1316 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1317 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1321 rc->pos = rc->end = 0;
1322 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1327 if (ctxt->rep_prefix && !(ctxt->eflags & EFLG_DF)) {
1328 ctxt->dst.data = rc->data + rc->pos;
1329 ctxt->dst.type = OP_MEM_STR;
1330 ctxt->dst.count = (rc->end - rc->pos) / size;
1333 memcpy(dest, rc->data + rc->pos, size);
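/*
 * Worked example of the read-ahead above: "rep insb" with RCX == 100
 * and RDI 60 bytes below a page boundary (DF clear) performs a single
 * pio_in_emulated() call of n == min(min(60, sizeof(rc->data)) / 1, 100)
 * bytes; later iterations are then served from rc->data.
 */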
1339 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1340 u16 index, struct desc_struct *desc)
1345 ctxt->ops->get_idt(ctxt, &dt);
1347 if (dt.size < index * 8 + 7)
1348 return emulate_gp(ctxt, index << 3 | 0x2);
1350 addr = dt.address + index * 8;
1351 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1355 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1356 u16 selector, struct desc_ptr *dt)
1358 const struct x86_emulate_ops *ops = ctxt->ops;
1360 if (selector & 1 << 2) {
1361 struct desc_struct desc;
1364 memset (dt, 0, sizeof *dt);
1365 if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1368 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1369 dt->address = get_desc_base(&desc);
1371 ops->get_gdt(ctxt, dt);
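/*
 * Illustration: selector bit 2 is the table indicator (TI). Selector
 * 0x000f (index 1, TI == 1, RPL 3) is looked up in the LDT, whereas
 * 0x0010 (index 2, TI == 0, RPL 0) is looked up in the GDT.
 */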
1374 /* valid only for 8-byte segment descriptors */
1375 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1376 u16 selector, struct desc_struct *desc,
1380 u16 index = selector >> 3;
1383 get_descriptor_table_ptr(ctxt, selector, &dt);
1385 if (dt.size < index * 8 + 7)
1386 return emulate_gp(ctxt, selector & 0xfffc);
1388 *desc_addr_p = addr = dt.address + index * 8;
1389 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1393 /* valid only for 8-byte segment descriptors */
1394 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1395 u16 selector, struct desc_struct *desc)
1398 u16 index = selector >> 3;
1401 get_descriptor_table_ptr(ctxt, selector, &dt);
1403 if (dt.size < index * 8 + 7)
1404 return emulate_gp(ctxt, selector & 0xfffc);
1406 addr = dt.address + index * 8;
1407 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1411 /* Does not support long mode */
1412 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1413 u16 selector, int seg)
1415 struct desc_struct seg_desc, old_desc;
1417 unsigned err_vec = GP_VECTOR;
1419 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1424 memset(&seg_desc, 0, sizeof seg_desc);
1426 if (ctxt->mode == X86EMUL_MODE_REAL) {
1427 /* set real mode segment descriptor (keep limit etc. for
1428 * unreal mode) */
1429 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1430 set_desc_base(&seg_desc, selector << 4);
1432 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1433 /* VM86 needs a clean new segment descriptor */
1434 set_desc_base(&seg_desc, selector << 4);
1435 set_desc_limit(&seg_desc, 0xffff);
1444 cpl = ctxt->ops->cpl(ctxt);
1446 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1447 if ((seg == VCPU_SREG_CS
1448 || (seg == VCPU_SREG_SS
1449 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1450 || seg == VCPU_SREG_TR)
1454 /* TR should be in GDT only */
1455 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1458 if (null_selector) /* for NULL selector skip all following checks */
1461 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1462 if (ret != X86EMUL_CONTINUE)
1465 err_code = selector & 0xfffc;
1466 err_vec = GP_VECTOR;
1468 /* can't load system descriptor into segment selector */
1469 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1473 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1482 * segment is not a writable data segment or segment
1483 * selector's RPL != CPL or the descriptor's DPL != CPL
1485 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1489 if (!(seg_desc.type & 8))
1492 if (seg_desc.type & 4) {
1498 if (rpl > cpl || dpl != cpl)
1501 /* CS(RPL) <- CPL */
1502 selector = (selector & 0xfffc) | cpl;
1505 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1507 old_desc = seg_desc;
1508 seg_desc.type |= 2; /* busy */
1509 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1510 sizeof(seg_desc), &ctxt->exception);
1511 if (ret != X86EMUL_CONTINUE)
1514 case VCPU_SREG_LDTR:
1515 if (seg_desc.s || seg_desc.type != 2)
1518 default: /* DS, ES, FS, or GS */
1520 * segment is not a data or readable code segment or
1521 * ((segment is a data or nonconforming code segment)
1522 * and (both RPL and CPL > DPL))
1524 if ((seg_desc.type & 0xa) == 0x8 ||
1525 (((seg_desc.type & 0xc) != 0xc) &&
1526 (rpl > dpl && cpl > dpl)))
1532 /* mark segment as accessed */
1534 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1535 if (ret != X86EMUL_CONTINUE)
1539 ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1540 return X86EMUL_CONTINUE;
1542 emulate_exception(ctxt, err_vec, err_code, true);
1543 return X86EMUL_PROPAGATE_FAULT;
1546 static void write_register_operand(struct operand *op)
1548 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1549 switch (op->bytes) {
1551 *(u8 *)op->addr.reg = (u8)op->val;
1554 *(u16 *)op->addr.reg = (u16)op->val;
1557 *op->addr.reg = (u32)op->val;
1558 break; /* 64b: zero-extend */
1560 *op->addr.reg = op->val;
1565 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1571 write_register_operand(op);
1574 if (ctxt->lock_prefix)
1575 rc = segmented_cmpxchg(ctxt,
1581 rc = segmented_write(ctxt,
1585 if (rc != X86EMUL_CONTINUE)
1589 rc = segmented_write(ctxt,
1592 op->bytes * op->count);
1593 if (rc != X86EMUL_CONTINUE)
1597 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1600 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1608 return X86EMUL_CONTINUE;
1611 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1613 struct segmented_address addr;
1615 rsp_increment(ctxt, -bytes);
1616 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1617 addr.seg = VCPU_SREG_SS;
1619 return segmented_write(ctxt, addr, data, bytes);
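/*
 * Worked example: on a 16-bit stack (stack_mask() == 0xffff), a 2-byte
 * push with SP == 0 wraps to SP == 0xfffe before the write, thanks to
 * the masked arithmetic in rsp_increment().
 */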
1622 static int em_push(struct x86_emulate_ctxt *ctxt)
1624 /* Disable writeback. */
1625 ctxt->dst.type = OP_NONE;
1626 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1629 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1630 void *dest, int len)
1633 struct segmented_address addr;
1635 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1636 addr.seg = VCPU_SREG_SS;
1637 rc = segmented_read(ctxt, addr, dest, len);
1638 if (rc != X86EMUL_CONTINUE)
1641 rsp_increment(ctxt, len);
1645 static int em_pop(struct x86_emulate_ctxt *ctxt)
1647 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1650 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1651 void *dest, int len)
1654 unsigned long val, change_mask;
1655 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1656 int cpl = ctxt->ops->cpl(ctxt);
1658 rc = emulate_pop(ctxt, &val, len);
1659 if (rc != X86EMUL_CONTINUE)
1662 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1663 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1665 switch(ctxt->mode) {
1666 case X86EMUL_MODE_PROT64:
1667 case X86EMUL_MODE_PROT32:
1668 case X86EMUL_MODE_PROT16:
1670 change_mask |= EFLG_IOPL;
1672 change_mask |= EFLG_IF;
1674 case X86EMUL_MODE_VM86:
1676 return emulate_gp(ctxt, 0);
1677 change_mask |= EFLG_IF;
1679 default: /* real mode */
1680 change_mask |= (EFLG_IOPL | EFLG_IF);
1684 *(unsigned long *)dest =
1685 (ctxt->eflags & ~change_mask) | (val & change_mask);
1690 static int em_popf(struct x86_emulate_ctxt *ctxt)
1692 ctxt->dst.type = OP_REG;
1693 ctxt->dst.addr.reg = &ctxt->eflags;
1694 ctxt->dst.bytes = ctxt->op_bytes;
1695 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1698 static int em_enter(struct x86_emulate_ctxt *ctxt)
1701 unsigned frame_size = ctxt->src.val;
1702 unsigned nesting_level = ctxt->src2.val & 31;
1706 return X86EMUL_UNHANDLEABLE;
1708 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1709 rc = push(ctxt, &rbp, stack_size(ctxt));
1710 if (rc != X86EMUL_CONTINUE)
1712 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1714 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1715 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1717 return X86EMUL_CONTINUE;
1720 static int em_leave(struct x86_emulate_ctxt *ctxt)
1722 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1724 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1727 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1729 int seg = ctxt->src2.val;
1731 ctxt->src.val = get_segment_selector(ctxt, seg);
1733 return em_push(ctxt);
1736 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1738 int seg = ctxt->src2.val;
1739 unsigned long selector;
1742 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1743 if (rc != X86EMUL_CONTINUE)
1746 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1750 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1752 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1753 int rc = X86EMUL_CONTINUE;
1754 int reg = VCPU_REGS_RAX;
1756 while (reg <= VCPU_REGS_RDI) {
1757 (reg == VCPU_REGS_RSP) ?
1758 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1760 rc = em_push(ctxt);
1761 if (rc != X86EMUL_CONTINUE)
1770 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1772 ctxt->src.val = (unsigned long)ctxt->eflags;
1773 return em_push(ctxt);
1776 static int em_popa(struct x86_emulate_ctxt *ctxt)
1778 int rc = X86EMUL_CONTINUE;
1779 int reg = VCPU_REGS_RDI;
1781 while (reg >= VCPU_REGS_RAX) {
1782 if (reg == VCPU_REGS_RSP) {
1783 rsp_increment(ctxt, ctxt->op_bytes);
1787 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1788 if (rc != X86EMUL_CONTINUE)
1795 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1797 const struct x86_emulate_ops *ops = ctxt->ops;
1804 /* TODO: Add limit checks */
1805 ctxt->src.val = ctxt->eflags;
1807 if (rc != X86EMUL_CONTINUE)
1810 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1812 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1814 if (rc != X86EMUL_CONTINUE)
1817 ctxt->src.val = ctxt->_eip;
1819 if (rc != X86EMUL_CONTINUE)
1822 ops->get_idt(ctxt, &dt);
1824 eip_addr = dt.address + (irq << 2);
1825 cs_addr = dt.address + (irq << 2) + 2;
1827 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1828 if (rc != X86EMUL_CONTINUE)
1831 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1832 if (rc != X86EMUL_CONTINUE)
1835 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1836 if (rc != X86EMUL_CONTINUE)
1844 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1848 invalidate_registers(ctxt);
1849 rc = __emulate_int_real(ctxt, irq);
1850 if (rc == X86EMUL_CONTINUE)
1851 writeback_registers(ctxt);
1855 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1857 switch(ctxt->mode) {
1858 case X86EMUL_MODE_REAL:
1859 return __emulate_int_real(ctxt, irq);
1860 case X86EMUL_MODE_VM86:
1861 case X86EMUL_MODE_PROT16:
1862 case X86EMUL_MODE_PROT32:
1863 case X86EMUL_MODE_PROT64:
1865 /* Protected mode interrupts are not implemented yet */
1866 return X86EMUL_UNHANDLEABLE;
1870 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1872 int rc = X86EMUL_CONTINUE;
1873 unsigned long temp_eip = 0;
1874 unsigned long temp_eflags = 0;
1875 unsigned long cs = 0;
1876 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1877 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1878 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1879 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1881 /* TODO: Add stack limit check */
1883 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1885 if (rc != X86EMUL_CONTINUE)
1888 if (temp_eip & ~0xffff)
1889 return emulate_gp(ctxt, 0);
1891 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1893 if (rc != X86EMUL_CONTINUE)
1896 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1898 if (rc != X86EMUL_CONTINUE)
1901 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1903 if (rc != X86EMUL_CONTINUE)
1906 ctxt->_eip = temp_eip;
1909 if (ctxt->op_bytes == 4)
1910 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1911 else if (ctxt->op_bytes == 2) {
1912 ctxt->eflags &= ~0xffff;
1913 ctxt->eflags |= temp_eflags;
1916 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1917 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1922 static int em_iret(struct x86_emulate_ctxt *ctxt)
1924 switch(ctxt->mode) {
1925 case X86EMUL_MODE_REAL:
1926 return emulate_iret_real(ctxt);
1927 case X86EMUL_MODE_VM86:
1928 case X86EMUL_MODE_PROT16:
1929 case X86EMUL_MODE_PROT32:
1930 case X86EMUL_MODE_PROT64:
1932 /* iret from protected mode is not implemented yet */
1933 return X86EMUL_UNHANDLEABLE;
1937 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1942 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1944 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1945 if (rc != X86EMUL_CONTINUE)
1949 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1950 return X86EMUL_CONTINUE;
1953 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1955 int rc = X86EMUL_CONTINUE;
1957 switch (ctxt->modrm_reg) {
1958 case 2: /* call near abs */ {
1960 old_eip = ctxt->_eip;
1961 ctxt->_eip = ctxt->src.val;
1962 ctxt->src.val = old_eip;
1966 case 4: /* jmp abs */
1967 ctxt->_eip = ctxt->src.val;
1969 case 5: /* jmp far */
1970 rc = em_jmp_far(ctxt);
1979 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
1981 u64 old = ctxt->dst.orig_val64;
1983 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
1984 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
1985 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
1986 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
1987 ctxt->eflags &= ~EFLG_ZF;
1989 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
1990 (u32) reg_read(ctxt, VCPU_REGS_RBX);
1992 ctxt->eflags |= EFLG_ZF;
1994 return X86EMUL_CONTINUE;
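/*
 * Semantics recap of the branches above: if EDX:EAX equals the 64-bit
 * destination, ZF is set and ECX:EBX is stored; otherwise the old value
 * is loaded into EDX:EAX and ZF is cleared. E.g. for old ==
 * 0x1111111122222222, EAX must be 0x22222222 and EDX 0x11111111 for the
 * exchange to succeed.
 */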
1997 static int em_ret(struct x86_emulate_ctxt *ctxt)
1999 ctxt->dst.type = OP_REG;
2000 ctxt->dst.addr.reg = &ctxt->_eip;
2001 ctxt->dst.bytes = ctxt->op_bytes;
2002 return em_pop(ctxt);
2005 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2009 int cpl = ctxt->ops->cpl(ctxt);
2011 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
2012 if (rc != X86EMUL_CONTINUE)
2014 if (ctxt->op_bytes == 4)
2015 ctxt->_eip = (u32)ctxt->_eip;
2016 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2017 if (rc != X86EMUL_CONTINUE)
2019 /* Outer-privilege level return is not implemented */
2020 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2021 return X86EMUL_UNHANDLEABLE;
2022 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2026 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2030 rc = em_ret_far(ctxt);
2031 if (rc != X86EMUL_CONTINUE)
2033 rsp_increment(ctxt, ctxt->src.val);
2034 return X86EMUL_CONTINUE;
2037 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2039 /* Save real source value, then compare EAX against destination. */
2040 ctxt->src.orig_val = ctxt->src.val;
2041 ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX);
2042 fastop(ctxt, em_cmp);
2044 if (ctxt->eflags & EFLG_ZF) {
2045 /* Success: write back to memory. */
2046 ctxt->dst.val = ctxt->src.orig_val;
2048 /* Failure: write the value we saw to EAX. */
2049 ctxt->dst.type = OP_REG;
2050 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2052 return X86EMUL_CONTINUE;
2055 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2057 int seg = ctxt->src2.val;
2061 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2063 rc = load_segment_descriptor(ctxt, sel, seg);
2064 if (rc != X86EMUL_CONTINUE)
2067 ctxt->dst.val = ctxt->src.val;
2072 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2073 struct desc_struct *cs, struct desc_struct *ss)
2075 cs->l = 0; /* will be adjusted later */
2076 set_desc_base(cs, 0); /* flat segment */
2077 cs->g = 1; /* 4kb granularity */
2078 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2079 cs->type = 0x0b; /* Read, Execute, Accessed */
2081 cs->dpl = 0; /* will be adjusted later */
2086 set_desc_base(ss, 0); /* flat segment */
2087 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2088 ss->g = 1; /* 4kb granularity */
2090 ss->type = 0x03; /* Read/Write, Accessed */
2091 ss->d = 1; /* 32bit stack segment */
2098 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2100 u32 eax, ebx, ecx, edx;
2103 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2104 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2105 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2106 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2109 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2111 const struct x86_emulate_ops *ops = ctxt->ops;
2112 u32 eax, ebx, ecx, edx;
2115 * syscall should always be enabled in long mode - so the check only
2116 * becomes vendor specific (via cpuid) if other modes are active...
2118 if (ctxt->mode == X86EMUL_MODE_PROT64)
2123 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2125 * Intel ("GenuineIntel")
2126 * remark: Intel CPUs only support "syscall" in 64-bit
2127 * long mode. A 64-bit guest running a 32-bit compat
2128 * application will therefore #UD! While this behaviour
2129 * could be fixed (by emulating) to give the AMD
2130 * response, AMD CPUs can't be made to behave like Intel.
2132 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2133 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2134 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2137 /* AMD ("AuthenticAMD") */
2138 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2139 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2140 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2143 /* AMD ("AMDisbetter!") */
2144 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2145 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2146 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2149 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2153 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2155 const struct x86_emulate_ops *ops = ctxt->ops;
2156 struct desc_struct cs, ss;
2161 /* syscall is not available in real mode */
2162 if (ctxt->mode == X86EMUL_MODE_REAL ||
2163 ctxt->mode == X86EMUL_MODE_VM86)
2164 return emulate_ud(ctxt);
2166 if (!(em_syscall_is_enabled(ctxt)))
2167 return emulate_ud(ctxt);
2169 ops->get_msr(ctxt, MSR_EFER, &efer);
2170 setup_syscalls_segments(ctxt, &cs, &ss);
2172 if (!(efer & EFER_SCE))
2173 return emulate_ud(ctxt);
2175 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2176 msr_data >>= 32;
2177 cs_sel = (u16)(msr_data & 0xfffc);
2178 ss_sel = (u16)(msr_data + 8);
2180 if (efer & EFER_LMA) {
2184 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2185 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2187 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2188 if (efer & EFER_LMA) {
2189 #ifdef CONFIG_X86_64
2190 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF;
2193 ctxt->mode == X86EMUL_MODE_PROT64 ?
2194 MSR_LSTAR : MSR_CSTAR, &msr_data);
2195 ctxt->_eip = msr_data;
2197 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2198 ctxt->eflags &= ~(msr_data | EFLG_RF);
2202 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2203 ctxt->_eip = (u32)msr_data;
2205 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2208 return X86EMUL_CONTINUE;
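/*
 * Layout note (per the Intel SDM / AMD APM): MSR_STAR carries the
 * SYSCALL CS/SS selector base in bits 47:32 (hence the >> 32 above) and
 * the SYSRET base in bits 63:48; SS is defined as CS + 8, which is why
 * ss_sel = (u16)(msr_data + 8).
 */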
2211 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2213 const struct x86_emulate_ops *ops = ctxt->ops;
2214 struct desc_struct cs, ss;
2219 ops->get_msr(ctxt, MSR_EFER, &efer);
2220 /* inject #GP if in real mode */
2221 if (ctxt->mode == X86EMUL_MODE_REAL)
2222 return emulate_gp(ctxt, 0);
2225 * Not recognized on AMD in compat mode (but is recognized in legacy
2226 * mode).
2227 */
2228 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2229 && !vendor_intel(ctxt))
2230 return emulate_ud(ctxt);
2232 /* XXX sysenter/sysexit have not been tested in 64-bit mode.
2233 * Therefore, we inject an #UD.
2234 */
2235 if (ctxt->mode == X86EMUL_MODE_PROT64)
2236 return emulate_ud(ctxt);
2238 setup_syscalls_segments(ctxt, &cs, &ss);
2240 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2241 switch (ctxt->mode) {
2242 case X86EMUL_MODE_PROT32:
2243 if ((msr_data & 0xfffc) == 0x0)
2244 return emulate_gp(ctxt, 0);
2246 case X86EMUL_MODE_PROT64:
2247 if (msr_data == 0x0)
2248 return emulate_gp(ctxt, 0);
2254 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2255 cs_sel = (u16)msr_data;
2256 cs_sel &= ~SELECTOR_RPL_MASK;
2257 ss_sel = cs_sel + 8;
2258 ss_sel &= ~SELECTOR_RPL_MASK;
2259 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2264 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2265 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2267 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2268 ctxt->_eip = msr_data;
2270 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2271 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2273 return X86EMUL_CONTINUE;
2276 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2278 const struct x86_emulate_ops *ops = ctxt->ops;
2279 struct desc_struct cs, ss;
2282 u16 cs_sel = 0, ss_sel = 0;
2284 /* inject #GP if in real mode or Virtual 8086 mode */
2285 if (ctxt->mode == X86EMUL_MODE_REAL ||
2286 ctxt->mode == X86EMUL_MODE_VM86)
2287 return emulate_gp(ctxt, 0);
2289 setup_syscalls_segments(ctxt, &cs, &ss);
2291 if ((ctxt->rex_prefix & 0x8) != 0x0)
2292 usermode = X86EMUL_MODE_PROT64;
2294 usermode = X86EMUL_MODE_PROT32;
2298 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2300 case X86EMUL_MODE_PROT32:
2301 cs_sel = (u16)(msr_data + 16);
2302 if ((msr_data & 0xfffc) == 0x0)
2303 return emulate_gp(ctxt, 0);
2304 ss_sel = (u16)(msr_data + 24);
2306 case X86EMUL_MODE_PROT64:
2307 cs_sel = (u16)(msr_data + 32);
2308 if (msr_data == 0x0)
2309 return emulate_gp(ctxt, 0);
2310 ss_sel = cs_sel + 8;
2315 cs_sel |= SELECTOR_RPL_MASK;
2316 ss_sel |= SELECTOR_RPL_MASK;
2318 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2319 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2321 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
2322 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
2324 return X86EMUL_CONTINUE;
2327 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2330 if (ctxt->mode == X86EMUL_MODE_REAL)
2332 if (ctxt->mode == X86EMUL_MODE_VM86)
2334 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2335 return ctxt->ops->cpl(ctxt) > iopl;
2338 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2341 const struct x86_emulate_ops *ops = ctxt->ops;
2342 struct desc_struct tr_seg;
2345 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2346 unsigned mask = (1 << len) - 1;
2349 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2352 if (desc_limit_scaled(&tr_seg) < 103)
2354 base = get_desc_base(&tr_seg);
2355 #ifdef CONFIG_X86_64
2356 base |= ((u64)base3) << 32;
2358 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2359 if (r != X86EMUL_CONTINUE)
2361 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2363 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2364 if (r != X86EMUL_CONTINUE)
2366 if ((perm >> bit_idx) & mask)
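/*
 * Worked example: a 2-byte access to port 0x71 reads the u16 at
 * io_bitmap_ptr + 0x71 / 8 (byte 14); bit_idx == 0x71 & 7 == 1 and
 * mask == (1 << 2) - 1 == 3, so the access is denied if either of
 * bits 1-2 of that u16 is set.
 */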
2371 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2377 if (emulator_bad_iopl(ctxt))
2378 if (!emulator_io_port_access_allowed(ctxt, port, len))
2381 ctxt->perm_ok = true;
2386 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2387 struct tss_segment_16 *tss)
2389 tss->ip = ctxt->_eip;
2390 tss->flag = ctxt->eflags;
2391 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2392 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2393 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2394 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2395 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2396 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2397 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2398 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2400 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2401 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2402 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2403 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2404 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2407 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2408 struct tss_segment_16 *tss)
2412 ctxt->_eip = tss->ip;
2413 ctxt->eflags = tss->flag | 2;
2414 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2415 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2416 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2417 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2418 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2419 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2420 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2421 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2424 * SDM says that segment selectors are loaded before segment
2425 * descriptors.
2426 */
2427 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2428 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2429 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2430 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2431 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2434 * Now load segment descriptors. If a fault happens at this stage,
2435 * it is handled in the context of the new task.
2437 ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2438 if (ret != X86EMUL_CONTINUE)
2440 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2441 if (ret != X86EMUL_CONTINUE)
2443 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2444 if (ret != X86EMUL_CONTINUE)
2446 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2447 if (ret != X86EMUL_CONTINUE)
2449 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2450 if (ret != X86EMUL_CONTINUE)
2453 return X86EMUL_CONTINUE;
2456 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2457 u16 tss_selector, u16 old_tss_sel,
2458 ulong old_tss_base, struct desc_struct *new_desc)
2460 const struct x86_emulate_ops *ops = ctxt->ops;
2461 struct tss_segment_16 tss_seg;
2463 u32 new_tss_base = get_desc_base(new_desc);
2465 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2467 if (ret != X86EMUL_CONTINUE)
2468 /* FIXME: need to provide precise fault address */
2471 save_state_to_tss16(ctxt, &tss_seg);
2473 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2475 if (ret != X86EMUL_CONTINUE)
2476 /* FIXME: need to provide precise fault address */
2479 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2481 if (ret != X86EMUL_CONTINUE)
2482 /* FIXME: need to provide precise fault address */
2485 if (old_tss_sel != 0xffff) {
2486 tss_seg.prev_task_link = old_tss_sel;
2488 ret = ops->write_std(ctxt, new_tss_base,
2489 &tss_seg.prev_task_link,
2490 sizeof tss_seg.prev_task_link,
2492 if (ret != X86EMUL_CONTINUE)
2493 /* FIXME: need to provide precise fault address */
2497 return load_state_from_tss16(ctxt, &tss_seg);
2500 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2501 struct tss_segment_32 *tss)
2503 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2504 tss->eip = ctxt->_eip;
2505 tss->eflags = ctxt->eflags;
2506 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2507 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2508 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2509 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2510 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2511 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2512 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2513 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2515 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2516 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2517 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2518 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2519 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2520 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2521 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2524 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2525 struct tss_segment_32 *tss)
2529 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2530 return emulate_gp(ctxt, 0);
2531 ctxt->_eip = tss->eip;
2532 ctxt->eflags = tss->eflags | 2;
2534 /* General purpose registers */
2535 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2536 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2537 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2538 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2539 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2540 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2541 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2542 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2545 * SDM says that segment selectors are loaded before segment descriptors.
2548 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2549 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2550 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2551 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2552 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2553 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2554 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2557 * If we're switching between Protected Mode and VM86, we need to make
2558 * sure to update the mode before loading the segment descriptors so
2559 * that the selectors are interpreted correctly.
2561 * Rflags must be propagated to the vcpu struct immediately, because it
2562 * influences the CPL, which is checked at least when loading the segment
2563 * descriptors and when pushing an error code onto the new kernel stack.
2565 * TODO Introduce a separate ctxt->ops->set_cpl callback
2567 if (ctxt->eflags & X86_EFLAGS_VM)
2568 ctxt->mode = X86EMUL_MODE_VM86;
2570 ctxt->mode = X86EMUL_MODE_PROT32;
2572 ctxt->ops->set_rflags(ctxt, ctxt->eflags);
2575 * Now load the segment descriptors. If a fault happens at this stage,
2576 * it is handled in the context of the new task.
2578 ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2579 if (ret != X86EMUL_CONTINUE)
2581 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2582 if (ret != X86EMUL_CONTINUE)
2584 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2585 if (ret != X86EMUL_CONTINUE)
2587 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2588 if (ret != X86EMUL_CONTINUE)
2590 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2591 if (ret != X86EMUL_CONTINUE)
2593 ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2594 if (ret != X86EMUL_CONTINUE)
2596 ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2597 if (ret != X86EMUL_CONTINUE)
2600 return X86EMUL_CONTINUE;
2603 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2604 u16 tss_selector, u16 old_tss_sel,
2605 ulong old_tss_base, struct desc_struct *new_desc)
2607 const struct x86_emulate_ops *ops = ctxt->ops;
2608 struct tss_segment_32 tss_seg;
2610 u32 new_tss_base = get_desc_base(new_desc);
2612 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2614 if (ret != X86EMUL_CONTINUE)
2615 /* FIXME: need to provide precise fault address */
2618 save_state_to_tss32(ctxt, &tss_seg);
2620 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2622 if (ret != X86EMUL_CONTINUE)
2623 /* FIXME: need to provide precise fault address */
2626 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2628 if (ret != X86EMUL_CONTINUE)
2629 /* FIXME: need to provide precise fault address */
2632 if (old_tss_sel != 0xffff) {
2633 tss_seg.prev_task_link = old_tss_sel;
2635 ret = ops->write_std(ctxt, new_tss_base,
2636 &tss_seg.prev_task_link,
2637 sizeof tss_seg.prev_task_link,
2639 if (ret != X86EMUL_CONTINUE)
2640 /* FIXME: need to provide precise fault address */
2644 return load_state_from_tss32(ctxt, &tss_seg);
2647 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2648 u16 tss_selector, int idt_index, int reason,
2649 bool has_error_code, u32 error_code)
2651 const struct x86_emulate_ops *ops = ctxt->ops;
2652 struct desc_struct curr_tss_desc, next_tss_desc;
2654 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2655 ulong old_tss_base =
2656 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2660 /* FIXME: old_tss_base == ~0 ? */
2662 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2663 if (ret != X86EMUL_CONTINUE)
2665 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2666 if (ret != X86EMUL_CONTINUE)
2669 /* FIXME: check that next_tss_desc is tss */
2672 * Check privileges. The three cases of task switch are:
2674 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2675 * 2. Exception/IRQ/iret: No check is performed
2676 * 3. jmp/call to TSS: Check against DPL of the TSS
2678 if (reason == TASK_SWITCH_GATE) {
2679 if (idt_index != -1) {
2680 /* Software interrupts */
2681 struct desc_struct task_gate_desc;
2684 ret = read_interrupt_descriptor(ctxt, idt_index,
2686 if (ret != X86EMUL_CONTINUE)
2689 dpl = task_gate_desc.dpl;
2690 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2691 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2693 } else if (reason != TASK_SWITCH_IRET) {
2694 int dpl = next_tss_desc.dpl;
2695 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2696 return emulate_gp(ctxt, tss_selector);
2700 desc_limit = desc_limit_scaled(&next_tss_desc);
2701 if (!next_tss_desc.p ||
2702 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2703 desc_limit < 0x2b)) {
2704 emulate_ts(ctxt, tss_selector & 0xfffc);
2705 return X86EMUL_PROPAGATE_FAULT;
2708 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2709 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2710 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2713 if (reason == TASK_SWITCH_IRET)
2714 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2716 /* Set the back link to the prev task only if the NT bit is set in eflags;
2717 note that old_tss_sel is not used after this point */
2718 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2719 old_tss_sel = 0xffff;
2721 if (next_tss_desc.type & 8)
2722 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2723 old_tss_base, &next_tss_desc);
2725 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2726 old_tss_base, &next_tss_desc);
2727 if (ret != X86EMUL_CONTINUE)
2730 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2731 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2733 if (reason != TASK_SWITCH_IRET) {
2734 next_tss_desc.type |= (1 << 1); /* set busy flag */
2735 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2738 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2739 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2741 if (has_error_code) {
2742 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2743 ctxt->lock_prefix = 0;
2744 ctxt->src.val = (unsigned long) error_code;
2745 ret = em_push(ctxt);
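/*
 * Note on the descriptor 'type' arithmetic above, with a sketch: bit 1
 * of a TSS descriptor type is the busy bit (1/3 = 16-bit TSS
 * available/busy, 9/11 = 32-bit TSS available/busy), and bit 3 is what
 * 'next_tss_desc.type & 8' uses to tell a 32-bit TSS from a 16-bit one.
 * Illustrative helper only:
 */
static inline bool tss_type_busy_demo(u8 type)
{
return type & (1 << 1); /* 0x3 or 0xb => busy */
}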
2751 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2752 u16 tss_selector, int idt_index, int reason,
2753 bool has_error_code, u32 error_code)
2757 invalidate_registers(ctxt);
2758 ctxt->_eip = ctxt->eip;
2759 ctxt->dst.type = OP_NONE;
2761 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2762 has_error_code, error_code);
2764 if (rc == X86EMUL_CONTINUE) {
2765 ctxt->eip = ctxt->_eip;
2766 writeback_registers(ctxt);
2769 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2772 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2775 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2777 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2778 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2781 static int em_das(struct x86_emulate_ctxt *ctxt)
2784 bool af, cf, old_cf;
2786 cf = ctxt->eflags & X86_EFLAGS_CF;
2792 af = ctxt->eflags & X86_EFLAGS_AF;
2793 if ((al & 0x0f) > 9 || af) {
2795 cf = old_cf | (al >= 250);
2800 if (old_al > 0x99 || old_cf) {
2806 /* Set PF, ZF, SF */
2807 ctxt->src.type = OP_IMM;
2809 ctxt->src.bytes = 1;
2810 fastop(ctxt, em_or);
2811 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2813 ctxt->eflags |= X86_EFLAGS_CF;
2815 ctxt->eflags |= X86_EFLAGS_AF;
2816 return X86EMUL_CONTINUE;
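/*
 * For reference, a self-contained sketch of the SDM pseudocode for DAS
 * (decimal adjust AL after subtraction) that em_das() implements; the
 * '(al >= 250)' test above is the post-subtraction form of the
 * 'borrow from AL - 6' term below. Illustrative only:
 */
static inline u8 das_demo(u8 al, bool *cf, bool *af)
{
u8 old_al = al;
bool old_cf = *cf;

*cf = false;
if ((al & 0x0f) > 9 || *af) {
*cf = old_cf || (al < 6); /* borrow out of AL -= 6 */
al -= 6;
*af = true;
} else {
*af = false;
}
if (old_al > 0x99 || old_cf) {
al -= 0x60;
*cf = true;
}
return al;
}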
2819 static int em_aam(struct x86_emulate_ctxt *ctxt)
2823 if (ctxt->src.val == 0)
2824 return emulate_de(ctxt);
2826 al = ctxt->dst.val & 0xff;
2827 ah = al / ctxt->src.val;
2828 al %= ctxt->src.val;
2830 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2832 /* Set PF, ZF, SF */
2833 ctxt->src.type = OP_IMM;
2835 ctxt->src.bytes = 1;
2836 fastop(ctxt, em_or);
2838 return X86EMUL_CONTINUE;
2841 static int em_aad(struct x86_emulate_ctxt *ctxt)
2843 u8 al = ctxt->dst.val & 0xff;
2844 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2846 al = (al + (ah * ctxt->src.val)) & 0xff;
2848 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2850 /* Set PF, ZF, SF */
2851 ctxt->src.type = OP_IMM;
2853 ctxt->src.bytes = 1;
2854 fastop(ctxt, em_or);
2856 return X86EMUL_CONTINUE;
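/*
 * The arithmetic behind the two handlers above, as minimal sketches
 * (base is the immediate byte, 10 for the canonical AAM/AAD encodings;
 * AAM with base 0 is the #DE case rejected above). For example,
 * aam_demo(123, 10) yields AH=12, AL=3. Illustrative only:
 */
static inline u16 aam_demo(u8 al, u8 base) /* returns AH:AL */
{
return ((al / base) << 8) | (al % base);
}

static inline u16 aad_demo(u8 ah, u8 al, u8 base) /* AH is cleared */
{
return (u8)(al + ah * base);
}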
2859 static int em_call(struct x86_emulate_ctxt *ctxt)
2861 long rel = ctxt->src.val;
2863 ctxt->src.val = (unsigned long)ctxt->_eip;
2865 return em_push(ctxt);
2868 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2874 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2875 old_eip = ctxt->_eip;
2877 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2878 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
2879 return X86EMUL_CONTINUE;
2882 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2884 ctxt->src.val = old_cs;
2886 if (rc != X86EMUL_CONTINUE)
2889 ctxt->src.val = old_eip;
2890 return em_push(ctxt);
2893 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2897 ctxt->dst.type = OP_REG;
2898 ctxt->dst.addr.reg = &ctxt->_eip;
2899 ctxt->dst.bytes = ctxt->op_bytes;
2900 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2901 if (rc != X86EMUL_CONTINUE)
2903 rsp_increment(ctxt, ctxt->src.val);
2904 return X86EMUL_CONTINUE;
2907 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2909 /* Write back the register source. */
2910 ctxt->src.val = ctxt->dst.val;
2911 write_register_operand(&ctxt->src);
2913 /* Write back the memory destination with implicit LOCK prefix. */
2914 ctxt->dst.val = ctxt->src.orig_val;
2915 ctxt->lock_prefix = 1;
2916 return X86EMUL_CONTINUE;
2919 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2921 ctxt->dst.val = ctxt->src2.val;
2922 return fastop(ctxt, em_imul);
2925 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2927 ctxt->dst.type = OP_REG;
2928 ctxt->dst.bytes = ctxt->src.bytes;
2929 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
2930 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2932 return X86EMUL_CONTINUE;
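/*
 * The expression above is a branchless sign-fill: shifting the source
 * right by (bytes*8 - 1) isolates the sign bit, subtracting 1 turns it
 * into all-zeroes (sign set) or all-ones (sign clear), and ~ flips
 * that, so DX/EDX/RDX becomes all-ones or zero. A 16-bit sketch:
 */
static inline u16 cwd_demo(u16 ax)
{
return ~((u16)(ax >> 15) - 1); /* 0x8000 -> 0xffff, 0x1234 -> 0 */
}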
2935 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2939 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2940 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
2941 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
2942 return X86EMUL_CONTINUE;
2945 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
2949 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
2950 return emulate_gp(ctxt, 0);
2951 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
2952 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
2953 return X86EMUL_CONTINUE;
2956 static int em_mov(struct x86_emulate_ctxt *ctxt)
2958 memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
2959 return X86EMUL_CONTINUE;
2962 #define FFL(x) bit(X86_FEATURE_##x)
2964 static int em_movbe(struct x86_emulate_ctxt *ctxt)
2966 u32 ebx, ecx, edx, eax = 1;
2970 * Check that MOVBE is set in the guest-visible CPUID leaf.
2972 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2973 if (!(ecx & FFL(MOVBE)))
2974 return emulate_ud(ctxt);
2976 switch (ctxt->op_bytes) {
2979 * From MOVBE definition: "...When the operand size is 16 bits,
2980 * the upper word of the destination register remains unchanged
2983 * Casting either ->valptr or ->val to u16 breaks strict aliasing
2984 * rules, so we have to do the operation almost by hand.
2986 tmp = (u16)ctxt->src.val;
2987 ctxt->dst.val &= ~0xffffUL;
2988 ctxt->dst.val |= (unsigned long)swab16(tmp);
2991 ctxt->dst.val = swab32((u32)ctxt->src.val);
2994 ctxt->dst.val = swab64(ctxt->src.val);
2997 return X86EMUL_PROPAGATE_FAULT;
2999 return X86EMUL_CONTINUE;
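/*
 * A sketch of the 16-bit case above (illustrative only): swab16()
 * reverses the two bytes, and the mask keeps the upper word of the
 * destination intact, as the quoted MOVBE rule requires.
 */
static inline unsigned long movbe16_demo(unsigned long dst, u16 src)
{
u16 swapped = (src << 8) | (src >> 8); /* swab16(src) */
return (dst & ~0xffffUL) | swapped;
}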
3002 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3004 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3005 return emulate_gp(ctxt, 0);
3007 /* Disable writeback. */
3008 ctxt->dst.type = OP_NONE;
3009 return X86EMUL_CONTINUE;
3012 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3016 if (ctxt->mode == X86EMUL_MODE_PROT64)
3017 val = ctxt->src.val & ~0ULL;
3019 val = ctxt->src.val & ~0U;
3021 /* #UD condition is already handled. */
3022 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3023 return emulate_gp(ctxt, 0);
3025 /* Disable writeback. */
3026 ctxt->dst.type = OP_NONE;
3027 return X86EMUL_CONTINUE;
3030 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3034 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3035 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3036 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3037 return emulate_gp(ctxt, 0);
3039 return X86EMUL_CONTINUE;
3042 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3046 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3047 return emulate_gp(ctxt, 0);
3049 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3050 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3051 return X86EMUL_CONTINUE;
3054 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3056 if (ctxt->modrm_reg > VCPU_SREG_GS)
3057 return emulate_ud(ctxt);
3059 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3060 return X86EMUL_CONTINUE;
3063 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3065 u16 sel = ctxt->src.val;
3067 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3068 return emulate_ud(ctxt);
3070 if (ctxt->modrm_reg == VCPU_SREG_SS)
3071 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3073 /* Disable writeback. */
3074 ctxt->dst.type = OP_NONE;
3075 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3078 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3080 u16 sel = ctxt->src.val;
3082 /* Disable writeback. */
3083 ctxt->dst.type = OP_NONE;
3084 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3087 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3089 u16 sel = ctxt->src.val;
3091 /* Disable writeback. */
3092 ctxt->dst.type = OP_NONE;
3093 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3096 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3101 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3102 if (rc == X86EMUL_CONTINUE)
3103 ctxt->ops->invlpg(ctxt, linear);
3104 /* Disable writeback. */
3105 ctxt->dst.type = OP_NONE;
3106 return X86EMUL_CONTINUE;
3109 static int em_clts(struct x86_emulate_ctxt *ctxt)
3113 cr0 = ctxt->ops->get_cr(ctxt, 0);
3115 ctxt->ops->set_cr(ctxt, 0, cr0);
3116 return X86EMUL_CONTINUE;
3119 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3123 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
3124 return X86EMUL_UNHANDLEABLE;
3126 rc = ctxt->ops->fix_hypercall(ctxt);
3127 if (rc != X86EMUL_CONTINUE)
3130 /* Let the processor re-execute the fixed hypercall */
3131 ctxt->_eip = ctxt->eip;
3132 /* Disable writeback. */
3133 ctxt->dst.type = OP_NONE;
3134 return X86EMUL_CONTINUE;
3137 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3138 void (*get)(struct x86_emulate_ctxt *ctxt,
3139 struct desc_ptr *ptr))
3141 struct desc_ptr desc_ptr;
3143 if (ctxt->mode == X86EMUL_MODE_PROT64)
3145 get(ctxt, &desc_ptr);
3146 if (ctxt->op_bytes == 2) {
3148 desc_ptr.address &= 0x00ffffff;
3150 /* Disable writeback. */
3151 ctxt->dst.type = OP_NONE;
3152 return segmented_write(ctxt, ctxt->dst.addr.mem,
3153 &desc_ptr, 2 + ctxt->op_bytes);
3156 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3158 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3161 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3163 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3166 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3168 struct desc_ptr desc_ptr;
3171 if (ctxt->mode == X86EMUL_MODE_PROT64)
3173 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3174 &desc_ptr.size, &desc_ptr.address,
3176 if (rc != X86EMUL_CONTINUE)
3178 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3179 /* Disable writeback. */
3180 ctxt->dst.type = OP_NONE;
3181 return X86EMUL_CONTINUE;
3184 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3188 rc = ctxt->ops->fix_hypercall(ctxt);
3190 /* Disable writeback. */
3191 ctxt->dst.type = OP_NONE;
3195 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3197 struct desc_ptr desc_ptr;
3200 if (ctxt->mode == X86EMUL_MODE_PROT64)
3202 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3203 &desc_ptr.size, &desc_ptr.address,
3205 if (rc != X86EMUL_CONTINUE)
3207 ctxt->ops->set_idt(ctxt, &desc_ptr);
3208 /* Disable writeback. */
3209 ctxt->dst.type = OP_NONE;
3210 return X86EMUL_CONTINUE;
3213 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3215 ctxt->dst.bytes = 2;
3216 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3217 return X86EMUL_CONTINUE;
3220 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3222 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3223 | (ctxt->src.val & 0x0f));
3224 ctxt->dst.type = OP_NONE;
3225 return X86EMUL_CONTINUE;
3228 static int em_loop(struct x86_emulate_ctxt *ctxt)
3230 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3231 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3232 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3233 jmp_rel(ctxt, ctxt->src.val);
3235 return X86EMUL_CONTINUE;
3238 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3240 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3241 jmp_rel(ctxt, ctxt->src.val);
3243 return X86EMUL_CONTINUE;
3246 static int em_in(struct x86_emulate_ctxt *ctxt)
3248 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3250 return X86EMUL_IO_NEEDED;
3252 return X86EMUL_CONTINUE;
3255 static int em_out(struct x86_emulate_ctxt *ctxt)
3257 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3259 /* Disable writeback. */
3260 ctxt->dst.type = OP_NONE;
3261 return X86EMUL_CONTINUE;
3264 static int em_cli(struct x86_emulate_ctxt *ctxt)
3266 if (emulator_bad_iopl(ctxt))
3267 return emulate_gp(ctxt, 0);
3269 ctxt->eflags &= ~X86_EFLAGS_IF;
3270 return X86EMUL_CONTINUE;
3273 static int em_sti(struct x86_emulate_ctxt *ctxt)
3275 if (emulator_bad_iopl(ctxt))
3276 return emulate_gp(ctxt, 0);
3278 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3279 ctxt->eflags |= X86_EFLAGS_IF;
3280 return X86EMUL_CONTINUE;
3283 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3285 u32 eax, ebx, ecx, edx;
3287 eax = reg_read(ctxt, VCPU_REGS_RAX);
3288 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3289 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3290 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3291 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3292 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3293 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3294 return X86EMUL_CONTINUE;
3297 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3301 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3302 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3304 ctxt->eflags &= ~0xffUL;
3305 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3306 return X86EMUL_CONTINUE;
3309 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3311 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3312 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3313 return X86EMUL_CONTINUE;
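/*
 * Layout note with a sketch: LAHF/SAHF move the low EFLAGS byte
 * (SF ZF 0 AF 0 PF 1 CF, bits 7..0) through AH, which is why em_sahf()
 * masks exactly CF|PF|AF|ZF|SF and re-ORs the always-one bit 1
 * (X86_EFLAGS_FIXED). Illustrative only:
 */
static inline u8 lahf_demo(unsigned long eflags)
{
return (eflags & 0xff) | 0x02; /* bit 1 of EFLAGS always reads 1 */
}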
3316 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3318 switch (ctxt->op_bytes) {
3319 #ifdef CONFIG_X86_64
3321 asm("bswap %0" : "+r"(ctxt->dst.val));
3325 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3328 return X86EMUL_CONTINUE;
3331 static bool valid_cr(int nr)
3343 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3345 if (!valid_cr(ctxt->modrm_reg))
3346 return emulate_ud(ctxt);
3348 return X86EMUL_CONTINUE;
3351 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3353 u64 new_val = ctxt->src.val64;
3354 int cr = ctxt->modrm_reg;
3357 static u64 cr_reserved_bits[] = {
3358 0xffffffff00000000ULL,
3359 0, 0, 0, /* CR3 checked later */
3366 return emulate_ud(ctxt);
3368 if (new_val & cr_reserved_bits[cr])
3369 return emulate_gp(ctxt, 0);
3374 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3375 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3376 return emulate_gp(ctxt, 0);
3378 cr4 = ctxt->ops->get_cr(ctxt, 4);
3379 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3381 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3382 !(cr4 & X86_CR4_PAE))
3383 return emulate_gp(ctxt, 0);
3390 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3391 if (efer & EFER_LMA)
3392 rsvd = CR3_L_MODE_RESERVED_BITS;
3393 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
3394 rsvd = CR3_PAE_RESERVED_BITS;
3395 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
3396 rsvd = CR3_NONPAE_RESERVED_BITS;
3399 return emulate_gp(ctxt, 0);
3404 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3406 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3407 return emulate_gp(ctxt, 0);
3413 return X86EMUL_CONTINUE;
3416 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3420 ctxt->ops->get_dr(ctxt, 7, &dr7);
3422 /* Check if DR7.Global_Enable is set */
3423 return dr7 & (1 << 13);
3426 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3428 int dr = ctxt->modrm_reg;
3432 return emulate_ud(ctxt);
3434 cr4 = ctxt->ops->get_cr(ctxt, 4);
3435 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3436 return emulate_ud(ctxt);
3438 if (check_dr7_gd(ctxt))
3439 return emulate_db(ctxt);
3441 return X86EMUL_CONTINUE;
3444 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3446 u64 new_val = ctxt->src.val64;
3447 int dr = ctxt->modrm_reg;
3449 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3450 return emulate_gp(ctxt, 0);
3452 return check_dr_read(ctxt);
3455 static int check_svme(struct x86_emulate_ctxt *ctxt)
3459 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3461 if (!(efer & EFER_SVME))
3462 return emulate_ud(ctxt);
3464 return X86EMUL_CONTINUE;
3467 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3469 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3471 /* Valid physical address? */
3472 if (rax & 0xffff000000000000ULL)
3473 return emulate_gp(ctxt, 0);
3475 return check_svme(ctxt);
3478 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3480 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3482 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3483 return emulate_ud(ctxt);
3485 return X86EMUL_CONTINUE;
3488 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3490 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3491 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3493 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3495 return emulate_gp(ctxt, 0);
3497 return X86EMUL_CONTINUE;
3500 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3502 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3503 if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3504 return emulate_gp(ctxt, 0);
3506 return X86EMUL_CONTINUE;
3509 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3511 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3512 if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3513 return emulate_gp(ctxt, 0);
3515 return X86EMUL_CONTINUE;
3518 #define D(_y) { .flags = (_y) }
3519 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3520 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3521 .check_perm = (_p) }
3522 #define N D(NotImpl)
3523 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3524 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3525 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3526 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3527 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3528 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3529 #define II(_f, _e, _i) \
3530 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3531 #define IIP(_f, _e, _i, _p) \
3532 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3533 .check_perm = (_p) }
3534 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3536 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3537 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3538 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3539 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3540 #define I2bvIP(_f, _e, _i, _p) \
3541 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3543 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3544 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3545 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
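/*
 * For orientation, a sketch of what a single F6ALU() use denotes: six
 * consecutive table entries matching the classic ALU opcode block
 * (op Eb,Gb / op Ev,Gv / op Gb,Eb / op Gv,Ev / op AL,Ib / op rAX,Iz),
 * with Lock kept only on the memory-destination forms.
 */
#if 0 /* F6ALU(Lock, em_add) expands to: */
F(Lock | DstMem | SrcReg | ModRM | ByteOp, em_add),
F(Lock | DstMem | SrcReg | ModRM, em_add),
F(DstReg | SrcMem | ModRM | ByteOp, em_add),
F(DstReg | SrcMem | ModRM, em_add),
F(DstAcc | SrcImm | ByteOp, em_add),
F(DstAcc | SrcImm, em_add),
#endif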
3547 static const struct opcode group7_rm1[] = {
3548 DI(SrcNone | Priv, monitor),
3549 DI(SrcNone | Priv, mwait),
3553 static const struct opcode group7_rm3[] = {
3554 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3555 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3556 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3557 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3558 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3559 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3560 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3561 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3564 static const struct opcode group7_rm7[] = {
3566 DIP(SrcNone, rdtscp, check_rdtsc),
3570 static const struct opcode group1[] = {
3572 F(Lock | PageTable, em_or),
3575 F(Lock | PageTable, em_and),
3581 static const struct opcode group1A[] = {
3582 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3585 static const struct opcode group2[] = {
3586 F(DstMem | ModRM, em_rol),
3587 F(DstMem | ModRM, em_ror),
3588 F(DstMem | ModRM, em_rcl),
3589 F(DstMem | ModRM, em_rcr),
3590 F(DstMem | ModRM, em_shl),
3591 F(DstMem | ModRM, em_shr),
3592 F(DstMem | ModRM, em_shl),
3593 F(DstMem | ModRM, em_sar),
3596 static const struct opcode group3[] = {
3597 F(DstMem | SrcImm | NoWrite, em_test),
3598 F(DstMem | SrcImm | NoWrite, em_test),
3599 F(DstMem | SrcNone | Lock, em_not),
3600 F(DstMem | SrcNone | Lock, em_neg),
3601 F(DstXacc | Src2Mem, em_mul_ex),
3602 F(DstXacc | Src2Mem, em_imul_ex),
3603 F(DstXacc | Src2Mem, em_div_ex),
3604 F(DstXacc | Src2Mem, em_idiv_ex),
3607 static const struct opcode group4[] = {
3608 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3609 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3613 static const struct opcode group5[] = {
3614 F(DstMem | SrcNone | Lock, em_inc),
3615 F(DstMem | SrcNone | Lock, em_dec),
3616 I(SrcMem | Stack, em_grp45),
3617 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3618 I(SrcMem | Stack, em_grp45),
3619 I(SrcMemFAddr | ImplicitOps, em_grp45),
3620 I(SrcMem | Stack, em_grp45), D(Undefined),
3623 static const struct opcode group6[] = {
3626 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3627 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3631 static const struct group_dual group7 = { {
3632 II(Mov | DstMem | Priv, em_sgdt, sgdt),
3633 II(Mov | DstMem | Priv, em_sidt, sidt),
3634 II(SrcMem | Priv, em_lgdt, lgdt),
3635 II(SrcMem | Priv, em_lidt, lidt),
3636 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3637 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3638 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3640 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3642 N, EXT(0, group7_rm3),
3643 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3644 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3648 static const struct opcode group8[] = {
3650 F(DstMem | SrcImmByte | NoWrite, em_bt),
3651 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3652 F(DstMem | SrcImmByte | Lock, em_btr),
3653 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3656 static const struct group_dual group9 = { {
3657 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3659 N, N, N, N, N, N, N, N,
3662 static const struct opcode group11[] = {
3663 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3667 static const struct gprefix pfx_0f_6f_0f_7f = {
3668 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3671 static const struct gprefix pfx_vmovntpx = {
3672 I(0, em_mov), N, N, N,
3675 static const struct escape escape_d9 = { {
3676 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3679 N, N, N, N, N, N, N, N,
3681 N, N, N, N, N, N, N, N,
3683 N, N, N, N, N, N, N, N,
3685 N, N, N, N, N, N, N, N,
3687 N, N, N, N, N, N, N, N,
3689 N, N, N, N, N, N, N, N,
3691 N, N, N, N, N, N, N, N,
3693 N, N, N, N, N, N, N, N,
3696 static const struct escape escape_db = { {
3697 N, N, N, N, N, N, N, N,
3700 N, N, N, N, N, N, N, N,
3702 N, N, N, N, N, N, N, N,
3704 N, N, N, N, N, N, N, N,
3706 N, N, N, N, N, N, N, N,
3708 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3710 N, N, N, N, N, N, N, N,
3712 N, N, N, N, N, N, N, N,
3714 N, N, N, N, N, N, N, N,
3717 static const struct escape escape_dd = { {
3718 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3721 N, N, N, N, N, N, N, N,
3723 N, N, N, N, N, N, N, N,
3725 N, N, N, N, N, N, N, N,
3727 N, N, N, N, N, N, N, N,
3729 N, N, N, N, N, N, N, N,
3731 N, N, N, N, N, N, N, N,
3733 N, N, N, N, N, N, N, N,
3735 N, N, N, N, N, N, N, N,
3738 static const struct opcode opcode_table[256] = {
3740 F6ALU(Lock, em_add),
3741 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3742 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3744 F6ALU(Lock | PageTable, em_or),
3745 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3748 F6ALU(Lock, em_adc),
3749 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3750 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3752 F6ALU(Lock, em_sbb),
3753 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3754 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3756 F6ALU(Lock | PageTable, em_and), N, N,
3758 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3760 F6ALU(Lock, em_xor), N, N,
3762 F6ALU(NoWrite, em_cmp), N, N,
3764 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3766 X8(I(SrcReg | Stack, em_push)),
3768 X8(I(DstReg | Stack, em_pop)),
3770 I(ImplicitOps | Stack | No64, em_pusha),
3771 I(ImplicitOps | Stack | No64, em_popa),
3772 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3775 I(SrcImm | Mov | Stack, em_push),
3776 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3777 I(SrcImmByte | Mov | Stack, em_push),
3778 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3779 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3780 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3784 G(ByteOp | DstMem | SrcImm, group1),
3785 G(DstMem | SrcImm, group1),
3786 G(ByteOp | DstMem | SrcImm | No64, group1),
3787 G(DstMem | SrcImmByte, group1),
3788 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3789 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3791 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3792 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3793 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3794 D(ModRM | SrcMem | NoAccess | DstReg),
3795 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3798 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3800 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3801 I(SrcImmFAddr | No64, em_call_far), N,
3802 II(ImplicitOps | Stack, em_pushf, pushf),
3803 II(ImplicitOps | Stack, em_popf, popf),
3804 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3806 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3807 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3808 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3809 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3811 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3812 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3813 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3814 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
3816 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3818 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
3820 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
3821 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3822 I(ImplicitOps | Stack, em_ret),
3823 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3824 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3825 G(ByteOp, group11), G(0, group11),
3827 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
3828 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
3829 I(ImplicitOps | Stack, em_ret_far),
3830 D(ImplicitOps), DI(SrcImmByte, intn),
3831 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3833 G(Src2One | ByteOp, group2), G(Src2One, group2),
3834 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
3835 I(DstAcc | SrcImmUByte | No64, em_aam),
3836 I(DstAcc | SrcImmUByte | No64, em_aad),
3837 F(DstAcc | ByteOp | No64, em_salc),
3838 I(DstAcc | SrcXLat | ByteOp, em_mov),
3840 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
3842 X3(I(SrcImmByte, em_loop)),
3843 I(SrcImmByte, em_jcxz),
3844 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
3845 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
3847 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
3848 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3849 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
3850 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
3852 N, DI(ImplicitOps, icebp), N, N,
3853 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3854 G(ByteOp, group3), G(0, group3),
3856 D(ImplicitOps), D(ImplicitOps),
3857 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3858 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3861 static const struct opcode twobyte_table[256] = {
3863 G(0, group6), GD(0, &group7), N, N,
3864 N, I(ImplicitOps | EmulateOnUD, em_syscall),
3865 II(ImplicitOps | Priv, em_clts, clts), N,
3866 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3867 N, D(ImplicitOps | ModRM), N, N,
3869 N, N, N, N, N, N, N, N,
3870 D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
3872 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3873 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3874 IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
3875 IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
3877 N, N, N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
3880 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
3881 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3882 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
3883 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
3884 I(ImplicitOps | EmulateOnUD, em_sysenter),
3885 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
3887 N, N, N, N, N, N, N, N,
3889 X16(D(DstReg | SrcMem | ModRM | Mov)),
3891 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3896 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3901 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3905 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3907 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3908 II(ImplicitOps, em_cpuid, cpuid),
3909 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
3910 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
3911 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
3913 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3914 DI(ImplicitOps, rsm),
3915 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
3916 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
3917 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
3918 D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
3920 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
3921 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3922 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
3923 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3924 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3925 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3929 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
3930 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
3931 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3933 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
3934 N, D(DstMem | SrcReg | ModRM | Mov),
3935 N, N, N, GD(0, &group9),
3937 X8(I(DstReg, em_bswap)),
3939 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3941 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3943 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3946 static const struct gprefix three_byte_0f_38_f0 = {
3947 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
3950 static const struct gprefix three_byte_0f_38_f1 = {
3951 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
3955 * The insns below are indexed by the third opcode byte and selected by the mandatory prefix.
3958 static const struct opcode opcode_map_0f_38[256] = {
3960 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
3962 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
3964 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
3965 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
3984 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3988 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3994 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3995 unsigned size, bool sign_extension)
3997 int rc = X86EMUL_CONTINUE;
4001 op->addr.mem.ea = ctxt->_eip;
4002 /* NB. Immediates are sign-extended as necessary. */
4003 switch (op->bytes) {
4005 op->val = insn_fetch(s8, ctxt);
4008 op->val = insn_fetch(s16, ctxt);
4011 op->val = insn_fetch(s32, ctxt);
4014 op->val = insn_fetch(s64, ctxt);
4017 if (!sign_extension) {
4018 switch (op->bytes) {
4026 op->val &= 0xffffffff;
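/*
 * A minimal sketch of the extension contract above: immediates are
 * always fetched sign-extended, and zero extension is applied after the
 * fact by masking down to the operand size. Illustrative only:
 */
static inline u64 imm_extend_demo(s64 fetched, unsigned int size, bool sign)
{
if (sign || size == 8)
return fetched; /* e.g. (s8)0xff stays ~0ULL */
return fetched & ((1ULL << (size * 8)) - 1); /* e.g. becomes 0xff */
}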
4034 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4037 int rc = X86EMUL_CONTINUE;
4041 decode_register_operand(ctxt, op);
4044 rc = decode_imm(ctxt, op, 1, false);
4047 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4051 if ((ctxt->d & BitOp) && op == &ctxt->dst)
4052 fetch_bit_operand(ctxt);
4053 op->orig_val = op->val;
4056 ctxt->memop.bytes = 8;
4060 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4061 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4062 fetch_register_operand(op);
4063 op->orig_val = op->val;
4067 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4068 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4069 fetch_register_operand(op);
4070 op->orig_val = op->val;
4073 if (ctxt->d & ByteOp) {
4078 op->bytes = ctxt->op_bytes;
4079 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4080 fetch_register_operand(op);
4081 op->orig_val = op->val;
4085 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4087 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4088 op->addr.mem.seg = VCPU_SREG_ES;
4095 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4096 fetch_register_operand(op);
4100 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4103 rc = decode_imm(ctxt, op, 1, true);
4110 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4113 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4116 ctxt->memop.bytes = 1;
4117 if (ctxt->memop.type == OP_REG) {
4118 ctxt->memop.addr.reg = decode_register(ctxt,
4119 ctxt->modrm_rm, true);
4120 fetch_register_operand(&ctxt->memop);
4124 ctxt->memop.bytes = 2;
4127 ctxt->memop.bytes = 4;
4130 rc = decode_imm(ctxt, op, 2, false);
4133 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4137 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4139 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4140 op->addr.mem.seg = seg_override(ctxt);
4146 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4148 register_address(ctxt,
4149 reg_read(ctxt, VCPU_REGS_RBX) +
4150 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4151 op->addr.mem.seg = seg_override(ctxt);
4156 op->addr.mem.ea = ctxt->_eip;
4157 op->bytes = ctxt->op_bytes + 2;
4158 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4161 ctxt->memop.bytes = ctxt->op_bytes + 2;
4164 op->val = VCPU_SREG_ES;
4167 op->val = VCPU_SREG_CS;
4170 op->val = VCPU_SREG_SS;
4173 op->val = VCPU_SREG_DS;
4176 op->val = VCPU_SREG_FS;
4179 op->val = VCPU_SREG_GS;
4182 /* Special instructions do their own operand decoding. */
4184 op->type = OP_NONE; /* Disable writeback. */
4192 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4194 int rc = X86EMUL_CONTINUE;
4195 int mode = ctxt->mode;
4196 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4197 bool op_prefix = false;
4198 struct opcode opcode;
4200 ctxt->memop.type = OP_NONE;
4201 ctxt->memopp = NULL;
4202 ctxt->_eip = ctxt->eip;
4203 ctxt->fetch.start = ctxt->_eip;
4204 ctxt->fetch.end = ctxt->fetch.start + insn_len;
4205 ctxt->opcode_len = 1;
4207 memcpy(ctxt->fetch.data, insn, insn_len);
4210 case X86EMUL_MODE_REAL:
4211 case X86EMUL_MODE_VM86:
4212 case X86EMUL_MODE_PROT16:
4213 def_op_bytes = def_ad_bytes = 2;
4215 case X86EMUL_MODE_PROT32:
4216 def_op_bytes = def_ad_bytes = 4;
4218 #ifdef CONFIG_X86_64
4219 case X86EMUL_MODE_PROT64:
4225 return EMULATION_FAILED;
4228 ctxt->op_bytes = def_op_bytes;
4229 ctxt->ad_bytes = def_ad_bytes;
4231 /* Legacy prefixes. */
4233 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4234 case 0x66: /* operand-size override */
4236 /* switch between 2/4 bytes */
4237 ctxt->op_bytes = def_op_bytes ^ 6;
4239 case 0x67: /* address-size override */
4240 if (mode == X86EMUL_MODE_PROT64)
4241 /* switch between 4/8 bytes */
4242 ctxt->ad_bytes = def_ad_bytes ^ 12;
4244 /* switch between 2/4 bytes */
4245 ctxt->ad_bytes = def_ad_bytes ^ 6;
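/*
 * The XOR is a two-way toggle between the only two legal sizes:
 * 2 ^ 6 == 4 and 4 ^ 6 == 2 for the operand/address overrides, and
 * 8 ^ 12 == 4 (or 4 ^ 12 == 8) for the 64-bit address-size case above.
 */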
4247 case 0x26: /* ES override */
4248 case 0x2e: /* CS override */
4249 case 0x36: /* SS override */
4250 case 0x3e: /* DS override */
4251 set_seg_override(ctxt, (ctxt->b >> 3) & 3);
4253 case 0x64: /* FS override */
4254 case 0x65: /* GS override */
4255 set_seg_override(ctxt, ctxt->b & 7);
4257 case 0x40 ... 0x4f: /* REX */
4258 if (mode != X86EMUL_MODE_PROT64)
4260 ctxt->rex_prefix = ctxt->b;
4262 case 0xf0: /* LOCK */
4263 ctxt->lock_prefix = 1;
4265 case 0xf2: /* REPNE/REPNZ */
4266 case 0xf3: /* REP/REPE/REPZ */
4267 ctxt->rep_prefix = ctxt->b;
4273 /* Any legacy prefix after a REX prefix nullifies its effect. */
4275 ctxt->rex_prefix = 0;
4281 if (ctxt->rex_prefix & 8)
4282 ctxt->op_bytes = 8; /* REX.W */
4284 /* Opcode byte(s). */
4285 opcode = opcode_table[ctxt->b];
4286 /* Two-byte opcode? */
4287 if (ctxt->b == 0x0f) {
4288 ctxt->opcode_len = 2;
4289 ctxt->b = insn_fetch(u8, ctxt);
4290 opcode = twobyte_table[ctxt->b];
4292 /* 0F_38 opcode map */
4293 if (ctxt->b == 0x38) {
4294 ctxt->opcode_len = 3;
4295 ctxt->b = insn_fetch(u8, ctxt);
4296 opcode = opcode_map_0f_38[ctxt->b];
4299 ctxt->d = opcode.flags;
4301 if (ctxt->d & ModRM)
4302 ctxt->modrm = insn_fetch(u8, ctxt);
4304 while (ctxt->d & GroupMask) {
4305 switch (ctxt->d & GroupMask) {
4307 goffset = (ctxt->modrm >> 3) & 7;
4308 opcode = opcode.u.group[goffset];
4311 goffset = (ctxt->modrm >> 3) & 7;
4312 if ((ctxt->modrm >> 6) == 3)
4313 opcode = opcode.u.gdual->mod3[goffset];
4315 opcode = opcode.u.gdual->mod012[goffset];
4318 goffset = ctxt->modrm & 7;
4319 opcode = opcode.u.group[goffset];
4322 if (ctxt->rep_prefix && op_prefix)
4323 return EMULATION_FAILED;
4324 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4325 switch (simd_prefix) {
4326 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4327 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4328 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4329 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4333 if (ctxt->modrm > 0xbf)
4334 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4336 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4339 return EMULATION_FAILED;
4342 ctxt->d &= ~(u64)GroupMask;
4343 ctxt->d |= opcode.flags;
4346 ctxt->execute = opcode.u.execute;
4347 ctxt->check_perm = opcode.check_perm;
4348 ctxt->intercept = opcode.intercept;
4351 if (ctxt->d == 0 || (ctxt->d & NotImpl))
4352 return EMULATION_FAILED;
4354 if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
4355 return EMULATION_FAILED;
4357 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
4360 if (ctxt->d & Op3264) {
4361 if (mode == X86EMUL_MODE_PROT64)
4368 ctxt->op_bytes = 16;
4369 else if (ctxt->d & Mmx)
4372 /* ModRM and SIB bytes. */
4373 if (ctxt->d & ModRM) {
4374 rc = decode_modrm(ctxt, &ctxt->memop);
4375 if (!ctxt->has_seg_override)
4376 set_seg_override(ctxt, ctxt->modrm_seg);
4377 } else if (ctxt->d & MemAbs)
4378 rc = decode_abs(ctxt, &ctxt->memop);
4379 if (rc != X86EMUL_CONTINUE)
4382 if (!ctxt->has_seg_override)
4383 set_seg_override(ctxt, VCPU_SREG_DS);
4385 ctxt->memop.addr.mem.seg = seg_override(ctxt);
4387 if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
4388 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
4391 * Decode and fetch the source operand: register, memory
4394 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4395 if (rc != X86EMUL_CONTINUE)
4399 * Decode and fetch the second source operand: register, memory
4402 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4403 if (rc != X86EMUL_CONTINUE)
4406 /* Decode and fetch the destination operand: register or memory. */
4407 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4410 if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
4411 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4413 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4416 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4418 return ctxt->d & PageTable;
4421 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4423 /* The second termination condition only applies to REPE
4424 * and REPNE. Test whether the repeat string operation prefix is
4425 * REPE/REPZ or REPNE/REPNZ and, if so, check the
4426 * corresponding termination condition:
4427 * - if REPE/REPZ and ZF = 0 then done
4428 * - if REPNE/REPNZ and ZF = 1 then done
4430 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4431 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4432 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4433 ((ctxt->eflags & EFLG_ZF) == 0))
4434 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4435 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4441 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4445 ctxt->ops->get_fpu(ctxt);
4446 asm volatile("1: fwait \n\t"
4448 ".pushsection .fixup,\"ax\" \n\t"
4450 "movb $1, %[fault] \n\t"
4453 _ASM_EXTABLE(1b, 3b)
4454 : [fault]"+qm"(fault));
4455 ctxt->ops->put_fpu(ctxt);
4457 if (unlikely(fault))
4458 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4460 return X86EMUL_CONTINUE;
4463 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4466 if (op->type == OP_MM)
4467 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4470 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4472 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4473 if (!(ctxt->d & ByteOp))
4474 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4475 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4476 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4478 : "c"(ctxt->src2.val));
4479 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4480 if (!fop) /* exception is returned in fop variable */
4481 return emulate_de(ctxt);
4482 return X86EMUL_CONTINUE;
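/*
 * Size-dispatch note with a sketch: the stubs behind one fastop are laid
 * out contiguously, one FASTOP_SIZE slot per operand width in
 * byte/word/dword/qword order, so __ffs(bytes) (1->0, 2->1, 4->2, 8->3)
 * picks the slot, and a NULL fop on return is how a stub reports #DE
 * (see the comment above). Illustrative index math only, assuming
 * power-of-two operand sizes:
 */
static inline unsigned long fastop_slot_demo(unsigned int op_bytes)
{
return __ffs(op_bytes) * FASTOP_SIZE;
}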
4485 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4487 const struct x86_emulate_ops *ops = ctxt->ops;
4488 int rc = X86EMUL_CONTINUE;
4489 int saved_dst_type = ctxt->dst.type;
4491 ctxt->mem_read.pos = 0;
4493 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4494 (ctxt->d & Undefined)) {
4495 rc = emulate_ud(ctxt);
4499 /* LOCK prefix is allowed only with some instructions */
4500 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4501 rc = emulate_ud(ctxt);
4505 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4506 rc = emulate_ud(ctxt);
4510 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4511 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4512 rc = emulate_ud(ctxt);
4516 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4517 rc = emulate_nm(ctxt);
4521 if (ctxt->d & Mmx) {
4522 rc = flush_pending_x87_faults(ctxt);
4523 if (rc != X86EMUL_CONTINUE)
4526 * Now that we know the FPU is exception safe, we can fetch operands from it.
4529 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4530 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4531 if (!(ctxt->d & Mov))
4532 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4535 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4536 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4537 X86_ICPT_PRE_EXCEPT);
4538 if (rc != X86EMUL_CONTINUE)
4542 /* Privileged instruction can be executed only in CPL=0 */
4543 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4544 rc = emulate_gp(ctxt, 0);
4548 /* Instruction can only be executed in protected mode */
4549 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4550 rc = emulate_ud(ctxt);
4554 /* Do instruction specific permission checks */
4555 if (ctxt->check_perm) {
4556 rc = ctxt->check_perm(ctxt);
4557 if (rc != X86EMUL_CONTINUE)
4561 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4562 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4563 X86_ICPT_POST_EXCEPT);
4564 if (rc != X86EMUL_CONTINUE)
4568 if (ctxt->rep_prefix && (ctxt->d & String)) {
4569 /* All REP prefixes have the same first termination condition */
4570 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4571 ctxt->eip = ctxt->_eip;
4576 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4577 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4578 ctxt->src.valptr, ctxt->src.bytes);
4579 if (rc != X86EMUL_CONTINUE)
4581 ctxt->src.orig_val64 = ctxt->src.val64;
4584 if (ctxt->src2.type == OP_MEM) {
4585 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4586 &ctxt->src2.val, ctxt->src2.bytes);
4587 if (rc != X86EMUL_CONTINUE)
4591 if ((ctxt->d & DstMask) == ImplicitOps)
4595 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4596 /* optimisation - avoid slow emulated read if Mov */
4597 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4598 &ctxt->dst.val, ctxt->dst.bytes);
4599 if (rc != X86EMUL_CONTINUE)
4602 ctxt->dst.orig_val = ctxt->dst.val;
4606 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4607 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4608 X86_ICPT_POST_MEMACCESS);
4609 if (rc != X86EMUL_CONTINUE)
4613 if (ctxt->execute) {
4614 if (ctxt->d & Fastop) {
4615 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4616 rc = fastop(ctxt, fop);
4617 if (rc != X86EMUL_CONTINUE)
4621 rc = ctxt->execute(ctxt);
4622 if (rc != X86EMUL_CONTINUE)
4627 if (ctxt->opcode_len == 2)
4629 else if (ctxt->opcode_len == 3)
4630 goto threebyte_insn;
4633 case 0x63: /* movsxd */
4634 if (ctxt->mode != X86EMUL_MODE_PROT64)
4635 goto cannot_emulate;
4636 ctxt->dst.val = (s32) ctxt->src.val;
4638 case 0x70 ... 0x7f: /* jcc (short) */
4639 if (test_cc(ctxt->b, ctxt->eflags))
4640 jmp_rel(ctxt, ctxt->src.val);
4642 case 0x8d: /* lea r16/r32, m */
4643 ctxt->dst.val = ctxt->src.addr.mem.ea;
4645 case 0x90 ... 0x97: /* nop / xchg reg, rax */
4646 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
4650 case 0x98: /* cbw/cwde/cdqe */
4651 switch (ctxt->op_bytes) {
4652 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4653 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4654 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4657 case 0xcc: /* int3 */
4658 rc = emulate_int(ctxt, 3);
4660 case 0xcd: /* int n */
4661 rc = emulate_int(ctxt, ctxt->src.val);
4663 case 0xce: /* into */
4664 if (ctxt->eflags & EFLG_OF)
4665 rc = emulate_int(ctxt, 4);
4667 case 0xe9: /* jmp rel */
4668 case 0xeb: /* jmp rel short */
4669 jmp_rel(ctxt, ctxt->src.val);
4670 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4672 case 0xf4: /* hlt */
4673 ctxt->ops->halt(ctxt);
4675 case 0xf5: /* cmc */
4676 /* complement carry flag from eflags reg */
4677 ctxt->eflags ^= EFLG_CF;
4679 case 0xf8: /* clc */
4680 ctxt->eflags &= ~EFLG_CF;
4682 case 0xf9: /* stc */
4683 ctxt->eflags |= EFLG_CF;
4685 case 0xfc: /* cld */
4686 ctxt->eflags &= ~EFLG_DF;
4688 case 0xfd: /* std */
4689 ctxt->eflags |= EFLG_DF;
4692 goto cannot_emulate;
4695 if (rc != X86EMUL_CONTINUE)
4699 if (!(ctxt->d & NoWrite)) {
4700 rc = writeback(ctxt, &ctxt->dst);
4701 if (rc != X86EMUL_CONTINUE)
4704 if (ctxt->d & SrcWrite) {
4705 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4706 rc = writeback(ctxt, &ctxt->src);
4707 if (rc != X86EMUL_CONTINUE)
4712 * Restore dst type in case the decoding is reused
4713 * (happens for string instructions).
4715 ctxt->dst.type = saved_dst_type;
4717 if ((ctxt->d & SrcMask) == SrcSI)
4718 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
4720 if ((ctxt->d & DstMask) == DstDI)
4721 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
4723 if (ctxt->rep_prefix && (ctxt->d & String)) {
4725 struct read_cache *r = &ctxt->io_read;
4726 if ((ctxt->d & SrcMask) == SrcSI)
4727 count = ctxt->src.count;
4729 count = ctxt->dst.count;
4730 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
4733 if (!string_insn_completed(ctxt)) {
4735 * Re-enter the guest when the pio read-ahead buffer is empty
4736 * or, if it is not used, after every 1024 iterations.
4738 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
4739 (r->end == 0 || r->end != r->pos)) {
4741 * Reset the read cache. This usually happens before
4742 * decode, but since the instruction is restarted
4743 * we have to do it here.
4745 ctxt->mem_read.end = 0;
4746 writeback_registers(ctxt);
4747 return EMULATION_RESTART;
4749 goto done; /* skip rip writeback */
4753 ctxt->eip = ctxt->_eip;
4756 if (rc == X86EMUL_PROPAGATE_FAULT)
4757 ctxt->have_exception = true;
4758 if (rc == X86EMUL_INTERCEPTED)
4759 return EMULATION_INTERCEPTED;
4761 if (rc == X86EMUL_CONTINUE)
4762 writeback_registers(ctxt);
4764 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4768 case 0x09: /* wbinvd */
4769 (ctxt->ops->wbinvd)(ctxt);
4771 case 0x08: /* invd */
4772 case 0x0d: /* GrpP (prefetch) */
4773 case 0x18: /* Grp16 (prefetch/nop) */
4774 case 0x1f: /* nop */
4776 case 0x20: /* mov cr, reg */
4777 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4779 case 0x21: /* mov from dr to reg */
4780 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4782 case 0x40 ... 0x4f: /* cmov */
4783 ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4784 if (!test_cc(ctxt->b, ctxt->eflags))
4785 ctxt->dst.type = OP_NONE; /* no writeback */
4787 case 0x80 ... 0x8f: /* jnz rel, etc*/
4788 if (test_cc(ctxt->b, ctxt->eflags))
4789 jmp_rel(ctxt, ctxt->src.val);
4791 case 0x90 ... 0x9f: /* setcc r/m8 */
4792 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4794 case 0xae: /* clflush */
4796 case 0xb6 ... 0xb7: /* movzx */
4797 ctxt->dst.bytes = ctxt->op_bytes;
4798 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
4799 : (u16) ctxt->src.val;
4801 case 0xbe ... 0xbf: /* movsx */
4802 ctxt->dst.bytes = ctxt->op_bytes;
4803 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
4804 (s16) ctxt->src.val;
4806 case 0xc3: /* movnti */
4807 ctxt->dst.bytes = ctxt->op_bytes;
4808 ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4809 (u64) ctxt->src.val;
4812 goto cannot_emulate;
4817 if (rc != X86EMUL_CONTINUE)
4823 return EMULATION_FAILED;
4826 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
4828 invalidate_registers(ctxt);
4831 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
4833 writeback_registers(ctxt);