From: Borislav Petkov
Date: Tue, 29 Oct 2013 11:54:56 +0000 (+0100)
Subject: kvm: Emulate MOVBE
X-Git-Tag: accepted/tizen/common/20141203.182822~1116^2~28
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=84cffe499b9418d6c3b4de2ad9599cc2ec50c607;p=platform%2Fkernel%2Flinux-arm64.git

kvm: Emulate MOVBE

This basically came from the need to be able to boot 32-bit Atom SMP
guests on an AMD host, i.e. a host which doesn't support MOVBE. As a
matter of fact, qemu has since recently received MOVBE support but we
cannot share that with kvm emulation and thus we have to do this in the
host. We're waay faster in kvm anyway. :-)

So, we piggyback on the #UD path and emulate the MOVBE functionality.
With it, an 8-core SMP guest boots in under 6 seconds.

Also, requesting MOVBE emulation needs to happen explicitly to work,
i.e. qemu -cpu n270,+movbe...

Just FYI, a fairly straight-forward boot of a MOVBE-enabled 3.9-rc6+
kernel in kvm executes MOVBE ~60K times.

Signed-off-by: Andre Przywara
Signed-off-by: Borislav Petkov
Signed-off-by: Paolo Bonzini
---

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 78a4439..86d5756 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -222,6 +222,22 @@ static bool supported_xcr0_bit(unsigned bit)
 static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
				    u32 func, u32 index, int *nent, int maxnent)
 {
+	switch (func) {
+	case 0:
+		entry->eax = 1;		/* only one leaf currently */
+		++*nent;
+		break;
+	case 1:
+		entry->ecx = F(MOVBE);
+		++*nent;
+		break;
+	default:
+		break;
+	}
+
+	entry->function = func;
+	entry->index = index;
+
 	return 0;
 }
 
@@ -593,7 +609,7 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
 		return -EINVAL;
 
 	r = -ENOMEM;
-	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
+	cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
 	if (!cpuid_entries)
 		goto out;
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6c5cfe9..8e2a07b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2961,6 +2961,46 @@ static int em_mov(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+#define FFL(x) bit(X86_FEATURE_##x)
+
+static int em_movbe(struct x86_emulate_ctxt *ctxt)
+{
+	u32 ebx, ecx, edx, eax = 1;
+	u16 tmp;
+
+	/*
+	 * Check MOVBE is set in the guest-visible CPUID leaf.
+	 */
+	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+	if (!(ecx & FFL(MOVBE)))
+		return emulate_ud(ctxt);
+
+	switch (ctxt->op_bytes) {
+	case 2:
+		/*
+		 * From MOVBE definition: "...When the operand size is 16 bits,
+		 * the upper word of the destination register remains unchanged
+		 * ..."
+		 *
+		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
+		 * rules so we have to do the operation almost per hand.
+		 */
+		tmp = (u16)ctxt->src.val;
+		ctxt->dst.val &= ~0xffffUL;
+		ctxt->dst.val |= (unsigned long)swab16(tmp);
+		break;
+	case 4:
+		ctxt->dst.val = swab32((u32)ctxt->src.val);
+		break;
+	case 8:
+		ctxt->dst.val = swab64(ctxt->src.val);
+		break;
+	default:
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+	return X86EMUL_CONTINUE;
+}
+
 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
 {
 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
@@ -3893,11 +3933,11 @@ static const struct opcode twobyte_table[256] = {
 };
 
 static const struct gprefix three_byte_0f_38_f0 = {
-	N, N, N, N
+	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
 };
 
 static const struct gprefix three_byte_0f_38_f1 = {
-	N, N, N, N
+	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
 };
 
 /*
@@ -3907,8 +3947,13 @@ static const struct gprefix three_byte_0f_38_f1 = {
 static const struct opcode opcode_map_0f_38[256] = {
 	/* 0x00 - 0x7f */
 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
-	/* 0x80 - 0xff */
-	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N)
+	/* 0x80 - 0xef */
+	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
+	/* 0xf0 - 0xf1 */
+	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
+	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
+	/* 0xf2 - 0xff */
+	N, N, X4(N), X8(N)
 };
 
 #undef D
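
For reference, a minimal user-space sketch (not part of the patch) of the destination-value handling that em_movbe() above performs for the three MOVBE operand sizes. The helper name movbe_dst() is made up for illustration, and the compiler byte-swap builtins stand in for the kernel's swab16/32/64 helpers:

/*
 * Illustrative only -- not part of the patch. Mirrors the operand-size
 * switch in em_movbe(): for 16-bit operands only the low word of the
 * destination is replaced with the byte-swapped source, the remaining
 * bits are left untouched; for 32- and 64-bit operands the whole value
 * is the byte-swapped source.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t movbe_dst(uint64_t dst, uint64_t src, int op_bytes)
{
	switch (op_bytes) {
	case 2:
		/* swap the low word, keep the upper bits of dst */
		return (dst & ~0xffffULL) |
		       (uint64_t)__builtin_bswap16((uint16_t)src);
	case 4:
		return __builtin_bswap32((uint32_t)src);
	case 8:
		return __builtin_bswap64(src);
	default:
		return dst;	/* invalid size; the emulator faults here */
	}
}

int main(void)
{
	uint64_t dst = 0x1122334455667788ULL;

	printf("2: %#018" PRIx64 "\n", movbe_dst(dst, 0xaabb, 2));
	printf("4: %#018" PRIx64 "\n", movbe_dst(dst, 0xaabbccdd, 4));
	printf("8: %#018" PRIx64 "\n", movbe_dst(dst, dst, 8));
	return 0;
}

The 2-byte case prints 0x112233445566bbaa: the low word is byte-swapped while the rest of the destination stays unchanged, which is the property the strict-aliasing comment in the patch is working around.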