Prefix KVM's x86 low-level helpers (the segment/descriptor accessors and the fx* FPU wrappers) with kvm_ to keep them out of the global x86 namespace; with CONFIG_VMI enabled the paravirt headers define some of the same names, which broke the build. Fixes compilation with CONFIG_VMI enabled.
Signed-off-by: Avi Kivity <avi@qumranet.com>
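
For context, a minimal sketch of the failure mode the rename avoids. The macro body below is invented for illustration; only the identifier names match the kernel's:

/* suppose a paravirt header, included first, defines (body invented): */
#define load_ldt(ldt) paravirt_load_ldt(ldt)

/* KVM's old helper then breaks: the function-like macro fires on its
 * definition and rewrites it to
 *
 *     static inline void paravirt_load_ldt(u16 sel) { ... }
 *
 * silently defining the wrong symbol, or failing outright if
 * paravirt_load_ldt is already declared with another signature.
 * The renamed helper is immune, since no other name matches it: */
static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}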
sync_lapic_to_cr8(vcpu);
save_host_msrs(vcpu);
- fs_selector = read_fs();
- gs_selector = read_gs();
- ldt_selector = read_ldt();
+ fs_selector = kvm_read_fs();
+ gs_selector = kvm_read_gs();
+ ldt_selector = kvm_read_ldt();
svm->host_cr2 = kvm_read_cr2();
svm->host_dr6 = read_dr6();
svm->host_dr7 = read_dr7();
write_dr7(svm->host_dr7);
kvm_write_cr2(svm->host_cr2);
- load_fs(fs_selector);
- load_gs(gs_selector);
- load_ldt(ldt_selector);
+ kvm_load_fs(fs_selector);
+ kvm_load_gs(gs_selector);
+ kvm_load_ldt(ldt_selector);
load_host_msrs(vcpu);
reload_tss(vcpu);
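
(Around vmrun the host's cr2 and debug registers are saved and written back by hand: the VMCB swap covers the guest copies, but the host values are not preserved by the hardware.)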
struct descriptor_table gdt;
struct desc_struct *descs;
- get_gdt(&gdt);
+ kvm_get_gdt(&gdt);
descs = (void *)gdt.base;
descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
load_TR_desc();
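
load_TR_desc() ends in an ltr, and ltr #GPs if the TSS descriptor it references is still marked busy, which the host's descriptor is since it was loaded at CPU bringup. Forcing the type back to 9 (available) lets TR be reloaded with the full limit that VT dropped.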
* Set host fs and gs selectors. Unfortunately, 22.2.3 does not
* allow segment selectors with cpl > 0 or ti == 1.
*/
- vmx->host_state.ldt_sel = read_ldt();
+ vmx->host_state.ldt_sel = kvm_read_ldt();
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
- vmx->host_state.fs_sel = read_fs();
+ vmx->host_state.fs_sel = kvm_read_fs();
if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
vmx->host_state.fs_reload_needed = 0;
} else {
vmcs_write16(HOST_FS_SELECTOR, 0);
vmx->host_state.fs_reload_needed = 1;
}
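
A selector's low three bits are RPL (bits 1:0) and TI (bit 2), so (sel & 7) == 0 is exactly the "RPL 0, GDT descriptor" shape the VMCS host selector fields demand; anything else gets 0 written into the VMCS and is reloaded by hand on the way back to the host.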
- vmx->host_state.gs_sel = read_gs();
+ vmx->host_state.gs_sel = kvm_read_gs();
if (!(vmx->host_state.gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
else {
vmcs_write16(HOST_GS_SELECTOR, 0);
vmx->host_state.gs_ldt_reload_needed = 1;
}
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
if (vmx->host_state.fs_reload_needed)
- load_fs(vmx->host_state.fs_sel);
+ kvm_load_fs(vmx->host_state.fs_sel);
if (vmx->host_state.gs_ldt_reload_needed) {
- load_ldt(vmx->host_state.ldt_sel);
+ kvm_load_ldt(vmx->host_state.ldt_sel);
/*
* If we have to reload gs, we must take care to
* preserve our gs base.
*/
local_irq_save(flags);
- load_gs(vmx->host_state.gs_sel);
+ kvm_load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
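
Reloading %gs refetches the hidden gs base from the descriptor, and on x86_64 the kernel reaches its per-cpu data through MSR_GS_BASE; the saved base is therefore written straight back from the VMCS, with interrupts off across the pair so no handler can run while the base is stale.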
* Linux uses per-cpu TSS and GDT, so set these when switching
* processors.
*/
- vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
- get_gdt(&dt);
+ vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
+ kvm_get_gdt(&dt);
vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
- vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */
- vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */
+ vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
+ vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
#ifdef CONFIG_X86_64
rdmsrl(MSR_FS_BASE, a);
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
- get_idt(&dt);
+ kvm_get_idt(&dt);
vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
* allocate ram with GFP_KERNEL.
*/
if (!used_math())
- fx_save(&vcpu->arch.host_fx_image);
+ kvm_fx_save(&vcpu->arch.host_fx_image);
/* Initialize guest FPU by resetting ours and saving into guest's */
preempt_disable();
- fx_save(&vcpu->arch.host_fx_image);
- fx_finit();
- fx_save(&vcpu->arch.guest_fx_image);
- fx_restore(&vcpu->arch.host_fx_image);
+ kvm_fx_save(&vcpu->arch.host_fx_image);
+ kvm_fx_finit();
+ kvm_fx_save(&vcpu->arch.guest_fx_image);
+ kvm_fx_restore(&vcpu->arch.host_fx_image);
preempt_enable();
vcpu->arch.cr0 |= X86_CR0_ET;
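
The sequence above manufactures the guest's initial FPU image: save the host's live state, finit to architectural reset values, fxsave that clean state into guest_fx_image, then fxrstor the host state back, all under preempt_disable() so the FPU cannot be switched away mid-dance.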
return;
vcpu->guest_fpu_loaded = 1;
- fx_save(&vcpu->arch.host_fx_image);
- fx_restore(&vcpu->arch.guest_fx_image);
+ kvm_fx_save(&vcpu->arch.host_fx_image);
+ kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
return;
vcpu->guest_fpu_loaded = 0;
- fx_save(&vcpu->arch.guest_fx_image);
- fx_restore(&vcpu->arch.host_fx_image);
+ kvm_fx_save(&vcpu->arch.guest_fx_image);
+ kvm_fx_restore(&vcpu->arch.host_fx_image);
++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
return (struct kvm_mmu_page *)page_private(page);
}
-static inline u16 read_fs(void)
+static inline u16 kvm_read_fs(void)
{
u16 seg;
asm("mov %%fs, %0" : "=g"(seg));
return seg;
}
-static inline u16 read_gs(void)
+static inline u16 kvm_read_gs(void)
{
u16 seg;
asm("mov %%gs, %0" : "=g"(seg));
return seg;
}
-static inline u16 read_ldt(void)
+static inline u16 kvm_read_ldt(void)
{
u16 ldt;
asm("sldt %0" : "=g"(ldt));
return ldt;
}
-static inline void load_fs(u16 sel)
+static inline void kvm_load_fs(u16 sel)
{
asm("mov %0, %%fs" : : "rm"(sel));
}
-static inline void load_gs(u16 sel)
+static inline void kvm_load_gs(u16 sel)
{
asm("mov %0, %%gs" : : "rm"(sel));
}
-#ifndef load_ldt
-static inline void load_ldt(u16 sel)
+static inline void kvm_load_ldt(u16 sel)
{
asm("lldt %0" : : "rm"(sel));
}
-#endif
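
With the kvm_ prefix the old #ifndef guard can go: #ifndef only helps when the conflicting definition is a macro, while a conflicting function of the same name would sail straight past it, so the rename covers both cases.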
-static inline void get_idt(struct descriptor_table *table)
+static inline void kvm_get_idt(struct descriptor_table *table)
{
asm("sidt %0" : "=m"(*table));
}
-static inline void get_gdt(struct descriptor_table *table)
+static inline void kvm_get_gdt(struct descriptor_table *table)
{
asm("sgdt %0" : "=m"(*table));
}
-static inline unsigned long read_tr_base(void)
+static inline unsigned long kvm_read_tr_base(void)
{
u16 tr;
asm("str %0" : "=g"(tr));
return segment_base(tr);
}
#endif
-static inline void fx_save(struct i387_fxsave_struct *image)
+static inline void kvm_fx_save(struct i387_fxsave_struct *image)
{
asm("fxsave (%0)":: "r" (image));
}
-static inline void fx_restore(struct i387_fxsave_struct *image)
+static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
{
asm("fxrstor (%0)":: "r" (image));
}
-static inline void fx_finit(void)
+static inline void kvm_fx_finit(void)
{
asm("finit");
}
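
One caveat with these wrappers: fxsave and fxrstor require their 512-byte operand to be 16-byte aligned; struct i387_fxsave_struct carries a 16-byte alignment attribute in the kernel headers, so passing its address through an "r" operand is safe.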