== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}
-static void mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
- size_t objsize, int min)
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+ size_t objsize, int min)
{
void *obj;
if (cache->nobjs >= min)
- return;
+ return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
obj = kzalloc(objsize, GFP_NOWAIT);
if (!obj)
- BUG();
+ return -ENOMEM;
cache->objects[cache->nobjs++] = obj;
}
+ return 0;
}
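
For reference, a minimal userspace sketch of the same fixed-size cache pattern (not part of the patch; all names here are hypothetical), showing how the top-up now reports -ENOMEM to its caller instead of hitting BUG():

#include <errno.h>
#include <stdlib.h>

#define CACHE_OBJS 4

struct obj_cache {
	int nobjs;
	void *objects[CACHE_OBJS];
};

/* Fill the cache up to its capacity; report failure instead of crashing. */
static int cache_topup(struct obj_cache *cache, size_t objsize, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < CACHE_OBJS) {
		obj = calloc(1, objsize);
		if (!obj)
			return -ENOMEM;	/* caller decides how to back out */
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

/* Release everything still sitting in the cache. */
static void cache_free(struct obj_cache *cache)
{
	while (cache->nobjs)
		free(cache->objects[--cache->nobjs]);
}

A caller checks the return value and unwinds, exactly as mmu_topup_memory_caches() does below.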
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}
-static void mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
- mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
- sizeof(struct kvm_pte_chain), 4);
- mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
- sizeof(struct kvm_rmap_desc), 1);
+ int r;
+
+ r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+ sizeof(struct kvm_pte_chain), 4);
+ if (r)
+ goto out;
+ r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+ sizeof(struct kvm_rmap_desc), 1);
+out:
+ return r;
}
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
gpa_t addr = gva;
hpa_t paddr;
+ int r;
- mmu_topup_memory_caches(vcpu);
+ r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ return r;
ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
r = init_kvm_mmu(vcpu);
if (r < 0)
goto out;
- mmu_topup_memory_caches(vcpu);
+ r = mmu_topup_memory_caches(vcpu);
out:
return r;
}
* - normal guest page fault due to the guest pte marked not present, not
* writable, or not executable
*
- * Returns: 1 if we need to emulate the instruction, 0 otherwise
+ * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
+ * a negative value on error.
*/
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
u32 error_code)
u64 *shadow_pte;
int fixed;
int write_pt = 0;
+ int r;
pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
- mmu_topup_memory_caches(vcpu);
+ r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ return r;
/*
* Look up the shadow pte for the faulting address.
u64 fault_address;
u32 error_code;
enum emulation_result er;
+ int r;
if (is_external_interrupt(exit_int_info))
push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
fault_address = vcpu->svm->vmcb->control.exit_info_2;
error_code = vcpu->svm->vmcb->control.exit_info_1;
- if (!kvm_mmu_page_fault(vcpu, fault_address, error_code)) {
+ r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
+ if (r < 0) {
+ spin_unlock(&vcpu->kvm->lock);
+ return r;
+ }
+ if (!r) {
spin_unlock(&vcpu->kvm->lock);
return 1;
}
u16 fs_selector;
u16 gs_selector;
u16 ldt_selector;
+ int r;
again:
do_interrupt_requests(vcpu, kvm_run);
return 0;
}
- if (handle_exit(vcpu, kvm_run)) {
+ r = handle_exit(vcpu, kvm_run);
+ if (r > 0) {
if (signal_pending(current)) {
++kvm_stat.signal_exits;
post_kvm_run_save(vcpu, kvm_run);
goto again;
}
post_kvm_run_save(vcpu, kvm_run);
- return 0;
+ return r;
}
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
unsigned long cr2, rip;
u32 vect_info;
enum emulation_result er;
+ int r;
vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
cr2 = vmcs_readl(EXIT_QUALIFICATION);
spin_lock(&vcpu->kvm->lock);
- if (!kvm_mmu_page_fault(vcpu, cr2, error_code)) {
+ r = kvm_mmu_page_fault(vcpu, cr2, error_code);
+ if (r < 0) {
+ spin_unlock(&vcpu->kvm->lock);
+ return r;
+ }
+ if (!r) {
spin_unlock(&vcpu->kvm->lock);
return 1;
}
u8 fail;
u16 fs_sel, gs_sel, ldt_sel;
int fs_gs_ldt_reload_needed;
+ int r;
again:
/*
if (fail) {
kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR);
+ r = 0;
} else {
if (fs_gs_ldt_reload_needed) {
load_ldt(ldt_sel);
}
vcpu->launched = 1;
kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
- if (kvm_handle_exit(kvm_run, vcpu)) {
+ r = kvm_handle_exit(kvm_run, vcpu);
+ if (r > 0) {
		/* Give scheduler a chance to reschedule. */
if (signal_pending(current)) {
++kvm_stat.signal_exits;
}
post_kvm_run_save(vcpu, kvm_run);
- return 0;
+ return r;
}
static void vmx_flush_tlb(struct kvm_vcpu *vcpu)