KVM: do not release the error page
[platform/adaptation/renesas_rcar/renesas_kernel.git]
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48..7953a9e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN    9
+#define KVM_SAVE_MSRS_BEGIN    10
 static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
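
KVM_SAVE_MSRS_BEGIN counts the kvm-specific MSRs kept at the head of
msrs_to_save[], so the bump from 9 to 10 implies one new paravirtual MSR was
appended to that block, presumably MSR_KVM_PV_EOI_EN, which entered this list
in the same timeframe; the new entry itself falls outside the hunk's context
lines.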
@@ -1097,7 +1097,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
                 * For each generation, we track the original measured
                 * nanosecond time, offset, and write, so if TSCs are in
                 * sync, we can match exact offset, and if not, we can match
-                * exact software computaion in compute_guest_tsc()
+                * exact software computation in compute_guest_tsc()
                 *
                 * These values are tracked in kvm->arch.cur_xxx variables.
                 */
@@ -1504,7 +1504,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 {
        gpa_t gpa = data & ~0x3f;
 
-       /* Bits 2:5 are resrved, Should be zero */
+       /* Bits 2:5 are reserved, Should be zero */
        if (data & 0x3c)
                return 1;
 
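Only bits 2:5 must be clear here, which is why the check masks 0x3c rather
than 0x3e: bit 1 is a guest-controlled flag. A minimal sketch of the layout
this validation implies, using a hypothetical decode_async_pf_en() helper
(the bit-1 meaning is an assumption based on the KVM_ASYNC_PF_* flags, not
something visible in the hunk):

        /*
         * MSR_KVM_ASYNC_PF_EN, as enforced above:
         *   bit  0     enable async page faults
         *   bit  1     flag bit (assumed: also deliver events in kernel mode)
         *   bits 2:5   reserved, must be zero  -> the (data & 0x3c) check
         *   bits 6:63  gpa of a 64-byte-aligned data area -> data & ~0x3f
         */
        static int decode_async_pf_en(u64 data, gpa_t *gpa)
        {
                if (data & 0x3c)                /* reserved bits set */
                        return 1;
                *gpa = data & ~0x3fULL;         /* 64-byte-aligned, 0 disables */
                return 0;
        }
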
@@ -1639,10 +1639,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
 
-               if (is_error_page(vcpu->arch.time_page)) {
-                       kvm_release_page_clean(vcpu->arch.time_page);
+               if (is_error_page(vcpu->arch.time_page))
                        vcpu->arch.time_page = NULL;
-               }
+
                break;
        }
        case MSR_KVM_ASYNC_PF_EN:
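
This hunk carries the commit's core idiom: the page returned by gfn_to_page()
on failure no longer holds a reference the caller must drop, so the
kvm_release_page_clean() call goes away and the caller simply bails. The
resulting calling convention, sketched (the -EFAULT return is illustrative,
not taken from this file):

        struct page *page = gfn_to_page(kvm, gfn);

        if (is_error_page(page))        /* no reference to release any more */
                return -EFAULT;
        /* ... use page, then kvm_release_page_*() as usual on success ... */
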
@@ -1727,7 +1726,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                 * Ignore all writes to this no longer documented MSR.
                 * Writes are only relevant for old K7 processors,
                 * all pre-dating SVM, but a recommended workaround from
-                * AMD for these chips. It is possible to speicify the
+                * AMD for these chips. It is possible to specify the
                 * affected processor models on the command line, hence
                 * the need to ignore the workaround.
                 */
@@ -2636,7 +2635,6 @@ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
        if (!vcpu->arch.time_page)
                return -EINVAL;
        src->flags |= PVCLOCK_GUEST_STOPPED;
-       mark_page_dirty(vcpu->kvm, vcpu->arch.time >> PAGE_SHIFT);
        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
        return 0;
 }
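
Dropping the explicit mark_page_dirty() looks intentional: the
KVM_REQ_CLOCK_UPDATE request queued on the next line forces
kvm_guest_time_update() to rewrite the pvclock area on the next guest entry,
and that update path marks the time page dirty itself, so the
PVCLOCK_GUEST_STOPPED flag still reaches the dirty log. (That reading is an
inference from the request/update flow, not stated in the hunk.)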
@@ -3087,7 +3085,7 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
        if (!kvm->arch.vpit)
                return -ENXIO;
        mutex_lock(&kvm->arch.vpit->pit_state.lock);
-       kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
+       kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
        return 0;
 }
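
The reinject flag is now read straight from kvm_kpit_state instead of from
its embedded pit_timer, which suggests a companion change hoisted the field
one level up in the PIT state structures; only the access site is visible in
this hunk.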
@@ -3170,6 +3168,16 @@ out:
        return r;
 }
 
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
+{
+       if (!irqchip_in_kernel(kvm))
+               return -ENXIO;
+
+       irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+                                       irq_event->irq, irq_event->level);
+       return 0;
+}
+
 long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
 {
@@ -3276,29 +3284,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
        create_pit_unlock:
                mutex_unlock(&kvm->slots_lock);
                break;
-       case KVM_IRQ_LINE_STATUS:
-       case KVM_IRQ_LINE: {
-               struct kvm_irq_level irq_event;
-
-               r = -EFAULT;
-               if (copy_from_user(&irq_event, argp, sizeof irq_event))
-                       goto out;
-               r = -ENXIO;
-               if (irqchip_in_kernel(kvm)) {
-                       __s32 status;
-                       status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
-                                       irq_event.irq, irq_event.level);
-                       if (ioctl == KVM_IRQ_LINE_STATUS) {
-                               r = -EFAULT;
-                               irq_event.status = status;
-                               if (copy_to_user(argp, &irq_event,
-                                                       sizeof irq_event))
-                                       goto out;
-                       }
-                       r = 0;
-               }
-               break;
-       }
        case KVM_GET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
                struct kvm_irqchip *chip;
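
The two hunks above are one refactor: the per-arch KVM_IRQ_LINE /
KVM_IRQ_LINE_STATUS handling is deleted from kvm_arch_vm_ioctl() and replaced
by the small kvm_vm_ioctl_irq_line() helper, with the status now carried
inside struct kvm_irq_level instead of a local __s32. The common-code caller
would then look roughly like this, reconstructed from the removed block (the
exact kvm_main.c version may differ in detail):

        case KVM_IRQ_LINE_STATUS:
        case KVM_IRQ_LINE: {
                struct kvm_irq_level irq_event;

                r = -EFAULT;
                if (copy_from_user(&irq_event, argp, sizeof irq_event))
                        goto out;

                r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
                if (r)
                        goto out;

                /* only the _STATUS variant copies the result back */
                r = -EFAULT;
                if (ioctl == KVM_IRQ_LINE_STATUS &&
                    copy_to_user(argp, &irq_event, sizeof irq_event))
                        goto out;

                r = 0;
                break;
        }
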
@@ -3959,10 +3944,8 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
                goto emul_write;
 
        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-       if (is_error_page(page)) {
-               kvm_release_page_clean(page);
+       if (is_error_page(page))
                goto emul_write;
-       }
 
        kaddr = kmap_atomic(page);
        kaddr += offset_in_page(gpa);
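
Same error-page idiom as in the MSR_KVM_SYSTEM_TIME hunk: the emulated
cmpxchg path now falls through to emul_write on an error page without first
dropping a reference it never held.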
@@ -4496,7 +4479,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
 
        /*
         * if emulation was due to access to shadowed page table
-        * and it failed try to unshadow page and re-entetr the
+        * and it failed try to unshadow page and re-enter the
         * guest to let CPU execute the instruction.
         */
        if (kvm_mmu_unprotect_page_virt(vcpu, gva))
@@ -5592,7 +5575,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
                /*
                 * We are here if userspace calls get_regs() in the middle of
                 * instruction emulation. Registers state needs to be copied
-                * back from emulation context to vcpu. Usrapace shouldn't do
+                * back from emulation context to vcpu. Userspace shouldn't do
                 * that usually, but some bad designed PV devices (vmware
                 * backdoor interface) need this to work
                 */
@@ -6121,7 +6104,7 @@ int kvm_arch_hardware_enable(void *garbage)
         * as we reset last_host_tsc on all VCPUs to stop this from being
         * called multiple times (one for each physical CPU bringup).
         *
-        * Platforms with unnreliable TSCs don't have to deal with this, they
+        * Platforms with unreliable TSCs don't have to deal with this, they
         * will be compensated by the logic in vcpu_load, which sets the TSC to
         * catchup mode.  This will catchup all VCPUs to real time, but cannot
         * guarantee that they stay in perfect synchronization.
@@ -6317,10 +6300,18 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 {
        int i;
 
-       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-               if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
-                       kvm_kvfree(free->arch.lpage_info[i]);
-                       free->arch.lpage_info[i] = NULL;
+       for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+               if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
+                       kvm_kvfree(free->arch.rmap[i]);
+                       free->arch.rmap[i] = NULL;
+               }
+               if (i == 0)
+                       continue;
+
+               if (!dont || free->arch.lpage_info[i - 1] !=
+                            dont->arch.lpage_info[i - 1]) {
+                       kvm_kvfree(free->arch.lpage_info[i - 1]);
+                       free->arch.lpage_info[i - 1] = NULL;
                }
        }
 }
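
This function and kvm_arch_create_memslot() below now manage two per-slot
arrays: rmap[] indexed over every page-size level, and lpage_info[] shifted
down one slot because the 4K level needs no large-page bookkeeping. That
indexing implies an x86 memslot arch block shaped roughly like the following
(reconstructed from the loops here, not copied from the header):

        struct kvm_arch_memory_slot {
                unsigned long *rmap[KVM_NR_PAGE_SIZES];
                struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        };

i.e. the reverse-map pointers have been pushed out of the generic
struct kvm_memory_slot into arch-private state.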
@@ -6329,23 +6320,30 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 {
        int i;
 
-       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+       for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
                unsigned long ugfn;
                int lpages;
-               int level = i + 2;
+               int level = i + 1;
 
                lpages = gfn_to_index(slot->base_gfn + npages - 1,
                                      slot->base_gfn, level) + 1;
 
-               slot->arch.lpage_info[i] =
-                       kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
-               if (!slot->arch.lpage_info[i])
+               slot->arch.rmap[i] =
+                       kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
+               if (!slot->arch.rmap[i])
+                       goto out_free;
+               if (i == 0)
+                       continue;
+
+               slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
+                                       sizeof(*slot->arch.lpage_info[i - 1]));
+               if (!slot->arch.lpage_info[i - 1])
                        goto out_free;
 
                if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-                       slot->arch.lpage_info[i][0].write_count = 1;
+                       slot->arch.lpage_info[i - 1][0].write_count = 1;
                if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-                       slot->arch.lpage_info[i][lpages - 1].write_count = 1;
+                       slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
                ugfn = slot->userspace_addr >> PAGE_SHIFT;
                /*
                 * If the gfn and userspace address are not aligned wrt each
@@ -6357,16 +6355,21 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
                        unsigned long j;
 
                        for (j = 0; j < lpages; ++j)
-                               slot->arch.lpage_info[i][j].write_count = 1;
+                               slot->arch.lpage_info[i - 1][j].write_count = 1;
                }
        }
 
        return 0;
 
 out_free:
-       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-               kvm_kvfree(slot->arch.lpage_info[i]);
-               slot->arch.lpage_info[i] = NULL;
+       for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+               kvm_kvfree(slot->arch.rmap[i]);
+               slot->arch.rmap[i] = NULL;
+               if (i == 0)
+                       continue;
+
+               kvm_kvfree(slot->arch.lpage_info[i - 1]);
+               slot->arch.lpage_info[i - 1] = NULL;
        }
        return -ENOMEM;
 }
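
Note the index shift that runs through the whole loop: i now covers all page
sizes with level = i + 1, so i == 0 is the 4K level (rmap only, no
lpage_info) and lpage_info[i - 1] starts at 2M. A worked example, assuming
the usual x86 KVM_NR_PAGE_SIZES of 3 and a slot with base_gfn = 256,
npages = 1024 (last gfn 1279):

        /* level 1 (4K):  lpages = (1279 - 256) + 1             = 1024 */
        /* level 2 (2M):  lpages = (1279 >> 9) - (256 >> 9) + 1 =    3 */
        /* level 3 (1G):  lpages = (1279 >> 18) - (256 >> 18) + 1 =  1 */

        /*
         * 256 & 511 != 0 and 1280 & 511 != 0, so the head and tail 2M
         * entries get write_count = 1: they are only partially covered
         * by the slot and must never be mapped with a large page.
         */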
@@ -6385,10 +6388,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                map_flags = MAP_SHARED | MAP_ANONYMOUS;
 
        /*To keep backward compatibility with older userspace,
-        *x86 needs to hanlde !user_alloc case.
+        *x86 needs to handle !user_alloc case.
         */
        if (!user_alloc) {
-               if (npages && !old.rmap) {
+               if (npages && !old.npages) {
                        unsigned long userspace_addr;
 
                        userspace_addr = vm_mmap(NULL, 0,
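
With the rmap gone from the generic memslot, "was this slot previously
populated?" can no longer be tested via old.rmap; old.npages is the remaining
signal, and the commit-side hunk below makes the matching substitution.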
@@ -6416,7 +6419,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
        int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
 
-       if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
+       if (!user_alloc && !old.user_alloc && old.npages && !npages) {
                int ret;
 
                ret = vm_munmap(old.userspace_addr,