KVM: x86: Convert vapic synchronization to _cached functions (CVE-2013-6368)
author Andy Honig <ahonig@google.com>
	Wed, 20 Nov 2013 18:23:22 +0000 (10:23 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
	Thu, 12 Dec 2013 21:39:46 +0000 (22:39 +0100)
In kvm_lapic_sync_from_vapic and kvm_lapic_sync_to_vapic there is the
potential to corrupt kernel memory if userspace provides an address that
is at the end of a page.  This patch converts those functions to use
kvm_write_guest_cached and kvm_read_guest_cached.  It also checks the
vapic_addr specified by userspace during ioctl processing and returns
an error to userspace if the address is not a valid GPA.

This is generally not guest triggerable, because the required write is
done by firmware that runs before the guest.  Also, it only affects AMD
processors and older Intel processors that do not have the FlexPriority feature
(unless you disable FlexPriority, of course; then newer processors are
also affected).

Fixes: b93463aa59d6 ("KVM: Accelerated apic support")

Reported-by: Andrew Honig <ahonig@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/x86.c

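Before the diff, a brief illustration may help.  The sketch below is simplified
(not the exact removed code) and shows why a vapic address at the very end of a
page lets the old mapping-based access run past the mapped page, and how the
cached accessor used by this patch avoids it:

	/*
	 * Old access pattern (simplified from the lines removed in lapic.c).
	 * kmap_atomic() maps exactly one page, but the offset is whatever
	 * userspace chose: with offset_in_page(vapic_addr) == PAGE_SIZE - 1
	 * the 32-bit store below spills three bytes past the mapping into
	 * adjacent kernel memory.
	 */
	void *vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
	*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
	kunmap_atomic(vapic);

	/*
	 * New access pattern: the gfn_to_hva_cache is initialized (and
	 * validated against the memslots) once in kvm_lapic_set_vapic_addr,
	 * and the write then goes through the guest's user-space mapping
	 * rather than through a raw kernel pointer.
	 */
	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache,
			       &data, sizeof(u32));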
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 89b52ec..b8bec45 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1692,7 +1692,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
 {
        u32 data;
-       void *vapic;
 
        if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
                apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
@@ -1700,9 +1699,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
        if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
                return;
 
-       vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
-       data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
-       kunmap_atomic(vapic);
+       kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+                               sizeof(u32));
 
        apic_set_tpr(vcpu->arch.apic, data & 0xff);
 }
@@ -1738,7 +1736,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
        u32 data, tpr;
        int max_irr, max_isr;
        struct kvm_lapic *apic = vcpu->arch.apic;
-       void *vapic;
 
        apic_sync_pv_eoi_to_guest(vcpu, apic);
 
@@ -1754,18 +1751,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
                max_isr = 0;
        data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-       vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
-       *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
-       kunmap_atomic(vapic);
+       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+                               sizeof(u32));
 }
 
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 {
-       vcpu->arch.apic->vapic_addr = vapic_addr;
-       if (vapic_addr)
+       if (vapic_addr) {
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+                                       &vcpu->arch.apic->vapic_cache,
+                                       vapic_addr, sizeof(u32)))
+                       return -EINVAL;
                __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
-       else
+       } else {
                __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+       }
+
+       vcpu->arch.apic->vapic_addr = vapic_addr;
+       return 0;
 }
 
 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c730ac9..c8b0d0d 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -34,7 +34,7 @@ struct kvm_lapic {
         */
        void *regs;
        gpa_t vapic_addr;
-       struct page *vapic_page;
+       struct gfn_to_hva_cache vapic_cache;
        unsigned long pending_events;
        unsigned int sipi_vector;
 };
@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
 
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21ef1ba..5d004da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&va, argp, sizeof va))
                        goto out;
-               r = 0;
-               kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+               r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
                break;
        }
        case KVM_X86_SETUP_MCE: {
@@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
                        !kvm_event_needs_reinjection(vcpu);
 }
 
-static int vapic_enter(struct kvm_vcpu *vcpu)
-{
-       struct kvm_lapic *apic = vcpu->arch.apic;
-       struct page *page;
-
-       if (!apic || !apic->vapic_addr)
-               return 0;
-
-       page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-       if (is_error_page(page))
-               return -EFAULT;
-
-       vcpu->arch.apic->vapic_page = page;
-       return 0;
-}
-
-static void vapic_exit(struct kvm_vcpu *vcpu)
-{
-       struct kvm_lapic *apic = vcpu->arch.apic;
-       int idx;
-
-       if (!apic || !apic->vapic_addr)
-               return;
-
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-       kvm_release_page_dirty(apic->vapic_page);
-       mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 {
        int max_irr, tpr;
@@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        struct kvm *kvm = vcpu->kvm;
 
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-       r = vapic_enter(vcpu);
-       if (r) {
-               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-               return r;
-       }
 
        r = 1;
        while (r > 0) {
@@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 
        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 
-       vapic_exit(vcpu);
-
        return r;
 }