* gfn_to_pfn caches that cover the region.
*/
read_lock_irqsave(&gpc1->lock, flags);
- while (!kvm_gpc_check(gpc1, gpc1->gpa, user_len1)) {
+ while (!kvm_gpc_check(gpc1, user_len1)) {
read_unlock_irqrestore(&gpc1->lock, flags);
/* When invoked from kvm_sched_out() we cannot sleep */
if (atomic)
return;
- if (kvm_gpc_refresh(gpc1, gpc1->gpa, user_len1))
+ if (kvm_gpc_refresh(gpc1, user_len1))
return;
read_lock_irqsave(&gpc1->lock, flags);
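Every caller in this patch takes the same shape once @gpa is gone: check under the read lock, and on failure drop the lock, refresh against the gpc->gpa the cache already stores, and retry. A minimal sketch of that loop under the new signatures; update_runstate_area() and copy_out() are hypothetical stand-ins for the real Xen runstate code:

static void update_runstate_area(struct gfn_to_pfn_cache *gpc,
				 unsigned long len, bool atomic)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Refresh may sleep, e.g. to fault pages in. */
		if (atomic)
			return;

		if (kvm_gpc_refresh(gpc, len))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	copy_out(gpc->khva, len);	/* hypothetical payload copy */

	read_unlock_irqrestore(&gpc->lock, flags);
}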
*/
read_lock(&gpc2->lock);
- if (!kvm_gpc_check(gpc2, gpc2->gpa, user_len2)) {
+ if (!kvm_gpc_check(gpc2, user_len2)) {
read_unlock(&gpc2->lock);
read_unlock_irqrestore(&gpc1->lock, flags);
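When the runstate area straddles a page boundary, two caches are live at once, and the hunk above fixes a lock order: gpc1 is always the outer lock and the only one taken irqsave, gpc2 nests inside with a plain read_lock, and the two are dropped in reverse order on failure. A sketch of that discipline; lock_both_caches() is a hypothetical helper, not from the patch:

static bool lock_both_caches(struct gfn_to_pfn_cache *gpc1,
			     struct gfn_to_pfn_cache *gpc2,
			     unsigned long len1, unsigned long len2,
			     unsigned long *flags)
{
	/* gpc1 is always the outer lock and the only irqsave one. */
	read_lock_irqsave(&gpc1->lock, *flags);
	if (!kvm_gpc_check(gpc1, len1)) {
		read_unlock_irqrestore(&gpc1->lock, *flags);
		return false;
	}

	read_lock(&gpc2->lock);
	if (!kvm_gpc_check(gpc2, len2)) {
		/* Unlock in reverse order, then refresh and retry. */
		read_unlock(&gpc2->lock);
		read_unlock_irqrestore(&gpc1->lock, *flags);
		return false;
	}

	return true;
}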
* little more honest about it.
*/
read_lock_irqsave(&gpc->lock, flags);
- while (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+ while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
read_unlock_irqrestore(&gpc->lock, flags);
- if (kvm_gpc_refresh(gpc, gpc->gpa, sizeof(struct vcpu_info)))
+ if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
return;
read_lock_irqsave(&gpc->lock, flags);
sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
read_lock_irqsave(&gpc->lock, flags);
- while (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+ while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
read_unlock_irqrestore(&gpc->lock, flags);
/*
if (in_atomic() || !task_is_running(current))
return 1;
- if (kvm_gpc_refresh(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+ if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
/*
* If this failed, userspace has screwed up the
* vcpu_info mapping. No interrupts for you.
read_lock_irqsave(&gpc->lock, flags);
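The interrupt-delivery path can be entered from contexts that must not sleep, so it gates the refresh on in_atomic() and task_is_running() rather than on an explicit atomic flag: returning 1 punts delivery to a context that can block, while a failed refresh means userspace tore down the vcpu_info mapping and the event is simply dropped. A condensed sketch; the function name is invented:

static int set_upcall_pending(struct kvm_vcpu *vcpu)
{
	struct gfn_to_pfn_cache *gpc = &vcpu->arch.xen.vcpu_info_cache;
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Only a preemptible, running task may block in refresh. */
		if (in_atomic() || !task_is_running(current))
			return 1;	/* caller retries from a sleepable context */

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return 0;	/* broken mapping: no interrupts for you */

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* ... set evtchn_upcall_pending through gpc->khva ... */

	read_unlock_irqrestore(&gpc->lock, flags);
	return 0;
}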
idx = srcu_read_lock(&kvm->srcu);
- if (!kvm_gpc_check(gpc, gpc->gpa, PAGE_SIZE))
+ if (!kvm_gpc_check(gpc, PAGE_SIZE))
goto out_rcu;
ret = false;
idx = srcu_read_lock(&kvm->srcu);
read_lock_irqsave(&gpc->lock, flags);
- if (!kvm_gpc_check(gpc, gpc->gpa, PAGE_SIZE))
+ if (!kvm_gpc_check(gpc, PAGE_SIZE))
goto out_rcu;
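Both shared_info hunks above bracket the check with srcu_read_lock(): kvm_gpc_check() compares gpc->generation against kvm_memslots(gpc->kvm)->generation, and that comparison is only stable inside an SRCU read-side critical section. A sketch of the bracketing, with test_shinfo() as a hypothetical wrapper:

static bool test_shinfo(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	unsigned long flags;
	bool ret = false;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	read_lock_irqsave(&gpc->lock, flags);

	if (kvm_gpc_check(gpc, PAGE_SIZE)) {
		/* ... read the shared_info page through gpc->khva ... */
		ret = true;
	}

	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}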
if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
gpc = &vcpu->arch.xen.vcpu_info_cache;
read_lock_irqsave(&gpc->lock, flags);
- if (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+ if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
/*
* Could not access the vcpu_info. Set the bit in-kernel
* and prod the vCPU to deliver it for itself.
break;
idx = srcu_read_lock(&kvm->srcu);
- rc = kvm_gpc_refresh(gpc, gpc->gpa, PAGE_SIZE);
+ rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
srcu_read_unlock(&kvm->srcu, idx);
} while (!rc);
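The do/while above is the slow path: the fast delivery attempt reports -EWOULDBLOCK when the shared_info cache is stale, the cache is refreshed under SRCU (refresh itself walks the memslots), and the fast path is retried until it yields a definitive result. A sketch, with try_deliver_fast() as a hypothetical stand-in for the fast path:

	do {
		rc = try_deliver_fast(kvm);	/* hypothetical fast path */
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while (!rc);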
* kvm_gpc_check - check validity of a gfn_to_pfn_cache.
*
* @gpc: struct gfn_to_pfn_cache object.
- * @gpa: current guest physical address to map.
* @len: sanity check; the range being accessed must fit in a single page.
*
* @return: %true if the cache is still valid and the address matches.
* Callers in IN_GUEST_MODE may do so without locking, although they should
* still hold a read lock on kvm->srcu for the memslot checks.
*/
-bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
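For the IN_GUEST_MODE case mentioned above, a caller may legitimately skip gpc->lock; what it may not skip is the SRCU read-side section, since the memslot generation check depends on it. A minimal sketch; the wrapper name is invented:

static bool cache_usable_in_guest_mode(struct kvm *kvm,
				       struct gfn_to_pfn_cache *gpc)
{
	int idx = srcu_read_lock(&kvm->srcu);
	bool ok = kvm_gpc_check(gpc, PAGE_SIZE);

	srcu_read_unlock(&kvm->srcu, idx);
	return ok;
}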
/**
* kvm_gpc_refresh - update a previously initialized cache.
*
* @gpc: struct gfn_to_pfn_cache object.
- * @gpa: updated guest physical address to map.
* @len: sanity check; the range being accessed must fit in a single page.
-
+ *
* @return: 0 for success.
* -EINVAL for a mapping which would cross a page boundary.
* -EFAULT for an untranslatable guest physical address.
* still lock and check the cache status, as this function does not return
* with the lock still held to permit access.
*/
-int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
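The last sentence of the comment is the easy-to-miss part of the contract: kvm_gpc_refresh() returns with gpc->lock dropped, and a 0 return can still race with a fresh invalidation. Success from refresh therefore never licenses a direct access; the caller must re-lock and re-check first, roughly:

	if (kvm_gpc_refresh(gpc, len))
		return;		/* -EINVAL or -EFAULT: give up */

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, len)) {
		/* Raced with an invalidation; drop the lock and retry. */
		read_unlock_irqrestore(&gpc->lock, flags);
		return;
	}

	/* ... gpc->khva is safe to use until the lock is dropped ... */

	read_unlock_irqrestore(&gpc->lock, flags);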
/**
* kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
}
}
-bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
if (!gpc->active)
return false;
- if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
+ if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
return false;
- if (gpc->gpa != gpa || gpc->generation != slots->generation ||
- kvm_is_error_hva(gpc->uhva))
+ if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
return false;
if (!gpc->valid)
return false;

return true;
}
-int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+ unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
unsigned long page_offset = gpa & ~PAGE_MASK;
return ret;
}
+
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
+{
+ return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+}
EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return kvm_gpc_refresh(gpc, gpa, len);
+ return __kvm_gpc_refresh(gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);
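Taken together, the refactor leaves exactly one place where a gpa enters the cache: kvm_gpc_activate() stores it via the now-static __kvm_gpc_refresh(), and every later check or refresh reuses gpc->gpa. A lifecycle sketch; kvm_gpc_init()'s trailing parameters are elided above, so they are elided here as well:

	kvm_gpc_init(gpc, kvm, /* ... elided ... */);
	kvm_gpc_activate(gpc, gpa, sizeof(struct vcpu_info));	/* gpa fixed here */

	/* Hot path: no gpa argument, the cache knows its own. */
	if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info)))
		kvm_gpc_refresh(gpc, sizeof(struct vcpu_info));

	kvm_gpc_deactivate(gpc);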