Make kvm_gpc_refresh() use the kvm instance cached in gfn_to_pfn_cache.
The first argument of kvm_gpc_unmap() becomes unneeded as well; remove
it from the function definition.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Michal Luczaj <mhal@rbox.co>
---
 arch/x86/kvm/x86.c       |  2 +-
 arch/x86/kvm/xen.c       | 10 ++++------
 include/linux/kvm_host.h | 10 ++++------
 virt/kvm/pfncache.c      | 11 +++++------
 4 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3021,7 +3021,7 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
offset + sizeof(*guest_hv_clock))) {
read_unlock_irqrestore(&gpc->lock, flags);
- if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
+ if (kvm_gpc_refresh(gpc, gpc->gpa,
offset + sizeof(*guest_hv_clock)))
return;
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -224,7 +224,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
if (state == RUNSTATE_runnable)
return;
- if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa, user_len))
+ if (kvm_gpc_refresh(gpc, gpc->gpa, user_len))
return;
read_lock_irqsave(&gpc->lock, flags);
@@ -353,8 +353,7 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
while (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
read_unlock_irqrestore(&gpc->lock, flags);
- if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
- sizeof(struct vcpu_info)))
+ if (kvm_gpc_refresh(gpc, gpc->gpa, sizeof(struct vcpu_info)))
return;
read_lock_irqsave(&gpc->lock, flags);
@@ -428,8 +427,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
if (in_atomic() || !task_is_running(current))
return 1;
- if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
- sizeof(struct vcpu_info))) {
+ if (kvm_gpc_refresh(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
/*
* If this failed, userspace has screwed up the
* vcpu_info mapping. No interrupts for you.
@@ -1479,7 +1477,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
break;
idx = srcu_read_lock(&kvm->srcu);
- rc = kvm_gpc_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
+ rc = kvm_gpc_refresh(gpc, gpc->gpa, PAGE_SIZE);
srcu_read_unlock(&kvm->srcu, idx);
} while(!rc);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1301,35 +1301,33 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
/**
* kvm_gpc_refresh - update a previously initialized cache.
*
- * @kvm: pointer to kvm instance.
* @gpc: struct gfn_to_pfn_cache object.
* @gpa: updated guest physical address to map.
* @len: sanity check; the range being access must fit a single page.
*
* @return: 0 for success.
* -EINVAL for a mapping which would cross a page boundary.
- * -EFAULT for an untranslatable guest physical address.
+ * -EFAULT for an untranslatable guest physical address.
*
* This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
- * returm from this function does not mean the page can be immediately
+ * return from this function does not mean the page can be immediately
* accessed because it may have raced with an invalidation. Callers must
* still lock and check the cache status, as this function does not return
* with the lock still held to permit access.
*/
-int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
unsigned long len);
/**
* kvm_gpc_unmap - temporarily unmap a gfn_to_pfn_cache.
*
- * @kvm: pointer to kvm instance.
* @gpc: struct gfn_to_pfn_cache object.
*
* This unmaps the referenced page. The cache is left in the invalid state
* but at least the mapping from GPA to userspace HVA will remain cached
* and can be reused on a subsequent refresh.
*/
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+void kvm_gpc_unmap(struct gfn_to_pfn_cache *gpc);
/**
* kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -234,10 +234,9 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
return -EFAULT;
}
-int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
- unsigned long len)
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
- struct kvm_memslots *slots = kvm_memslots(kvm);
+ struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
unsigned long page_offset = gpa & ~PAGE_MASK;
kvm_pfn_t old_pfn, new_pfn;
unsigned long old_uhva;
@@ -318,7 +317,7 @@ int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
}
EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_unmap(struct gfn_to_pfn_cache *gpc)
{
void *old_khva;
kvm_pfn_t old_pfn;
@@ -375,7 +374,7 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa)
list_add(&gpc->list, &gpc->kvm->gpc_list);
spin_unlock(&gpc->kvm->gpc_lock);
}
- return kvm_gpc_refresh(gpc->kvm, gpc, gpa, gpc->len);
+ return kvm_gpc_refresh(gpc, gpa, gpc->len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);
@@ -386,7 +385,7 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
list_del(&gpc->list);
spin_unlock(&gpc->kvm->gpc_lock);
- kvm_gpc_unmap(gpc->kvm, gpc);
+ kvm_gpc_unmap(gpc);
gpc->active = false;
}
}
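
For context (not part of the patch): below is a minimal sketch of the
caller pattern after this change, modeled on the kvm_xen_inject_pending_events()
hunk above. The helper name and the way @len is passed in are illustrative
assumptions; only kvm_gpc_check(), kvm_gpc_refresh(), and the gpc->lock
usage come from the patch itself.

#include <linux/kvm_host.h>

/* Hypothetical helper; the check/refresh loop mirrors the xen.c hunks. */
static void example_gpc_access(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, gpc->gpa, len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
		 * With this patch the kvm instance comes from gpc->kvm, so
		 * no separate 'kvm' argument is passed. A successful refresh
		 * may still race with an invalidation, hence the re-check
		 * under gpc->lock on the next loop iteration.
		 */
		if (kvm_gpc_refresh(gpc, gpc->gpa, len))
			return;	/* -EINVAL or -EFAULT, per the docs above */

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* ... access the mapping through gpc->khva while holding the lock ... */

	read_unlock_irqrestore(&gpc->lock, flags);
}

The loop exists because, as the kvm_gpc_refresh() documentation notes,
the function does not return with the lock held, so the cache must be
re-validated under gpc->lock before every access. Caching the kvm
pointer in the gpc at initialization is what lets every call site drop
the redundant parameter, which accounts for the diff being net-negative.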