@@ -520,26 +520,27 @@ static inline int hv_remote_flush_eptp(u64 eptp, struct kvm_tlb_range *range)
 static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
 		struct kvm_tlb_range *range)
 {
+	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
 	struct kvm_vcpu *vcpu;
 	int ret = 0, i;
 
-	spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+	spin_lock(&kvm_vmx->ept_pointer_lock);
 
-	if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
+	if (kvm_vmx->ept_pointers_match == EPT_POINTERS_CHECK)
 		check_ept_pointer_match(kvm);
 
-	if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
+	if (kvm_vmx->ept_pointers_match != EPT_POINTERS_MATCH) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			/* If ept_pointer is invalid pointer, bypass flush request. */
 			if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
 				ret |= hv_remote_flush_eptp(to_vmx(vcpu)->ept_pointer,
 							    range);
 		}
-	} else if (VALID_PAGE(to_kvm_vmx(kvm)->hv_tlb_eptp)) {
-		ret = hv_remote_flush_eptp(to_kvm_vmx(kvm)->hv_tlb_eptp, range);
+	} else if (VALID_PAGE(kvm_vmx->hv_tlb_eptp)) {
+		ret = hv_remote_flush_eptp(kvm_vmx->hv_tlb_eptp, range);
 	}
 
-	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+	spin_unlock(&kvm_vmx->ept_pointer_lock);
 
 	return ret;
 }
 
 static int hv_remote_flush_tlb(struct kvm *kvm)