@@ -125,18 +125,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
gfn_t table_gfn;
unsigned index, pt_access, uninitialized_var(pte_access);
gpa_t pte_gpa;
- bool eperm, present, rsvd_fault;
- int offset, write_fault, user_fault, fetch_fault;
-
- write_fault = access & PFERR_WRITE_MASK;
- user_fault = access & PFERR_USER_MASK;
- fetch_fault = access & PFERR_FETCH_MASK;
+ bool eperm;
+ int offset;
+ const int write_fault = access & PFERR_WRITE_MASK;
+ const int user_fault = access & PFERR_USER_MASK;
+ const int fetch_fault = access & PFERR_FETCH_MASK;
+ u16 errcode = 0;
trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
fetch_fault);
walk:
- present = true;
- eperm = rsvd_fault = false;
+ eperm = false;
walker->level = mmu->root_level;
pte = mmu->get_cr3(vcpu);
@@ -145,7 +144,7 @@ walk:
pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
trace_kvm_mmu_paging_element(pte, walker->level);
if (!is_present_gpte(pte)) {
- present = false;
goto error;
}
--walker->level;
@@ -171,34 +170,34 @@ walk:
real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
PFERR_USER_MASK|PFERR_WRITE_MASK);
if (unlikely(real_gfn == UNMAPPED_GVA)) {
- present = false;
- break;
+			goto error;
}
real_gfn = gpa_to_gfn(real_gfn);
host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
if (unlikely(kvm_is_error_hva(host_addr))) {
- present = false;
- break;
+			goto error;
}
ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
- present = false;
- break;
+			goto error;
}
trace_kvm_mmu_paging_element(pte, walker->level);
if (unlikely(!is_present_gpte(pte))) {
- present = false;
- break;
+			goto error;
}
if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
walker->level))) {
- rsvd_fault = true;
- break;
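+			/* A reserved-bit fault implies a present entry. */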
+			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+ goto error;
}
if (unlikely(write_fault && !is_writable_pte(pte)
@@ -213,16 +212,15 @@ walk:
eperm = true;
#endif
- if (!eperm && !rsvd_fault
- && unlikely(!(pte & PT_ACCESSED_MASK))) {
+ if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
int ret;
trace_kvm_mmu_set_accessed_bit(table_gfn, index,
sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
pte, pte|PT_ACCESSED_MASK);
if (unlikely(ret < 0)) {
- present = false;
- break;
+				goto error;
} else if (ret)
goto walk;
@@ -276,7 +274,7 @@ walk:
--walker->level;
}
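+	/* A permission fault hit a present translation: report P=1. */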
-	if (unlikely(!present || eperm || rsvd_fault))
-		goto error;
+	if (unlikely(eperm)) {
+		errcode |= PFERR_PRESENT_MASK;
+		goto error;
+	}
if (write_fault && unlikely(!is_dirty_gpte(pte))) {
@@ -286,7 +284,7 @@ walk:
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
pte, pte|PT_DIRTY_MASK);
if (unlikely(ret < 0)) {
- present = false;
goto error;
} else if (ret)
goto walk;
@@ -303,20 +301,14 @@ walk:
return 1;
error:
- walker->fault.vector = PF_VECTOR;
- walker->fault.error_code_valid = true;
- walker->fault.error_code = 0;
- if (present)
- walker->fault.error_code |= PFERR_PRESENT_MASK;
-
- walker->fault.error_code |= write_fault | user_fault;
-
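+	/*
+	 * Assemble the architectural #PF error code.  PFERR_PRESENT_MASK
+	 * was set above only for permission and reserved-bit faults; a
+	 * non-present guest entry is reported with the P bit clear.
+	 */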
+ errcode |= write_fault | user_fault;
if (fetch_fault && (mmu->nx ||
kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
- walker->fault.error_code |= PFERR_FETCH_MASK;
- if (rsvd_fault)
- walker->fault.error_code |= PFERR_RSVD_MASK;
+ errcode |= PFERR_FETCH_MASK;
+ walker->fault.vector = PF_VECTOR;
+ walker->fault.error_code_valid = true;
+ walker->fault.error_code = errcode;
walker->fault.address = addr;
walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;