@@ -1427,18 +1427,16 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
u32 param = svm->vmcb->control.exit_info_1 & 1;
u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
- struct vmcb *nested_vmcb;
bool ret = false;
u32 t0, t1;
u8 *msrpm;
- nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
- msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER1);
+ msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
- if (!nested_vmcb || !msrpm)
+ if (!msrpm)
goto out;
- if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+ if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
return 0;
switch (msr) {
@@ -1464,8 +1462,7 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
ret = msrpm[t1] & ((1 << param) << t0);
out:
- nested_svm_unmap(nested_vmcb, KM_USER0);
- nested_svm_unmap(msrpm, KM_USER1);
+ nested_svm_unmap(msrpm, KM_USER0);
return ret;
}
Thanks to Joerg's previous series of cleanups, we now have almost all the information we need to decide what to do on #VMEXIT, because we get the variables from the VMCB on VMRUN. Unfortunately there's one piece that slipped through the conversion, namely the MSR intercept, which still tries to map the nested VMCB to find out if MSRs are intercepted. So let's use the cached value, removing the need for two atomic maps (which break anyway) and fixing an oops along the way. CC: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Alexander Graf <agraf@suse.de> --- arch/x86/kvm/svm.c | 11 ++++------- 1 files changed, 4 insertions(+), 7 deletions(-)