@@ -754,6 +754,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm->nested.hsave = page_address(hsave_page);
svm->nested.msrpm = page_address(nested_msrpm_pages);
+ svm_vcpu_init_msrpm(svm->nested.msrpm);
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
@@ -1824,20 +1825,46 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
- u32 *nested_msrpm;
- struct page *page;
+ /*
+ * This function merges the MSR permission bitmaps of KVM and the
+ * nested vmcb. It is optimized in that it only merges the parts where
+ * the KVM MSR permission bitmap may contain zero bits.
+ */
+ static const u32 msrpm_offsets[] = {
+ 	0x0000005c, /* SYSENTER_CS */
+
+ 	0x00000074, /* LASTBRANCHFROMIP
+ 			LASTBRANCHTOIP
+ 			LASTINTFROMIP
+ 			LASTINTTOIP */
+
+ 	0x00000820, /* STAR
+ 			LSTAR
+ 			CSTAR
+ 			SYSCALL_MASK */
+
+ 	0x00000840, /* FS_BASE
+ 			GS_BASE
+ 			KERNEL_GS_BASE */
+
+ 	0xffffffff, /* End of List */
+ };
int i;
- nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
- if (!nested_msrpm)
- return false;
+ for (i = 0; msrpm_offsets[i] != 0xffffffff; i++) {
+ u32 value, p;
+ u64 offset;
- for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
- svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+ offset = svm->nested.vmcb_msrpm + msrpm_offsets[i];
+ p = msrpm_offsets[i] / 4;
- svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
+ if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
+ return false;
- nested_svm_unmap(page);
+ svm->nested.msrpm[p] = svm->msrpm[p] | value;
+ }
+
+ svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
return true;
}
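
For reference, the offsets in msrpm_offsets[] follow from the SVM MSR permission
bitmap layout: two intercept bits per MSR (read and write), with each of the three
architecturally defined MSR ranges mapped onto a 2 KB slice of the bitmap. The
standalone sketch below is not part of the patch; the helper name and the range
table are illustrative only. It derives the aligned 4-byte word that covers a given
MSR and reproduces the table entries used above.

/*
 * Standalone sketch, not KVM code: derive msrpm_offsets[] entries from
 * the SVM MSR permission bitmap layout (2 intercept bits per MSR).
 */
#include <stdint.h>
#include <stdio.h>

/* MSR range base -> byte offset of its 2 KB slice in the bitmap. */
static const uint32_t ranges[][2] = {
	{ 0x00000000, 0x0000 },	/* MSRs 0x00000000 - 0x00001fff */
	{ 0xc0000000, 0x0800 },	/* MSRs 0xc0000000 - 0xc0001fff */
	{ 0xc0010000, 0x1000 },	/* MSRs 0xc0010000 - 0xc0011fff */
};

/*
 * Byte offset of the aligned 4-byte word that holds the intercept
 * bits for msr, or 0xffffffff if the MSR is outside all ranges.
 */
static uint32_t msrpm_word_offset(uint32_t msr)
{
	unsigned int i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		uint32_t bit, byte;

		if (msr < ranges[i][0] || msr >= ranges[i][0] + 0x2000)
			continue;

		bit  = (msr - ranges[i][0]) * 2;	/* 2 bits per MSR */
		byte = ranges[i][1] + bit / 8;		/* byte in bitmap */

		return byte & ~3u;			/* align to u32   */
	}

	return 0xffffffff;
}

int main(void)
{
	/* Reproduces the msrpm_offsets[] entries from the patch. */
	printf("SYSENTER_CS      -> 0x%08x\n", (unsigned)msrpm_word_offset(0x00000174));
	printf("LASTBRANCHFROMIP -> 0x%08x\n", (unsigned)msrpm_word_offset(0x000001db));
	printf("STAR             -> 0x%08x\n", (unsigned)msrpm_word_offset(0xc0000081));
	printf("FS_BASE          -> 0x%08x\n", (unsigned)msrpm_word_offset(0xc0000100));
	return 0;
}

Each table entry thus covers the 16 MSRs sharing that 4-byte word, which is why a
single 4-byte kvm_read_guest() per entry is enough to merge the two bitmaps.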