Message ID | 1401916675-1568-2-git-send-email-bsd@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 2014-06-04 23:17, Bandan Das wrote: > Verify that vmon fails with unaligned vmxon region or > any bits set beyong the physical address width. Also verify > failure with an invalid revision identifier. > > Signed-off-by: Bandan Das <bsd@redhat.com> > --- > x86/vmx.c | 46 +++++++++++++++++++++++++++++++++++++++++++--- > 1 file changed, 43 insertions(+), 3 deletions(-) > > diff --git a/x86/vmx.c b/x86/vmx.c > index 1182eef..207eb81 100644 > --- a/x86/vmx.c > +++ b/x86/vmx.c > @@ -37,7 +37,7 @@ > #include "smp.h" > #include "io.h" > > -u32 *vmxon_region; > +u64 *vmxon_region; > struct vmcs *vmcs_root; > u32 vpid_cnt; > void *guest_stack, *guest_syscall_stack; > @@ -598,13 +598,53 @@ static int test_vmx_feature_control(void) > > static int test_vmxon(void) > { > - int ret; > + int ret, ret1; > u64 rflags; > + u64 *tmp_region = vmxon_region; > + int width = cpuid(0x80000008).a & 0xff; > + > + /* Unaligned page access */ > + vmxon_region = (u64 *)((intptr_t)vmxon_region + 1); > + rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; > + write_rflags(rflags); > + ret1 = vmx_on(); Can we ensure that the compiler doesn't inject any ops between write_rflags and the vmxon that overwrite CF or ZF? If you want those flags in a specific state, maybe it's better to pass that to vmx_on and do this in the assembly block with vmxon. 
> + report("test vmxon with unaligned vmxon region", ret1); > + if (!ret1) { > + ret = 1; > + goto out; > + } > > + /* gpa bits beyond physical address width are set*/ > + vmxon_region = (u64 *)((intptr_t)tmp_region | ((u64)1 << (width+1))); > + rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; > + write_rflags(rflags); > + ret1 = vmx_on(); > + report("test vmxon with bits set beyond physical address width", ret1); > + if (!ret1) { > + ret = 1; > + goto out; > + } > + > + /* invalid revision indentifier */ > + vmxon_region = tmp_region; > + *vmxon_region = 0xba9da9; > + rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; > + write_rflags(rflags); > + ret1 = vmx_on(); > + report("test vmxon with invalid revision identifier", ret1); > + if (!ret1) { > + ret = 1; > + goto out; > + } > + > + /* and finally a valid region */ > + *vmxon_region = basic.revision; > rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; > write_rflags(rflags); > ret = vmx_on(); Oh, this pattern is not by you! OK, but let's address this first, then add the new tests on top. Jan > - report("test vmxon", !ret); > + report("test vmxon with valid vmxon region", !ret); > + > +out: > return ret; > } > >
Il 05/06/2014 09:00, Jan Kiszka ha scritto: >> > + rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; >> > + write_rflags(rflags); >> > + ret1 = vmx_on(); > Can we ensure that the compiler doesn't inject any ops between > write_rflags and the vmxon that overwrite CF or ZF? No, in fact if vmx_on were not inlined, building the stack frame with "sub" would overwrite the flags. Paolo > If you want those > flags in a specific state, maybe it's better to pass that to vmx_on and > do this in the assembly block with vmxon. > -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/x86/vmx.c b/x86/vmx.c index 1182eef..207eb81 100644 --- a/x86/vmx.c +++ b/x86/vmx.c @@ -37,7 +37,7 @@ #include "smp.h" #include "io.h" -u32 *vmxon_region; +u64 *vmxon_region; struct vmcs *vmcs_root; u32 vpid_cnt; void *guest_stack, *guest_syscall_stack; @@ -598,13 +598,53 @@ static int test_vmx_feature_control(void) static int test_vmxon(void) { - int ret; + int ret, ret1; u64 rflags; + u64 *tmp_region = vmxon_region; + int width = cpuid(0x80000008).a & 0xff; + + /* Unaligned page access */ + vmxon_region = (u64 *)((intptr_t)vmxon_region + 1); + rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; + write_rflags(rflags); + ret1 = vmx_on(); + report("test vmxon with unaligned vmxon region", ret1); + if (!ret1) { + ret = 1; + goto out; + } + /* gpa bits beyond physical address width are set*/ + vmxon_region = (u64 *)((intptr_t)tmp_region | ((u64)1 << (width+1))); + rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; + write_rflags(rflags); + ret1 = vmx_on(); + report("test vmxon with bits set beyond physical address width", ret1); + if (!ret1) { + ret = 1; + goto out; + } + + /* invalid revision indentifier */ + vmxon_region = tmp_region; + *vmxon_region = 0xba9da9; + rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; + write_rflags(rflags); + ret1 = vmx_on(); + report("test vmxon with invalid revision identifier", ret1); + if (!ret1) { + ret = 1; + goto out; + } + + /* and finally a valid region */ + *vmxon_region = basic.revision; rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; write_rflags(rflags); ret = vmx_on(); - report("test vmxon", !ret); + report("test vmxon with valid vmxon region", !ret); + +out: return ret; }
Verify that vmxon fails with unaligned vmxon region or any bits set beyond the physical address width. Also verify failure with an invalid revision identifier. Signed-off-by: Bandan Das <bsd@redhat.com> --- x86/vmx.c | 46 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-)