
[v2,1/3] x86/vmx: introduce vmx_find_msr()

Message ID 20170217154233.28514-2-sergey.dyasli@citrix.com (mailing list archive)
State New, archived

Commit Message

Sergey Dyasli Feb. 17, 2017, 3:42 p.m. UTC
Modify vmx_add_msr() to use a variation of the insertion sort algorithm:
find a place for the new entry and shift all subsequent elements before
insertion.

The new vmx_find_msr() exploits the fact that the MSR list is now sorted
and reuses the existing code for binary search.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
v1 --> v2:
- qsort (heap sort) is replaced with a variant of insertion sort
- both host and guest MSR lists are now kept sorted
- vmx_find_guest_msr() is replaced with more generic vmx_find_msr()

 xen/arch/x86/hvm/vmx/vmcs.c        | 45 ++++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/hvm/vmx/vmcs.h |  1 +
 2 files changed, 44 insertions(+), 2 deletions(-)
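
For orientation, a minimal standalone sketch of the sorted insert the
patch performs (illustrative types and names only, not the Xen code
itself; the bounds check is written before the array access, the
ordering the discussion below settles on):

    #include <stdint.h>
    #include <string.h>

    struct entry { uint32_t index; };

    /* Insert msr into the sorted array area[] of count entries, keeping
     * it sorted; returns the new count. Caller ensures capacity. */
    static unsigned int sorted_insert(struct entry *area,
                                      unsigned int count, uint32_t msr)
    {
        unsigned int idx;

        /* Find the insertion point; && short-circuits, so area[idx]
         * is never read once idx reaches count. */
        for ( idx = 0; idx < count && area[idx].index <= msr; idx++ )
            if ( area[idx].index == msr )
                return count; /* already present */

        /* Shift the tail up one slot and place the new entry. */
        memmove(&area[idx + 1], &area[idx], sizeof(*area) * (count - idx));
        area[idx].index = msr;

        return count + 1;
    }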

Comments

Sergey Dyasli Feb. 22, 2017, 8:37 a.m. UTC | #1
On Tue, 2017-02-21 at 09:29 +0000, Tian, Kevin wrote:
> > From: Sergey Dyasli [mailto:sergey.dyasli@citrix.com]
> > Sent: Friday, February 17, 2017 11:43 PM
> > 
> > Modify vmx_add_msr() to use a variation of the insertion sort algorithm:
> > find a place for the new entry and shift all subsequent elements before
> > insertion.
> > 
> > The new vmx_find_msr() exploits the fact that the MSR list is now sorted
> > and reuses the existing code for binary search.
> > 
> > Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
> > ---
> > v1 --> v2:
> > - qsort (heap sort) is replaced with a variant of insertion sort
> > - both host and guest MSR lists are now kept sorted
> > - vmx_find_guest_msr() is replaced with more generic vmx_find_msr()
> > 
> >  xen/arch/x86/hvm/vmx/vmcs.c        | 45 ++++++++++++++++++++++++++++++++++++--
> >  xen/include/asm-x86/hvm/vmx/vmcs.h |  1 +
> >  2 files changed, 44 insertions(+), 2 deletions(-)
> > 
> > diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> > index 454d444..49587d6 100644
> > --- a/xen/arch/x86/hvm/vmx/vmcs.c
> > +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> > @@ -1307,6 +1307,44 @@ static int construct_vmcs(struct vcpu *v)
> >      return 0;
> >  }
> > 
> > +static int vmx_msr_entry_key_cmp(const void *key, const void *elt)
> 
> what is 'elt' short for?


The prototype was taken directly from bsearch():

    int (*cmp)(const void *key, const void *elt)

I believe that elt stands for element.
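
For reference, the full prototype (as I understand it, mirroring Linux's
lib/bsearch.c, from which this interface derives) is roughly:

    void *bsearch(const void *key, const void *base, size_t num,
                  size_t size, int (*cmp)(const void *key, const void *elt));

so the naming simply follows the existing bsearch() interface.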

> 
> > +{
> > +    const u32 *msr = key;
> > +    const struct vmx_msr_entry *entry = elt;
> > +
> > +    if ( *msr > entry->index )
> > +        return 1;
> > +    if ( *msr < entry->index )
> > +        return -1;
> > +
> > +    return 0;
> > +}
> > +
> > +struct vmx_msr_entry *vmx_find_msr(u32 msr, int type)
> > +{
> > +    struct vcpu *curr = current;
> > +    unsigned int msr_count;
> > +    struct vmx_msr_entry *msr_area;
> > +
> > +    if ( type == VMX_GUEST_MSR )
> > +    {
> > +        msr_count = curr->arch.hvm_vmx.msr_count;
> > +        msr_area = curr->arch.hvm_vmx.msr_area;
> > +    }
> > +    else
> > +    {
> > +        ASSERT(type == VMX_HOST_MSR);
> > +        msr_count = curr->arch.hvm_vmx.host_msr_count;
> > +        msr_area = curr->arch.hvm_vmx.host_msr_area;
> > +    }
> > +
> > +    if ( msr_area == NULL )
> > +        return NULL;
> > +
> > +    return bsearch(&msr, msr_area, msr_count, sizeof(struct vmx_msr_entry),
> > +                   vmx_msr_entry_key_cmp);
> > +}
> > +
> >  int vmx_read_guest_msr(u32 msr, u64 *val)
> >  {
> >      struct vcpu *curr = current;
> > @@ -1375,14 +1413,17 @@ int vmx_add_msr(u32 msr, int type)
> >              __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
> >      }
> > 
> > -    for ( idx = 0; idx < *msr_count; idx++ )
> > +    for ( idx = 0; (*msr_area)[idx].index <= msr && idx < *msr_count; idx++ )
> 
> risk of out-of-bounds access.

How exactly is an out-of-bounds access possible? The original condition

    idx < *msr_count

is still being checked on each loop iteration.

> 
> >          if ( (*msr_area)[idx].index == msr )
> >              return 0;
> > 
> >      if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
> >          return -ENOSPC;
> > 
> > -    msr_area_elem = *msr_area + *msr_count;
> > +    memmove(*msr_area + idx + 1, *msr_area + idx,
> > +            sizeof(*msr_area_elem) * (*msr_count - idx));
> > +
> > +    msr_area_elem = *msr_area + idx;
> >      msr_area_elem->index = msr;
> >      msr_area_elem->mbz = 0;
> > 
> > diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> > index c30aab6..903af51 100644
> > --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> > +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> > @@ -529,6 +529,7 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
> >  void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
> >  int vmx_read_guest_msr(u32 msr, u64 *val);
> >  int vmx_write_guest_msr(u32 msr, u64 val);
> > +struct vmx_msr_entry *vmx_find_msr(u32 msr, int type);
> >  int vmx_add_msr(u32 msr, int type);
> >  void vmx_vmcs_switch(paddr_t from, paddr_t to);
> >  void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
> > --
> > 2.9.3
-- 
Thanks,
Sergey
Tian, Kevin Feb. 22, 2017, 8:40 a.m. UTC | #2
> From: Sergey Dyasli [mailto:sergey.dyasli@citrix.com]
> Sent: Wednesday, February 22, 2017 4:38 PM
> 
> > >
> > > -    for ( idx = 0; idx < *msr_count; idx++ )
> > > +    for ( idx = 0; (*msr_area)[idx].index <= msr && idx < *msr_count; idx++ )
> >
> > risk of out-of-bounds access.
> 
> How exactly is an out-of-bounds access possible? The original condition
> 
>     idx < *msr_count
> 
> is still being checked on each loop iteration.
> 

Isn't "(*msr_area[idx]).index <= msr" checked before "idx < *msr_count"?

So if idx==*msr_count, you first hit an out-of-boundary access...

I think we should change the condition order here.
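
A sketch of the swapped order (the exact v3 wording may differ):

    for ( idx = 0; idx < *msr_count && (*msr_area)[idx].index <= msr; idx++ )

Since && short-circuits, (*msr_area)[idx] is then never dereferenced once
idx reaches *msr_count.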

Thanks
Kevin
Sergey Dyasli Feb. 22, 2017, 8:45 a.m. UTC | #3
On Wed, 2017-02-22 at 08:40 +0000, Tian, Kevin wrote:
> > From: Sergey Dyasli [mailto:sergey.dyasli@citrix.com]
> > Sent: Wednesday, February 22, 2017 4:38 PM
> > 
> > > > 
> > > > -    for ( idx = 0; idx < *msr_count; idx++ )
> > > > +    for ( idx = 0; (*msr_area)[idx].index <= msr && idx < *msr_count; idx++ )
> > > 
> > > risk of out-of-bounds access.
> > 
> > How exactly is an out-of-bounds access possible? The original condition
> > 
> >     idx < *msr_count
> > 
> > is still being checked on each loop iteration.
> 
> Isn't "(*msr_area)[idx].index <= msr" checked before "idx < *msr_count"?
> 
> So if idx == *msr_count, you first hit an out-of-bounds access...
> 
> I think we should change the condition order here.

You are right. I will fix this in v3.

-- 
Thanks,
Sergey
Jan Beulich Feb. 22, 2017, 10:14 a.m. UTC | #4
>>> On 22.02.17 at 09:45, <sergey.dyasli@citrix.com> wrote:
> On Wed, 2017-02-22 at 08:40 +0000, Tian, Kevin wrote:
>> > From: Sergey Dyasli [mailto:sergey.dyasli@citrix.com]
>> > Sent: Wednesday, February 22, 2017 4:38 PM
>> > 
>> > > > 
>> > > > -    for ( idx = 0; idx < *msr_count; idx++ )
>> > > > +    for ( idx = 0; (*msr_area)[idx].index <= msr && idx < *msr_count; idx++ )
>> > > 
>> > > risk of out-of-bounds access.
>> > 
>> > How exactly is an out-of-bounds access possible? The original condition
>> > 
>> >     idx < *msr_count
>> > 
>> > is still being checked on each loop iteration.
>> 
>> Isn't "(*msr_area)[idx].index <= msr" checked before "idx < *msr_count"?
>> 
>> So if idx == *msr_count, you first hit an out-of-bounds access...
>> 
>> I think we should change the condition order here.
>> 
> 
> You are right. I will fix this in v3.

And with that taken care of
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Patch

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 454d444..49587d6 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1307,6 +1307,44 @@ static int construct_vmcs(struct vcpu *v)
     return 0;
 }
 
+static int vmx_msr_entry_key_cmp(const void *key, const void *elt)
+{
+    const u32 *msr = key;
+    const struct vmx_msr_entry *entry = elt;
+
+    if ( *msr > entry->index )
+        return 1;
+    if ( *msr < entry->index )
+        return -1;
+
+    return 0;
+}
+
+struct vmx_msr_entry *vmx_find_msr(u32 msr, int type)
+{
+    struct vcpu *curr = current;
+    unsigned int msr_count;
+    struct vmx_msr_entry *msr_area;
+
+    if ( type == VMX_GUEST_MSR )
+    {
+        msr_count = curr->arch.hvm_vmx.msr_count;
+        msr_area = curr->arch.hvm_vmx.msr_area;
+    }
+    else
+    {
+        ASSERT(type == VMX_HOST_MSR);
+        msr_count = curr->arch.hvm_vmx.host_msr_count;
+        msr_area = curr->arch.hvm_vmx.host_msr_area;
+    }
+
+    if ( msr_area == NULL )
+        return NULL;
+
+    return bsearch(&msr, msr_area, msr_count, sizeof(struct vmx_msr_entry),
+                   vmx_msr_entry_key_cmp);
+}
+
 int vmx_read_guest_msr(u32 msr, u64 *val)
 {
     struct vcpu *curr = current;
@@ -1375,14 +1413,17 @@ int vmx_add_msr(u32 msr, int type)
             __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
     }
 
-    for ( idx = 0; idx < *msr_count; idx++ )
+    for ( idx = 0; (*msr_area)[idx].index <= msr && idx < *msr_count; idx++ )
         if ( (*msr_area)[idx].index == msr )
             return 0;
 
     if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
         return -ENOSPC;
 
-    msr_area_elem = *msr_area + *msr_count;
+    memmove(*msr_area + idx + 1, *msr_area + idx,
+            sizeof(*msr_area_elem) * (*msr_count - idx));
+
+    msr_area_elem = *msr_area + idx;
     msr_area_elem->index = msr;
     msr_area_elem->mbz = 0;
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index c30aab6..903af51 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -529,6 +529,7 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
 void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
 int vmx_read_guest_msr(u32 msr, u64 *val);
 int vmx_write_guest_msr(u32 msr, u64 val);
+struct vmx_msr_entry *vmx_find_msr(u32 msr, int type);
 int vmx_add_msr(u32 msr, int type);
 void vmx_vmcs_switch(paddr_t from, paddr_t to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
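
For illustration, a hypothetical caller of the new interface might look
like this (a sketch only; the data field of struct vmx_msr_entry and the
-ESRCH convention are assumed from the surrounding vmx_read_guest_msr()
code, not part of this patch):

    /* Hypothetical: look up an MSR in the current vCPU's guest list
     * and read its value; -ESRCH if the MSR is not in the list. */
    static int example_read_msr(u32 msr, u64 *val)
    {
        const struct vmx_msr_entry *ent = vmx_find_msr(msr, VMX_GUEST_MSR);

        if ( ent == NULL )
            return -ESRCH;

        *val = ent->data;
        return 0;
    }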