vm_event: Allow subscribing to write events for specific MSRs

Message ID 1460471669-3434-1-git-send-email-rcojocaru@bitdefender.com (mailing list archive)
State New, archived

Commit Message

Razvan Cojocaru April 12, 2016, 2:34 p.m. UTC
Previously, subscribing to MSR write events was an all-or-nothing
approach, with special cases for the introspection MSRs. This patch
allows the vm_event consumer to specify exactly which MSRs it is
interested in, and as a side effect gets rid of the
vmx_introspection_force_enabled_msrs[] special case.
This replaces the previously posted "xen: Filter out MSR write
events" patch.

Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
---
 tools/libxc/include/xenctrl.h      |  4 +--
 tools/libxc/xc_monitor.c           |  6 ++--
 xen/arch/x86/hvm/event.c           |  3 +-
 xen/arch/x86/hvm/hvm.c             |  3 +-
 xen/arch/x86/hvm/vmx/vmcs.c        | 26 ++--------------
 xen/arch/x86/hvm/vmx/vmx.c         | 10 ++----
 xen/arch/x86/monitor.c             | 25 +++++++--------
 xen/arch/x86/vm_event.c            | 62 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/vm_event.h     | 21 +++++++++++++
 xen/include/asm-x86/domain.h       |  4 +--
 xen/include/asm-x86/hvm/hvm.h      |  8 ++---
 xen/include/asm-x86/hvm/vmx/vmcs.h |  7 -----
 xen/include/asm-x86/vm_event.h     |  6 ++++
 xen/include/public/domctl.h        |  3 +-
 14 files changed, 122 insertions(+), 66 deletions(-)
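With the reworked interface, a consumer subscribes per-MSR instead of toggling
everything at once. A minimal sketch of driving the new libxc call (assumes an
open xenctrl handle and a monitor ring already set up; the MSR_LSTAR value is
taken from the architecture manuals, and the helper name is illustrative):

    #include <xenctrl.h>

    #define MSR_LSTAR 0xc0000082  /* 64-bit SYSCALL target, a classic introspection MSR */

    static int watch_lstar(xc_interface *xch, domid_t domid)
    {
        /* Request write events for this one MSR only. */
        return xc_monitor_mov_to_msr(xch, domid, MSR_LSTAR, true);
    }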

Comments

Julien Grall April 12, 2016, 2:42 p.m. UTC | #1
Hello Razvan,

On 12/04/2016 15:34, Razvan Cojocaru wrote:
> Previously, subscribing to MSR write events was an all-or-nothing
> approach, with special cases for the introspection MSRs. This patch
> allows the vm_event consumer to specify exactly which MSRs it is
> interested in, and as a side effect gets rid of the
> vmx_introspection_force_enabled_msrs[] special case.
> This replaces the previously posted "xen: Filter out MSR write
> events" patch.
>
> Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
> ---
>   tools/libxc/include/xenctrl.h      |  4 +--
>   tools/libxc/xc_monitor.c           |  6 ++--
>   xen/arch/x86/hvm/event.c           |  3 +-
>   xen/arch/x86/hvm/hvm.c             |  3 +-
>   xen/arch/x86/hvm/vmx/vmcs.c        | 26 ++--------------
>   xen/arch/x86/hvm/vmx/vmx.c         | 10 ++----
>   xen/arch/x86/monitor.c             | 25 +++++++--------
>   xen/arch/x86/vm_event.c            | 62 ++++++++++++++++++++++++++++++++++++++
>   xen/include/asm-arm/vm_event.h     | 21 +++++++++++++

Can you explain why you've added stubs for ARM when nobody in the common 
code is calling them?

Regards,
Razvan Cojocaru April 12, 2016, 2:45 p.m. UTC | #2
On 04/12/2016 05:42 PM, Julien Grall wrote:
> Hello Razvan,
> 
> On 12/04/2016 15:34, Razvan Cojocaru wrote:
>> Previously, subscribing to MSR write events was an all-or-nothing
>> approach, with special cases for the introspection MSRs. This patch
>> allows the vm_event consumer to specify exactly which MSRs it is
>> interested in, and as a side effect gets rid of the
>> vmx_introspection_force_enabled_msrs[] special case.
>> This replaces the previously posted "xen: Filter out MSR write
>> events" patch.
>>
>> Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
>> ---
>>   tools/libxc/include/xenctrl.h      |  4 +--
>>   tools/libxc/xc_monitor.c           |  6 ++--
>>   xen/arch/x86/hvm/event.c           |  3 +-
>>   xen/arch/x86/hvm/hvm.c             |  3 +-
>>   xen/arch/x86/hvm/vmx/vmcs.c        | 26 ++--------------
>>   xen/arch/x86/hvm/vmx/vmx.c         | 10 ++----
>>   xen/arch/x86/monitor.c             | 25 +++++++--------
>>   xen/arch/x86/vm_event.c            | 62 ++++++++++++++++++++++++++++++++++++++
>>   xen/include/asm-arm/vm_event.h     | 21 +++++++++++++
> 
> Can you explain why you've added stubs for ARM when nobody in the common
> code is calling them?

Got carried away, just assumed symmetry was required. Will remove them in V2.


Thanks,
Razvan
Tamas K Lengyel April 12, 2016, 5:49 p.m. UTC | #3
>
> diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
> index 1fec412..1be058a 100644
> --- a/xen/arch/x86/monitor.c
> +++ b/xen/arch/x86/monitor.c
> @@ -20,6 +20,7 @@
>   */
>
>  #include <asm/monitor.h>
> +#include <asm/vm_event.h>
>  #include <public/vm_event.h>
>
>  int arch_monitor_domctl_event(struct domain *d,
> @@ -77,25 +78,25 @@ int arch_monitor_domctl_event(struct domain *d,
>
>      case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
>      {
> -        bool_t old_status = ad->monitor.mov_to_msr_enabled;
> +        bool_t old_status;
> +        int rc;
> +        u32 msr = mop->u.mov_to_msr.msr;
>
> -        if ( unlikely(old_status == requested_status) )
> -            return -EEXIST;
> +        domain_pause(d);
>
> -        if ( requested_status && mop->u.mov_to_msr.extended_capture &&
> -             !hvm_enable_msr_exit_interception(d) )
> -            return -EOPNOTSUPP;
> +        old_status = vm_event_is_msr_enabled(d, msr);
>
> -        domain_pause(d);
> +        if ( unlikely(old_status == requested_status) )
> +            return -EEXIST;
>

Moving this test after the domain gets paused will leave the guest paused
on the error path. Any reason why this rearrangement is necessary?


>
> -        if ( requested_status && mop->u.mov_to_msr.extended_capture )
> -            ad->monitor.mov_to_msr_extended = 1;
> +        if ( requested_status )
> +            rc = vm_event_enable_msr(d, msr);
>          else
> -            ad->monitor.mov_to_msr_extended = 0;
> +            rc = vm_event_disable_msr(d, msr);
>
> -        ad->monitor.mov_to_msr_enabled = requested_status;
>          domain_unpause(d);
> -        break;
> +
> +        return rc;
>      }
>
>      case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
> diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
> index 5635603..b851e39 100644
> --- a/xen/arch/x86/vm_event.c
> +++ b/xen/arch/x86/vm_event.c
> @@ -27,6 +27,13 @@ int vm_event_init_domain(struct domain *d)
>  {
>      struct vcpu *v;
>
> +    d->arch.monitor_msr_bitmap = alloc_xenheap_page();
> +
> +    if ( !d->arch.monitor_msr_bitmap )
> +        return -ENOMEM;
> +
> +    memset(d->arch.monitor_msr_bitmap, 0, PAGE_SIZE);
> +
>      for_each_vcpu ( d, v )
>      {
>          if ( v->arch.vm_event )
> @@ -55,11 +62,66 @@ void vm_event_cleanup_domain(struct domain *d)
>          v->arch.vm_event = NULL;
>      }
>
> +    free_xenheap_page(d->arch.monitor_msr_bitmap);
> +    d->arch.monitor_msr_bitmap = NULL;
> +
>      d->arch.mem_access_emulate_each_rep = 0;
>      memset(&d->arch.monitor, 0, sizeof(d->arch.monitor));
>      memset(&d->monitor, 0, sizeof(d->monitor));
>  }
>
> +int vm_event_enable_msr(struct domain *d, u32 msr)
>

This function..


> +{
> +    if ( !d->arch.monitor_msr_bitmap )
> +        return -EINVAL;
> +
> +    if ( msr <= 0x1fff )
> +        set_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
> +    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
> +    {
> +        msr &= 0x1fff;
> +        set_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
> +    }
> +
> +    hvm_enable_msr_interception(d, msr);
> +
> +    return 0;
> +}
> +
> +int vm_event_disable_msr(struct domain *d, u32 msr)
>

..and this..


> +{
> +    if ( !d->arch.monitor_msr_bitmap )
> +        return -EINVAL;
> +
> +    if ( msr <= 0x1fff )
> +        clear_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
> +    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
> +    {
> +        msr &= 0x1fff;
> +        clear_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
> +    }
> +
> +    return 0;
> +}
> +
> +bool_t vm_event_is_msr_enabled(const struct domain *d, u32 msr)
>

..and this one has nothing to do with vm_event really. These belong on the
monitor.c side.


> +{
> +    bool_t rc = 0;
> +
> +    if ( !d->arch.monitor_msr_bitmap )
> +        return 0;
> +
> +    if ( msr <= 0x1fff )
> +        rc = test_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
> +    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
> +    {
> +        msr &= 0x1fff;
> +        rc = test_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
> +    }
> +
> +    return rc;
> +}
> +
>  void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
>  {
>      if ( !is_hvm_domain(d) || !atomic_read(&v->vm_event_pause_count) )
>
>
Cheers,
Tamas
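An aside on the layout the new helpers assume: one xenheap page holds one bit
per MSR, with the low range (0x0-0x1fff) in bytes 0x000-0x3ff and the high
range (0xc0000000-0xc0001fff) folded down via the 0x1fff mask into bytes
0x400-0x7ff, mirroring the low/high split of the hardware MSR bitmaps. A
hedged sketch of the indexing, inferred from the patch (the helper name is
illustrative, not part of the posted code):

    static inline unsigned long *msr_bitmap_base(unsigned long *bitmap, u32 msr)
    {
        if ( msr <= 0x1fff )
            return bitmap + 0x000 / BYTES_PER_LONG;  /* low range, bytes 0x000-0x3ff */
        /* assumes the caller checked 0xc0000000 <= msr <= 0xc0001fff */
        return bitmap + 0x400 / BYTES_PER_LONG;      /* high range, bytes 0x400-0x7ff */
    }

    /* then e.g.: set_bit(msr & 0x1fff, msr_bitmap_base(d->arch.monitor_msr_bitmap, msr)); */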
Razvan Cojocaru April 12, 2016, 5:57 p.m. UTC | #4
On 04/12/16 20:49, Tamas K Lengyel wrote:
>     diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
>     index 1fec412..1be058a 100644
>     --- a/xen/arch/x86/monitor.c
>     +++ b/xen/arch/x86/monitor.c
>     @@ -20,6 +20,7 @@
>       */
> 
>      #include <asm/monitor.h>
>     +#include <asm/vm_event.h>
>      #include <public/vm_event.h>
> 
>      int arch_monitor_domctl_event(struct domain *d,
>     @@ -77,25 +78,25 @@ int arch_monitor_domctl_event(struct domain *d,
> 
>          case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
>          {
>     -        bool_t old_status = ad->monitor.mov_to_msr_enabled;
>     +        bool_t old_status;
>     +        int rc;
>     +        u32 msr = mop->u.mov_to_msr.msr;
> 
>     -        if ( unlikely(old_status == requested_status) )
>     -            return -EEXIST;
>     +        domain_pause(d);
> 
>     -        if ( requested_status && mop->u.mov_to_msr.extended_capture &&
>     -             !hvm_enable_msr_exit_interception(d) )
>     -            return -EOPNOTSUPP;
>     +        old_status = vm_event_is_msr_enabled(d, msr);
> 
>     -        domain_pause(d);
>     +        if ( unlikely(old_status == requested_status) )
>     +            return -EEXIST;
> 
> 
> Moving this test after the domain gets paused will leave the guest
> paused on error condition. Any reason why this rearrangement is necessary?

The rearrangement is necessary because vm_event_is_msr_enabled() uses
the per-domain bitmap to do its job, so I thought having the domain
paused when checking it would work best.

Leaving the domain paused there is an oversight; I need to correct that,
so thanks for noticing!
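One possible shape for the corrected fragment — a sketch only, unpausing
before the early return rather than after it:

    domain_pause(d);

    old_status = vm_event_is_msr_enabled(d, msr);

    if ( unlikely(old_status == requested_status) )
    {
        domain_unpause(d);
        return -EEXIST;
    }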

>     -        if ( requested_status && mop->u.mov_to_msr.extended_capture )
>     -            ad->monitor.mov_to_msr_extended = 1;
>     +        if ( requested_status )
>     +            rc = vm_event_enable_msr(d, msr);
>              else
>     -            ad->monitor.mov_to_msr_extended = 0;
>     +            rc = vm_event_disable_msr(d, msr);
> 
>     -        ad->monitor.mov_to_msr_enabled = requested_status;
>              domain_unpause(d);
>     -        break;
>     +
>     +        return rc;
>          }
> 
>          case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
>     diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
>     index 5635603..b851e39 100644
>     --- a/xen/arch/x86/vm_event.c
>     +++ b/xen/arch/x86/vm_event.c
>     @@ -27,6 +27,13 @@ int vm_event_init_domain(struct domain *d)
>      {
>          struct vcpu *v;
> 
>     +    d->arch.monitor_msr_bitmap = alloc_xenheap_page();
>     +
>     +    if ( !d->arch.monitor_msr_bitmap )
>     +        return -ENOMEM;
>     +
>     +    memset(d->arch.monitor_msr_bitmap, 0, PAGE_SIZE);
>     +
>          for_each_vcpu ( d, v )
>          {
>              if ( v->arch.vm_event )
>     @@ -55,11 +62,66 @@ void vm_event_cleanup_domain(struct domain *d)
>              v->arch.vm_event = NULL;
>          }
> 
>     +    free_xenheap_page(d->arch.monitor_msr_bitmap);
>     +    d->arch.monitor_msr_bitmap = NULL;
>     +
>          d->arch.mem_access_emulate_each_rep = 0;
>          memset(&d->arch.monitor, 0, sizeof(d->arch.monitor));
>          memset(&d->monitor, 0, sizeof(d->monitor));
>      }
> 
>     +int vm_event_enable_msr(struct domain *d, u32 msr)
> 
> 
> This function..
>  
> 
>     +{
>     +    if ( !d->arch.monitor_msr_bitmap )
>     +        return -EINVAL;
>     +
>     +    if ( msr <= 0x1fff )
>     +        set_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
>     +    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
>     +    {
>     +        msr &= 0x1fff;
>     +        set_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
>     +    }
>     +
>     +    hvm_enable_msr_interception(d, msr);
>     +
>     +    return 0;
>     +}
>     +
>     +int vm_event_disable_msr(struct domain *d, u32 msr)
> 
> 
> ..and this..
>  
> 
>     +{
>     +    if ( !d->arch.monitor_msr_bitmap )
>     +        return -EINVAL;
>     +
>     +    if ( msr <= 0x1fff )
>     +        clear_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
>     +    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
>     +    {
>     +        msr &= 0x1fff;
>     +        clear_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
>     +    }
>     +
>     +    return 0;
>     +}
>     +
>     +bool_t vm_event_is_msr_enabled(const struct domain *d, u32 msr)
> 
>  
> ..and this one has nothing to do with vm_event really. These belong on
> the monitor.c side.
>  
> 
>     +{
>     +    bool_t rc = 0;
>     +
>     +    if ( !d->arch.monitor_msr_bitmap )
>     +        return 0;
>     +
>     +    if ( msr <= 0x1fff )
>     +        rc = test_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
>     +    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
>     +    {
>     +        msr &= 0x1fff;
>     +        rc = test_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
>     +    }
>     +
>     +    return rc;
>     +}
>     +
>      void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
>      {
>          if ( !is_hvm_domain(d) || !atomic_read(&v->vm_event_pause_count) )

Fair enough. Will move them.


Thanks,
Razvan
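A rough sketch of where the three helpers would land after the move — purely
illustrative declarations, since V2 had not been posted at this point and the
monitor_* names are guesses:

    /* xen/arch/x86/monitor.c (hypothetical V2 placement) */
    int monitor_enable_msr(struct domain *d, u32 msr);     /* was vm_event_enable_msr() */
    int monitor_disable_msr(struct domain *d, u32 msr);    /* was vm_event_disable_msr() */
    bool_t monitor_is_msr_enabled(const struct domain *d, u32 msr);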

Patch

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index f5a034a..9698d46 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2183,8 +2183,8 @@  int xc_monitor_get_capabilities(xc_interface *xch, domid_t domain_id,
 int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
                              uint16_t index, bool enable, bool sync,
                              bool onchangeonly);
-int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, bool enable,
-                          bool extended_capture);
+int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, uint32_t msr,
+                          bool enable);
 int xc_monitor_singlestep(xc_interface *xch, domid_t domain_id, bool enable);
 int xc_monitor_software_breakpoint(xc_interface *xch, domid_t domain_id,
                                    bool enable);
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
index b1705dd..78131b2 100644
--- a/tools/libxc/xc_monitor.c
+++ b/tools/libxc/xc_monitor.c
@@ -86,8 +86,8 @@  int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
     return do_domctl(xch, &domctl);
 }
 
-int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, bool enable,
-                          bool extended_capture)
+int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, uint32_t msr,
+                          bool enable)
 {
     DECLARE_DOMCTL;
 
@@ -96,7 +96,7 @@  int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, bool enable,
     domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
                                     : XEN_DOMCTL_MONITOR_OP_DISABLE;
     domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR;
-    domctl.u.monitor_op.u.mov_to_msr.extended_capture = extended_capture;
+    domctl.u.monitor_op.u.mov_to_msr.msr = msr;
 
     return do_domctl(xch, &domctl);
 }
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 56c5514..25d55de 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
@@ -57,9 +57,8 @@  bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
 void hvm_event_msr(unsigned int msr, uint64_t value)
 {
     struct vcpu *curr = current;
-    struct arch_domain *ad = &curr->domain->arch;
 
-    if ( ad->monitor.mov_to_msr_enabled )
+    if ( vm_event_is_msr_enabled(curr->domain, msr) )
     {
         vm_event_request_t req = {
             .reason = VM_EVENT_REASON_MOV_TO_MSR,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f24126d..ce78627 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3695,7 +3695,6 @@  int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
     bool_t mtrr;
     unsigned int edx, index;
     int ret = X86EMUL_OKAY;
-    struct arch_domain *currad = &current->domain->arch;
 
     HVMTRACE_3D(MSR_WRITE, msr,
                (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
@@ -3703,7 +3702,7 @@  int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
     hvm_cpuid(1, NULL, NULL, NULL, &edx);
     mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
 
-    if ( may_defer && unlikely(currad->monitor.mov_to_msr_enabled) )
+    if ( may_defer && unlikely(vm_event_is_msr_enabled(v->domain, msr)) )
     {
         ASSERT(v->arch.vm_event);
 
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 8284281..fd5c772 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -39,6 +39,7 @@ 
 #include <asm/flushtlb.h>
 #include <asm/shadow.h>
 #include <asm/tboot.h>
+#include <asm/vm_event.h>
 #include <asm/apic.h>
 
 static bool_t __read_mostly opt_vpid_enabled = 1;
@@ -108,18 +109,6 @@  u64 vmx_ept_vpid_cap __read_mostly;
 u64 vmx_vmfunc __read_mostly;
 bool_t vmx_virt_exception __read_mostly;
 
-const u32 vmx_introspection_force_enabled_msrs[] = {
-    MSR_IA32_SYSENTER_EIP,
-    MSR_IA32_SYSENTER_ESP,
-    MSR_IA32_SYSENTER_CS,
-    MSR_IA32_MC0_CTL,
-    MSR_STAR,
-    MSR_LSTAR
-};
-
-const unsigned int vmx_introspection_force_enabled_msrs_size =
-    ARRAY_SIZE(vmx_introspection_force_enabled_msrs);
-
 static DEFINE_PER_CPU_READ_MOSTLY(paddr_t, vmxon_region);
 static DEFINE_PER_CPU(paddr_t, current_vmcs);
 static DEFINE_PER_CPU(struct list_head, active_vmcs_list);
@@ -810,17 +799,8 @@  void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type)
     if ( msr_bitmap == NULL )
         return;
 
-    if ( unlikely(d->arch.monitor.mov_to_msr_enabled &&
-                  d->arch.monitor.mov_to_msr_extended) &&
-         vm_event_check_ring(&d->vm_event->monitor) )
-    {
-        unsigned int i;
-
-        /* Filter out MSR-s needed for memory introspection */
-        for ( i = 0; i < vmx_introspection_force_enabled_msrs_size; i++ )
-            if ( msr == vmx_introspection_force_enabled_msrs[i] )
-                return;
-    }
+    if ( unlikely(vm_event_is_msr_enabled(d, msr)) )
+        return;
 
     /*
      * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bc4410f..9135441 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1958,16 +1958,12 @@  void vmx_hypervisor_cpuid_leaf(uint32_t sub_idx,
         *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
 }
 
-static void vmx_enable_msr_exit_interception(struct domain *d)
+static void vmx_enable_msr_interception(struct domain *d, uint32_t msr)
 {
     struct vcpu *v;
-    unsigned int i;
 
-    /* Enable interception for MSRs needed for memory introspection. */
     for_each_vcpu ( d, v )
-        for ( i = 0; i < vmx_introspection_force_enabled_msrs_size; i++ )
-            vmx_enable_intercept_for_msr(v, vmx_introspection_force_enabled_msrs[i],
-                                         MSR_TYPE_W);
+        vmx_enable_intercept_for_msr(v, msr, MSR_TYPE_W);
 }
 
 static bool_t vmx_is_singlestep_supported(void)
@@ -2166,7 +2162,7 @@  static struct hvm_function_table __initdata vmx_function_table = {
     .handle_eoi           = vmx_handle_eoi,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
     .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
-    .enable_msr_exit_interception = vmx_enable_msr_exit_interception,
+    .enable_msr_interception = vmx_enable_msr_interception,
     .is_singlestep_supported = vmx_is_singlestep_supported,
     .set_mode = vmx_set_mode,
     .altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
index 1fec412..1be058a 100644
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -20,6 +20,7 @@ 
  */
 
 #include <asm/monitor.h>
+#include <asm/vm_event.h>
 #include <public/vm_event.h>
 
 int arch_monitor_domctl_event(struct domain *d,
@@ -77,25 +78,25 @@  int arch_monitor_domctl_event(struct domain *d,
 
     case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
     {
-        bool_t old_status = ad->monitor.mov_to_msr_enabled;
+        bool_t old_status;
+        int rc;
+        u32 msr = mop->u.mov_to_msr.msr;
 
-        if ( unlikely(old_status == requested_status) )
-            return -EEXIST;
+        domain_pause(d);
 
-        if ( requested_status && mop->u.mov_to_msr.extended_capture &&
-             !hvm_enable_msr_exit_interception(d) )
-            return -EOPNOTSUPP;
+        old_status = vm_event_is_msr_enabled(d, msr);
 
-        domain_pause(d);
+        if ( unlikely(old_status == requested_status) )
+            return -EEXIST;
 
-        if ( requested_status && mop->u.mov_to_msr.extended_capture )
-            ad->monitor.mov_to_msr_extended = 1;
+        if ( requested_status )
+            rc = vm_event_enable_msr(d, msr);
         else
-            ad->monitor.mov_to_msr_extended = 0;
+            rc = vm_event_disable_msr(d, msr);
 
-        ad->monitor.mov_to_msr_enabled = requested_status;
         domain_unpause(d);
-        break;
+
+        return rc;
     }
 
     case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 5635603..b851e39 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -27,6 +27,13 @@  int vm_event_init_domain(struct domain *d)
 {
     struct vcpu *v;
 
+    d->arch.monitor_msr_bitmap = alloc_xenheap_page();
+
+    if ( !d->arch.monitor_msr_bitmap )
+        return -ENOMEM;
+
+    memset(d->arch.monitor_msr_bitmap, 0, PAGE_SIZE);
+
     for_each_vcpu ( d, v )
     {
         if ( v->arch.vm_event )
@@ -55,11 +62,66 @@  void vm_event_cleanup_domain(struct domain *d)
         v->arch.vm_event = NULL;
     }
 
+    free_xenheap_page(d->arch.monitor_msr_bitmap);
+    d->arch.monitor_msr_bitmap = NULL;
+
     d->arch.mem_access_emulate_each_rep = 0;
     memset(&d->arch.monitor, 0, sizeof(d->arch.monitor));
     memset(&d->monitor, 0, sizeof(d->monitor));
 }
 
+int vm_event_enable_msr(struct domain *d, u32 msr)
+{
+    if ( !d->arch.monitor_msr_bitmap )
+        return -EINVAL;
+
+    if ( msr <= 0x1fff )
+        set_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        set_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
+    }
+
+    hvm_enable_msr_interception(d, msr);
+
+    return 0;
+}
+
+int vm_event_disable_msr(struct domain *d, u32 msr)
+{
+    if ( !d->arch.monitor_msr_bitmap )
+        return -EINVAL;
+
+    if ( msr <= 0x1fff )
+        clear_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        clear_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
+    }
+
+    return 0;
+}
+
+bool_t vm_event_is_msr_enabled(const struct domain *d, u32 msr)
+{
+    bool_t rc = 0;
+
+    if ( !d->arch.monitor_msr_bitmap )
+        return 0;
+
+    if ( msr <= 0x1fff )
+        rc = test_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        rc = test_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
+    }
+
+    return rc;
+}
+
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
 {
     if ( !is_hvm_domain(d) || !atomic_read(&v->vm_event_pause_count) )
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
index 014d9ba..9215f6c 100644
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ -37,6 +37,27 @@  void vm_event_cleanup_domain(struct domain *d)
 }
 
 static inline
+int vm_event_enable_msr(struct domain *d, u32 msr)
+{
+    /* Not supported on ARM. */
+    return 0;
+}
+
+static inline
+int vm_event_disable_msr(struct domain *d, u32 msr)
+{
+    /* Not supported on ARM. */
+    return 0;
+}
+
+static inline
+bool_t vm_event_is_msr_enabled(const struct domain *d, u32 msr)
+{
+    /* Not supported on ARM. */
+    return 0;
+}
+
+static inline
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
 {
     /* Not supported on ARM. */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index d393ed2..d8d91c2 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -398,12 +398,12 @@  struct arch_domain
         unsigned int write_ctrlreg_enabled       : 4;
         unsigned int write_ctrlreg_sync          : 4;
         unsigned int write_ctrlreg_onchangeonly  : 4;
-        unsigned int mov_to_msr_enabled          : 1;
-        unsigned int mov_to_msr_extended         : 1;
         unsigned int singlestep_enabled          : 1;
         unsigned int software_breakpoint_enabled : 1;
     } monitor;
 
+    unsigned long *monitor_msr_bitmap;
+
     /* Mem_access emulation control */
     bool_t mem_access_emulate_each_rep;
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 7b7ff3f..9d1c0ef 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -211,7 +211,7 @@  struct hvm_function_table {
                                   uint32_t *eax, uint32_t *ebx,
                                   uint32_t *ecx, uint32_t *edx);
 
-    void (*enable_msr_exit_interception)(struct domain *d);
+    void (*enable_msr_interception)(struct domain *d, uint32_t msr);
     bool_t (*is_singlestep_supported)(void);
     int (*set_mode)(struct vcpu *v, int mode);
 
@@ -565,11 +565,11 @@  static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
     return hvm_funcs.nhvm_intr_blocked(v);
 }
 
-static inline bool_t hvm_enable_msr_exit_interception(struct domain *d)
+static inline bool_t hvm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
-    if ( hvm_funcs.enable_msr_exit_interception )
+    if ( hvm_funcs.enable_msr_interception )
     {
-        hvm_funcs.enable_msr_exit_interception(d);
+        hvm_funcs.enable_msr_interception(d, msr);
         return 1;
     }
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index b54f52f..7bf5326 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -562,13 +562,6 @@  enum vmcs_field {
     HOST_RIP                        = 0x00006c16,
 };
 
-/*
- * A set of MSR-s that need to be enabled for memory introspection
- * to work.
- */
-extern const u32 vmx_introspection_force_enabled_msrs[];
-extern const unsigned int vmx_introspection_force_enabled_msrs_size;
-
 #define VMCS_VPID_WIDTH 16
 
 #define MSR_TYPE_R 1
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index 0470240..67bfcb9 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -67,4 +67,10 @@  static inline uint32_t vm_event_monitor_get_capabilities(struct domain *d)
     return capabilities;
 }
 
+int vm_event_enable_msr(struct domain *d, u32 msr);
+
+int vm_event_disable_msr(struct domain *d, u32 msr);
+
+bool_t vm_event_is_msr_enabled(const struct domain *d, u32 msr);
+
 #endif /* __ASM_X86_VM_EVENT_H__ */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 2457698..875c09a 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1107,8 +1107,7 @@  struct xen_domctl_monitor_op {
         } mov_to_cr;
 
         struct {
-            /* Enable the capture of an extended set of MSRs */
-            uint8_t extended_capture;
+            uint32_t msr;
         } mov_to_msr;
 
         struct {