[4/4] x86/hvm: create hvm_funcs for {svm,vmx}_{set,clear}_msr_intercept()

Message ID 20230227075652.3782973-5-burzalodowa@gmail.com (mailing list archive)
State New, archived
Series hvm: add hvm_funcs hooks for msr intercept handling

Commit Message

Xenia Ragiadakou Feb. 27, 2023, 7:56 a.m. UTC
Add hvm_funcs hooks for {set,clear}_msr_intercept() for controlling the msr
intercept in common vpmu code.

No functional change intended.

Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
---
 xen/arch/x86/cpu/vpmu_amd.c             | 10 ++++-----
 xen/arch/x86/cpu/vpmu_intel.c           | 24 ++++++++++-----------
 xen/arch/x86/hvm/svm/svm.c              |  4 ++--
 xen/arch/x86/hvm/vmx/vmcs.c             |  4 ++--
 xen/arch/x86/hvm/vmx/vmx.c              |  2 ++
 xen/arch/x86/include/asm/hvm/hvm.h      | 28 +++++++++++++++++++++++++
 xen/arch/x86/include/asm/hvm/svm/vmcb.h |  4 ++--
 xen/arch/x86/include/asm/hvm/vmx/vmcs.h |  4 ++--
 8 files changed, 55 insertions(+), 25 deletions(-)
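
For orientation, the hooks follow the existing hvm_funcs pattern: common
vPMU code calls hvm_{set,clear}_msr_intercept(), which dispatch through
the function table to the vendor implementation. A condensed excerpt of
the new wrappers (the full hunks are in the patch below):

    /* xen/arch/x86/include/asm/hvm/hvm.h -- condensed from the patch */
    struct hvm_function_table {
        /* ... */
        void (*set_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
        void (*clear_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
        /* ... */
    };

    static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
                                             int flags)
    {
        /* Tolerate a hook that is not wired up: the call becomes a no-op. */
        if ( hvm_funcs.set_msr_intercept )
            alternative_vcall(hvm_funcs.set_msr_intercept, v, msr, flags);
    }

This lets e.g. vpmu_amd.c write hvm_clear_msr_intercept(v, counters[i],
MSR_RW) without referencing svm_clear_msr_intercept() directly.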

Comments

Jan Beulich Feb. 28, 2023, 2:58 p.m. UTC | #1
On 27.02.2023 08:56, Xenia Ragiadakou wrote:
> Add hvm_funcs hooks for {set,clear}_msr_intercept() for controlling the msr
> intercept in common vpmu code.

What is this going to buy us? All calls ...

> --- a/xen/arch/x86/cpu/vpmu_amd.c
> +++ b/xen/arch/x86/cpu/vpmu_amd.c
> @@ -165,9 +165,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
>  
>      for ( i = 0; i < num_counters; i++ )
>      {
> -        svm_clear_msr_intercept(v, counters[i], MSR_RW);
> -        svm_set_msr_intercept(v, ctrls[i], MSR_W);
> -        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
> +        hvm_clear_msr_intercept(v, counters[i], MSR_RW);
> +        hvm_set_msr_intercept(v, ctrls[i], MSR_W);
> +        hvm_clear_msr_intercept(v, ctrls[i], MSR_R);
>      }
>  
>      msr_bitmap_on(vpmu);
> @@ -180,8 +180,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
>  
>      for ( i = 0; i < num_counters; i++ )
>      {
> -        svm_set_msr_intercept(v, counters[i], MSR_RW);
> -        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
> +        hvm_set_msr_intercept(v, counters[i], MSR_RW);
> +        hvm_set_msr_intercept(v, ctrls[i], MSR_RW);
>      }
>  
>      msr_bitmap_off(vpmu);

... here will go to the SVM functions anyway, while ...

> --- a/xen/arch/x86/cpu/vpmu_intel.c
> +++ b/xen/arch/x86/cpu/vpmu_intel.c
> @@ -230,22 +230,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
>  
>      /* Allow Read/Write PMU Counters MSR Directly. */
>      for ( i = 0; i < fixed_pmc_cnt; i++ )
> -        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
> +        hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>  
>      for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
> -        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
> +        hvm_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>  
>          if ( full_width_write )
> -            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
> +            hvm_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>      }
>  
>      /* Allow Read PMU Non-global Controls Directly. */
>      for ( i = 0; i < arch_pmc_cnt; i++ )
> -        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
> +        hvm_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>  
> -    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
> -    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
> +    hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
> +    hvm_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>  }
>  
>  static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
> @@ -253,21 +253,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
>      unsigned int i;
>  
>      for ( i = 0; i < fixed_pmc_cnt; i++ )
> -        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
> +        hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>  
>      for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
> -        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
> +        hvm_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>  
>          if ( full_width_write )
> -            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
> +            hvm_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>      }
>  
>      for ( i = 0; i < arch_pmc_cnt; i++ )
> -        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
> +        hvm_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>  
> -    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
> -    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
> +    hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
> +    hvm_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>  }
>  
>  static inline void __core2_vpmu_save(struct vcpu *v)

... all calls here will go to the VMX ones. For making either
vpmu_<vendor>.c build without that vendor's virtualization enabled,
isn't all it takes to have ...

> @@ -916,6 +932,18 @@ static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
>      ASSERT_UNREACHABLE();
>  }
>  
> +static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
> +                                         int flags)
> +{
> +    ASSERT_UNREACHABLE();
> +}
> +
> +static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
> +                                           int flags)
> +{
> +    ASSERT_UNREACHABLE();
> +}

... respective SVM and VMX stubs in place instead?

Jan
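
For illustration, a minimal sketch of the alternative Jan appears to
suggest: keep the direct svm_*/vmx_* calls in vpmu_<vendor>.c and provide
inline stubs in the vendor headers for builds where that vendor's
virtualization code is compiled out. The CONFIG_SVM guard is hypothetical
(the thread does not name the exact Kconfig symbol), and the stub bodies
reuse the ASSERT_UNREACHABLE() pattern seen in the patch:

    /* xen/arch/x86/include/asm/hvm/svm/vmcb.h -- sketch, not part of the patch */
    #ifdef CONFIG_SVM
    void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
    void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
    #else
    static inline void svm_set_msr_intercept(struct vcpu *v, uint32_t msr,
                                             int flags)
    {
        ASSERT_UNREACHABLE();
    }
    static inline void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
                                               int flags)
    {
        ASSERT_UNREACHABLE();
    }
    #endif

with an analogous pair for vmx_{set,clear}_msr_intercept() in vmcs.h, so
the vPMU code keeps calling the vendor functions by name.
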
Xenia Ragiadakou Feb. 28, 2023, 8:14 p.m. UTC | #2
On 2/28/23 16:58, Jan Beulich wrote:
> On 27.02.2023 08:56, Xenia Ragiadakou wrote:
>> Add hvm_funcs hooks for {set,clear}_msr_intercept() for controlling the msr
>> intercept in common vpmu code.
> 
> What is this going to buy us? All calls ...
> 
>> --- a/xen/arch/x86/cpu/vpmu_amd.c
>> +++ b/xen/arch/x86/cpu/vpmu_amd.c
>> @@ -165,9 +165,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
>>   
>>       for ( i = 0; i < num_counters; i++ )
>>       {
>> -        svm_clear_msr_intercept(v, counters[i], MSR_RW);
>> -        svm_set_msr_intercept(v, ctrls[i], MSR_W);
>> -        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
>> +        hvm_clear_msr_intercept(v, counters[i], MSR_RW);
>> +        hvm_set_msr_intercept(v, ctrls[i], MSR_W);
>> +        hvm_clear_msr_intercept(v, ctrls[i], MSR_R);
>>       }
>>   
>>       msr_bitmap_on(vpmu);
>> @@ -180,8 +180,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
>>   
>>       for ( i = 0; i < num_counters; i++ )
>>       {
>> -        svm_set_msr_intercept(v, counters[i], MSR_RW);
>> -        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
>> +        hvm_set_msr_intercept(v, counters[i], MSR_RW);
>> +        hvm_set_msr_intercept(v, ctrls[i], MSR_RW);
>>       }
>>   
>>       msr_bitmap_off(vpmu);
> 
> ... here will go to the SVM functions anyway, while ...
> 
>> --- a/xen/arch/x86/cpu/vpmu_intel.c
>> +++ b/xen/arch/x86/cpu/vpmu_intel.c
>> @@ -230,22 +230,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
>>   
>>       /* Allow Read/Write PMU Counters MSR Directly. */
>>       for ( i = 0; i < fixed_pmc_cnt; i++ )
>> -        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>> +        hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>   
>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>       {
>> -        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>> +        hvm_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>   
>>           if ( full_width_write )
>> -            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>> +            hvm_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>       }
>>   
>>       /* Allow Read PMU Non-global Controls Directly. */
>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>> -        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>> +        hvm_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>   
>> -    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>> -    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>> +    hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>> +    hvm_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>   }
>>   
>>   static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
>> @@ -253,21 +253,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
>>       unsigned int i;
>>   
>>       for ( i = 0; i < fixed_pmc_cnt; i++ )
>> -        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>> +        hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>   
>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>       {
>> -        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>> +        hvm_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>   
>>           if ( full_width_write )
>> -            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>> +            hvm_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>       }
>>   
>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>> -        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>> +        hvm_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>   
>> -    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>> -    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>> +    hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>> +    hvm_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>   }
>>   
>>   static inline void __core2_vpmu_save(struct vcpu *v)
> 
> ... all calls here will go to the VMX ones. For making either
> vpmu_<vendor>.c build without that vendor's virtualization enabled,
> isn't all it takes to have ...
> 
>> @@ -916,6 +932,18 @@ static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
>>       ASSERT_UNREACHABLE();
>>   }
>>   
>> +static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
>> +                                         int flags)
>> +{
>> +    ASSERT_UNREACHABLE();
>> +}
>> +
>> +static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
>> +                                           int flags)
>> +{
>> +    ASSERT_UNREACHABLE();
>> +}
> 
> ... respective SVM and VMX stubs in place instead?

IMO it is more readable, and they looked like very good candidates for
being abstracted because they are doing the same thing under both
technologies.
Are you suggesting that their usage in common code should be discouraged
and should not be exported via the hvm_funcs interface? Or just that the
amount of changes cannot be justified?
IIUC Andrew also suggested to use hvm_funcs for msr intercept handling,
but I'm not sure whether he had this or something else in mind.
Jan Beulich March 1, 2023, 9:14 a.m. UTC | #3
On 28.02.2023 21:14, Xenia Ragiadakou wrote:
> 
> On 2/28/23 16:58, Jan Beulich wrote:
>> On 27.02.2023 08:56, Xenia Ragiadakou wrote:
>>> Add hvm_funcs hooks for {set,clear}_msr_intercept() for controlling the msr
>>> intercept in common vpmu code.
>>
>> What is this going to buy us? All calls ...
>>
>>> --- a/xen/arch/x86/cpu/vpmu_amd.c
>>> +++ b/xen/arch/x86/cpu/vpmu_amd.c
>>> @@ -165,9 +165,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
>>>   
>>>       for ( i = 0; i < num_counters; i++ )
>>>       {
>>> -        svm_clear_msr_intercept(v, counters[i], MSR_RW);
>>> -        svm_set_msr_intercept(v, ctrls[i], MSR_W);
>>> -        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
>>> +        hvm_clear_msr_intercept(v, counters[i], MSR_RW);
>>> +        hvm_set_msr_intercept(v, ctrls[i], MSR_W);
>>> +        hvm_clear_msr_intercept(v, ctrls[i], MSR_R);
>>>       }
>>>   
>>>       msr_bitmap_on(vpmu);
>>> @@ -180,8 +180,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
>>>   
>>>       for ( i = 0; i < num_counters; i++ )
>>>       {
>>> -        svm_set_msr_intercept(v, counters[i], MSR_RW);
>>> -        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
>>> +        hvm_set_msr_intercept(v, counters[i], MSR_RW);
>>> +        hvm_set_msr_intercept(v, ctrls[i], MSR_RW);
>>>       }
>>>   
>>>       msr_bitmap_off(vpmu);
>>
>> ... here will go to the SVM functions anyway, while ...
>>
>>> --- a/xen/arch/x86/cpu/vpmu_intel.c
>>> +++ b/xen/arch/x86/cpu/vpmu_intel.c
>>> @@ -230,22 +230,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
>>>   
>>>       /* Allow Read/Write PMU Counters MSR Directly. */
>>>       for ( i = 0; i < fixed_pmc_cnt; i++ )
>>> -        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>> +        hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>>   
>>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>>       {
>>> -        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>> +        hvm_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>>   
>>>           if ( full_width_write )
>>> -            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>> +            hvm_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>>       }
>>>   
>>>       /* Allow Read PMU Non-global Controls Directly. */
>>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>> -        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>> +        hvm_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>>   
>>> -    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>>> -    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>> +    hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>>> +    hvm_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>>   }
>>>   
>>>   static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
>>> @@ -253,21 +253,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
>>>       unsigned int i;
>>>   
>>>       for ( i = 0; i < fixed_pmc_cnt; i++ )
>>> -        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>> +        hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>>   
>>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>>       {
>>> -        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>> +        hvm_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>>   
>>>           if ( full_width_write )
>>> -            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>> +            hvm_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>>       }
>>>   
>>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>> -        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>> +        hvm_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>>   
>>> -    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>>> -    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>> +    hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>>> +    hvm_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>>   }
>>>   
>>>   static inline void __core2_vpmu_save(struct vcpu *v)
>>
>> ... all calls here will go to the VMX ones. For making either
>> vpmu_<vendor>.c build without that vendor's virtualization enabled,
>> isn't all it takes to have ...
>>
>>> @@ -916,6 +932,18 @@ static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
>>>       ASSERT_UNREACHABLE();
>>>   }
>>>   
>>> +static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
>>> +                                         int flags)
>>> +{
>>> +    ASSERT_UNREACHABLE();
>>> +}
>>> +
>>> +static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
>>> +                                           int flags)
>>> +{
>>> +    ASSERT_UNREACHABLE();
>>> +}
>>
>> ... respective SVM and VMX stubs in place instead?
> 
> IMO it is more readable, and they looked like very good candidates for
> being abstracted because they are doing the same thing under both
> technologies.
> Are you suggesting that their usage in common code should be discouraged
> and should not be exported via the hvm_funcs interface? Or just that the
> amount of changes cannot be justified?

The amount of changes is okay if the route taken is deemed useful going
forward. For now I view vPMU as too special to provide sufficient
justification for yet another pair of hook functions. All the more so
since - as indicated before - every single call site will only ever call
one of the two possible callees.

> IIUC Andrew also suggested to use hvm_funcs for msr intercept handling,
> but I'm not sure whether he had this or something else in mind.

Andrew, any chance you could outline your thinking / further plans here?
In particular, do you have other future use sites in mind that would
live outside of vendor-specific code?

Jan

Patch

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index ed6706959e..a306297a69 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -165,9 +165,9 @@  static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
 
     for ( i = 0; i < num_counters; i++ )
     {
-        svm_clear_msr_intercept(v, counters[i], MSR_RW);
-        svm_set_msr_intercept(v, ctrls[i], MSR_W);
-        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
+        hvm_clear_msr_intercept(v, counters[i], MSR_RW);
+        hvm_set_msr_intercept(v, ctrls[i], MSR_W);
+        hvm_clear_msr_intercept(v, ctrls[i], MSR_R);
     }
 
     msr_bitmap_on(vpmu);
@@ -180,8 +180,8 @@  static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
 
     for ( i = 0; i < num_counters; i++ )
     {
-        svm_set_msr_intercept(v, counters[i], MSR_RW);
-        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
+        hvm_set_msr_intercept(v, counters[i], MSR_RW);
+        hvm_set_msr_intercept(v, ctrls[i], MSR_RW);
     }
 
     msr_bitmap_off(vpmu);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index bd91c79a36..46ae38a326 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -230,22 +230,22 @@  static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
 
     /* Allow Read/Write PMU Counters MSR Directly. */
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
+        hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
+        hvm_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
+            hvm_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     /* Allow Read PMU Non-global Controls Directly. */
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
+        hvm_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
-    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
+    hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    hvm_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
@@ -253,21 +253,21 @@  static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
     unsigned int i;
 
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
+        hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
+        hvm_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
+            hvm_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
+        hvm_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
-    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
+    hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    hvm_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static inline void __core2_vpmu_save(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index eb144272f4..e54dc08e8a 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -288,7 +288,7 @@  svm_msrbit(unsigned long *msr_bitmap, uint32_t msr)
     return msr_bit;
 }
 
-void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
+void cf_check svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
 {
     unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
 
@@ -303,7 +303,7 @@  void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
         __set_bit(msr * 2 + 1, msr_bit);
 }
 
-void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
+void cf_check svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
 {
     unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
 
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 22c12509d5..3d0022f392 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -902,7 +902,7 @@  static void vmx_set_host_env(struct vcpu *v)
               (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
 }
 
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type)
+void cf_check vmx_clear_msr_intercept(struct vcpu *v, uint32_t msr, int type)
 {
     struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
     struct domain *d = v->domain;
@@ -933,7 +933,7 @@  void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type)
         ASSERT(!"MSR out of range for interception\n");
 }
 
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type)
+void cf_check vmx_set_msr_intercept(struct vcpu *v, uint32_t msr, int type)
 {
     struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 87c47c002c..d3f2b3add4 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2765,6 +2765,8 @@  static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
     .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
     .update_vlapic_mode = vmx_vlapic_msr_changed,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
+    .set_msr_intercept    = vmx_set_msr_intercept,
+    .clear_msr_intercept  = vmx_clear_msr_intercept,
     .enable_msr_interception = vmx_enable_msr_interception,
     .altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
     .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index f853e2f3e8..dd9aa42d0a 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -224,6 +224,8 @@  struct hvm_function_table {
                                 paddr_t *L1_gpa, unsigned int *page_order,
                                 uint8_t *p2m_acc, struct npfec npfec);
 
+    void (*set_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
+    void (*clear_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
     void (*enable_msr_interception)(struct domain *d, uint32_t msr);
 
     /* Alternate p2m */
@@ -658,6 +660,20 @@  static inline int nhvm_hap_walk_L1_p2m(
         v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
 }
 
+static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
+                                         int flags)
+{
+    if ( hvm_funcs.set_msr_intercept )
+        alternative_vcall(hvm_funcs.set_msr_intercept, v, msr, flags);
+}
+
+static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
+                                           int flags)
+{
+    if ( hvm_funcs.clear_msr_intercept )
+        alternative_vcall(hvm_funcs.clear_msr_intercept, v, msr, flags);
+}
+
 static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
     alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
@@ -916,6 +932,18 @@  static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
     ASSERT_UNREACHABLE();
 }
 
+static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
+                                         int flags)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
+                                           int flags)
+{
+    ASSERT_UNREACHABLE();
+}
+
 #define is_viridian_domain(d) ((void)(d), false)
 #define is_viridian_vcpu(v) ((void)(v), false)
 #define has_viridian_time_ref_count(d) ((void)(d), false)
diff --git a/xen/arch/x86/include/asm/hvm/svm/vmcb.h b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
index ed2e55e5cf..dbe8ba89cc 100644
--- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
+++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
@@ -585,8 +585,8 @@  void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
 
-void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
-void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
+void cf_check svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
+void cf_check svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
 #define svm_disable_intercept_for_msr(v, msr) \
     svm_clear_msr_intercept((v), (msr), MSR_RW)
 #define svm_enable_intercept_for_msr(v, msr) \
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index e08c506be5..f2880c8122 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -644,8 +644,8 @@  static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
     return 0;
 }
 
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type);
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type);
+void cf_check vmx_clear_msr_intercept(struct vcpu *v, uint32_t msr, int type);
+void cf_check vmx_set_msr_intercept(struct vcpu *v, uint32_t msr, int type);
 void vmx_vmcs_switch(paddr_t from, paddr_t to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
 void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);