
[XEN,v3,10/16] x86/domain: guard SVM-specific functions with using_svm macro

Message ID e03693d1daa386a31e09794b0167d282df5a8bfe.1717410850.git.Sergiy_Kibrik@epam.com (mailing list archive)
State New, archived
Series x86: make cpu virtualization support configurable

Commit Message

Sergiy Kibrik June 3, 2024, 11:26 a.m. UTC
From: Xenia Ragiadakou <burzalodowa@gmail.com>

Replace the cpu_has_svm check with using_svm, so that not only SVM support in
the CPU gets checked, but also the presence of svm_load_segs() and
svm_load_segs_prefetch() in the build.

Since SVM depends on HVM, the using_svm check can stand on its own, without
the surrounding CONFIG_HVM guard.

Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
---
changes in v3:
 - using_svm instead of IS_ENABLED(CONFIG_SVM)
 - updated description
---
 xen/arch/x86/domain.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
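
For context, the point of using_svm is that it folds the Kconfig setting into
the condition: when CONFIG_SVM is disabled the predicate is constant-false, so
the calls to svm_load_segs() and svm_load_segs_prefetch() become dead code and
only their declarations need to be visible. A minimal sketch of such a
predicate (an illustrative assumption -- the exact definition added earlier in
this series may differ):

#ifdef CONFIG_SVM
#define using_svm  cpu_has_svm   /* runtime check: CPU advertises SVM */
#else
#define using_svm  false         /* constant false: SVM-only paths compile out */
#endif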

Comments

Jan Beulich June 10, 2024, 3:10 p.m. UTC | #1
On 03.06.2024 13:26, Sergiy Kibrik wrote:
> From: Xenia Ragiadakou <burzalodowa@gmail.com>
> 
> Replace the cpu_has_svm check with using_svm, so that not only SVM support in
> the CPU gets checked, but also the presence of svm_load_segs() and
> svm_load_segs_prefetch() in the build.
> 
> Since SVM depends on HVM, the using_svm check can stand on its own, without
> the surrounding CONFIG_HVM guard.
> 
> Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
> Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>

The code you're touching is solely for PV, even if it's interacting with HVM
code. Therefore "x86/PV:" may be the better subject prefix.

> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -1731,11 +1731,9 @@ static void load_segments(struct vcpu *n)
>          if ( !(n->arch.flags & TF_kernel_mode) )
>              SWAP(gsb, gss);
>  
> -#ifdef CONFIG_HVM
> -        if ( cpu_has_svm && (uregs->fs | uregs->gs) <= 3 )
> +        if ( using_svm && (uregs->fs | uregs->gs) <= 3 )
>              fs_gs_done = svm_load_segs(n->arch.pv.ldt_ents, LDT_VIRT_START(n),
>                                         n->arch.pv.fs_base, gsb, gss);
> -#endif
>      }
>  
>      if ( !fs_gs_done )
> @@ -2048,9 +2046,9 @@ static void __context_switch(void)
>  
>      write_ptbase(n);
>  
> -#if defined(CONFIG_PV) && defined(CONFIG_HVM)
> +#if defined(CONFIG_PV)

In such a case, would you mind switching (back) to the shorter "#ifdef" form?
Then
Acked-by: Jan Beulich <jbeulich@suse.com>

Jan

>      /* Prefetch the VMCB if we expect to use it later in the context switch */
> -    if ( cpu_has_svm && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
> +    if ( using_svm && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
>          svm_load_segs_prefetch();
>  #endif
>
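
For reference, the shorter form being asked for in the second hunk would
presumably read:

#ifdef CONFIG_PV
    /* Prefetch the VMCB if we expect to use it later in the context switch */
    if ( using_svm && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
        svm_load_segs_prefetch();
#endif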

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 536542841e..a2f19f8b46 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1731,11 +1731,9 @@  static void load_segments(struct vcpu *n)
         if ( !(n->arch.flags & TF_kernel_mode) )
             SWAP(gsb, gss);
 
-#ifdef CONFIG_HVM
-        if ( cpu_has_svm && (uregs->fs | uregs->gs) <= 3 )
+        if ( using_svm && (uregs->fs | uregs->gs) <= 3 )
             fs_gs_done = svm_load_segs(n->arch.pv.ldt_ents, LDT_VIRT_START(n),
                                        n->arch.pv.fs_base, gsb, gss);
-#endif
     }
 
     if ( !fs_gs_done )
@@ -2048,9 +2046,9 @@  static void __context_switch(void)
 
     write_ptbase(n);
 
-#if defined(CONFIG_PV) && defined(CONFIG_HVM)
+#if defined(CONFIG_PV)
     /* Prefetch the VMCB if we expect to use it later in the context switch */
-    if ( cpu_has_svm && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
+    if ( using_svm && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
         svm_load_segs_prefetch();
 #endif