[v2,3/7] x86: shrink struct arch_{vcpu,domain} when !HVM

Message ID: 014a655b-7080-3804-3a56-5e00851a2a7d@suse.com
State: Superseded
Series: x86: build adjustments

Commit Message

Jan Beulich Aug. 7, 2020, 11:33 a.m. UTC
While this will affect neither overall memory overhead (struct vcpu as
well as struct domain get allocated as single pages) nor code size (the
offsets into the base structures are too large to be representable as
signed 8-bit displacements), it'll allow the tail of struct
pv_{domain,vcpu} to share a cache line with subsequent struct
arch_{domain,vcpu} fields.
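
As a rough standalone illustration (the stand-in types and member names
below are made up; the real structures have many more fields), the
struct-vs-union switch changes the size of the overlaid member, and
hence how close subsequent fields sit to the tail of the PV data:

#include <stdio.h>

struct pv_domain     { unsigned long pv_data[4]; };  /* stand-in */
struct paging_domain { unsigned long mode; };        /* stand-in */

typedef
#ifdef CONFIG_HVM
struct                     /* size: sum of all members */
#else
union                      /* size: largest member only */
#endif
hvm_domain {
    unsigned long a[8];    /* hypothetical members */
    unsigned long b[8];
} hvm_domain_t;

struct arch_domain {
    union {
        struct pv_domain pv;
        hvm_domain_t hvm;  /* shrinks from 128 to 64 bytes when !HVM */
    };
    struct paging_domain paging;  /* moves correspondingly closer to
                                     the tail of struct pv_domain */
};

int main(void)
{
    printf("sizeof(hvm_domain_t)       = %zu\n", sizeof(hvm_domain_t));
    printf("sizeof(struct arch_domain) = %zu\n",
           sizeof(struct arch_domain));
    return 0;
}

Compiling this once with and once without -DCONFIG_HVM makes the size
difference visible.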

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
RFC: There is a risk associated with this: If we still have code
     somewhere accessing the HVM parts of the structures without a prior
     type check of the guest, this is going to end up worse than the so
     far not uncommon case of the access simply going to space unused by
     PV.  We may therefore want to consider whether to further restrict
     when this conversion to union gets done.
     And of course there's also the risk of future compilers complaining
     about this abuse of unions. But this is limited to code that's dead
     in !HVM configs, so it is only an apparent problem.
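
To make the hazard concrete, here is a minimal sketch with stand-in
types (the real code involves Xen's struct domain and the
is_hvm_domain() predicate; the member names here are made up):

struct pv_state  { unsigned long gdt_frames; };  /* stand-in */
struct hvm_state { unsigned long param; };       /* stand-in */

struct arch_state {
    union {
        struct pv_state  pv;
        struct hvm_state hvm;  /* aliases pv once the union is in place */
    };
};

/* BAD: no prior guest type check.  Previously this write went to
 * storage the PV side never used; with the overlay it clobbers
 * a->pv.gdt_frames for a PV guest. */
void unchecked_write(struct arch_state *a, unsigned long v)
{
    a->hvm.param = v;
}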

Comments

Andrew Cooper Aug. 7, 2020, 5:14 p.m. UTC | #1
On 07/08/2020 12:33, Jan Beulich wrote:
> While this will affect neither overall memory overhead (struct vcpu as
> well as struct domain get allocated as single pages) nor code size (the
> offsets into the base structures are too large to be representable as
> signed 8-bit displacements), it'll allow the tail of struct
> pv_{domain,vcpu} to share a cache line with subsequent struct
> arch_{domain,vcpu} fields.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> RFC: There is a risk associated with this: If we still have code
>      somewhere accessing the HVM parts of the structures without a prior
>      type check of the guest, this is going to end up worse than the so
>      far not uncommon case of the access simply going to space unused by
>      PV.  We may therefore want to consider whether to further restrict
>      when this conversion to union gets done.
>      And of course there's also the risk of future compilers complaining
>      about this abuse of unions. But this is limited to code that's dead
>      in !HVM configs, so it is only an apparent problem.
>
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -99,7 +99,13 @@ struct hvm_pi_ops {
>  
>  #define MAX_NR_IOREQ_SERVERS 8
>  
> -struct hvm_domain {
> +typedef
> +#ifdef CONFIG_HVM
> +struct
> +#else
> +union
> +#endif
> +hvm_domain {
>      /* Guest page range used for non-default ioreq servers */
>      struct {
>          unsigned long base;
> @@ -203,7 +209,7 @@ struct hvm_domain {
>  #ifdef CONFIG_MEM_SHARING
>      struct mem_sharing_domain mem_sharing;
>  #endif
> -};
> +} hvm_domain_t;

Honestly, I'd say no to this patch even if it resulted in a 100x
speedup, because I am totally lost for words about this construct and
the effect it has on the comprehensibility of our code.

Seeing as improved cache locality appears to be the sole benefit,
marginal as it is, you can achieve that in a better way by rearranging
struct domain/vcpu to put the pv/hvm union at the end.
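
Roughly, with illustrative stand-in types rather than the real field
layout, that rearrangement would look like:

struct paging_domain { unsigned long mode; };   /* stand-in */
struct pv_domain     { unsigned long a[4]; };   /* stand-in */
struct hvm_domain    { unsigned long b[16]; };  /* stand-in */

struct arch_domain {
    struct paging_domain paging;  /* hot fields packed at the front */
    /* ... remaining non-union members ... */
    union {
        struct pv_domain pv;      /* slack after the smaller member */
        struct hvm_domain hvm;    /* now sits in tail padding, where it
                                     doesn't separate the fields above */
    };
};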

~Andrew
Jan Beulich Aug. 19, 2020, 8:35 a.m. UTC | #2
On 07.08.2020 19:14, Andrew Cooper wrote:
> On 07/08/2020 12:33, Jan Beulich wrote:
>> While this will affect neither overall memory overhead (struct vcpu as
>> well as struct domain get allocated as single pages) nor code size (the
>> offsets into the base structures are too large to be representable as
>> signed 8-bit displacements), it'll allow the tail of struct
>> pv_{domain,vcpu} to share a cache line with subsequent struct
>> arch_{domain,vcpu} fields.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> RFC: There is a risk associated with this: If we still have code
>>      somewhere accessing the HVM parts of the structures without a prior
>>      type check of the guest, this is going to end up worse than the so
>>      far not uncommon case of the access simply going to space unused by
>>      PV.  We may therefore want to consider whether to further restrict
>>      when this conversion to union gets done.
>>      And of course there's also the risk of future compilers complaining
>>      about this abuse of unions. But this is limited to code that's dead
>>      in !HVM configs, so it is only an apparent problem.
>>
>> --- a/xen/include/asm-x86/hvm/domain.h
>> +++ b/xen/include/asm-x86/hvm/domain.h
>> @@ -99,7 +99,13 @@ struct hvm_pi_ops {
>>  
>>  #define MAX_NR_IOREQ_SERVERS 8
>>  
>> -struct hvm_domain {
>> +typedef
>> +#ifdef CONFIG_HVM
>> +struct
>> +#else
>> +union
>> +#endif
>> +hvm_domain {
>>      /* Guest page range used for non-default ioreq servers */
>>      struct {
>>          unsigned long base;
>> @@ -203,7 +209,7 @@ struct hvm_domain {
>>  #ifdef CONFIG_MEM_SHARING
>>      struct mem_sharing_domain mem_sharing;
>>  #endif
>> -};
>> +} hvm_domain_t;
> 
> Honestly, I'd say no to this patch even if it resulted in a 100x
> speedup, because I am totally lost for words about this construct and
> the effect it has on the comprehensibility of our code.

Okay - away it goes.

Jan

Patch

--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -709,7 +709,7 @@  long arch_do_domctl(
         unsigned int fmp = domctl->u.ioport_mapping.first_mport;
         unsigned int np = domctl->u.ioport_mapping.nr_ports;
         unsigned int add = domctl->u.ioport_mapping.add_mapping;
-        struct hvm_domain *hvm;
+        hvm_domain_t *hvm;
         struct g2m_ioport *g2m_ioport;
         int found = 0;
 
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -319,7 +319,7 @@  struct arch_domain
 
     union {
         struct pv_domain pv;
-        struct hvm_domain hvm;
+        hvm_domain_t hvm;
     };
 
     struct paging_domain paging;
@@ -582,7 +582,7 @@  struct arch_vcpu
     /* Virtual Machine Extensions */
     union {
         struct pv_vcpu pv;
-        struct hvm_vcpu hvm;
+        hvm_vcpu_t hvm;
     };
 
     /*
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -99,7 +99,13 @@  struct hvm_pi_ops {
 
 #define MAX_NR_IOREQ_SERVERS 8
 
-struct hvm_domain {
+typedef
+#ifdef CONFIG_HVM
+struct
+#else
+union
+#endif
+hvm_domain {
     /* Guest page range used for non-default ioreq servers */
     struct {
         unsigned long base;
@@ -203,7 +209,7 @@  struct hvm_domain {
 #ifdef CONFIG_MEM_SHARING
     struct mem_sharing_domain mem_sharing;
 #endif
-};
+} hvm_domain_t;
 
 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
 
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -149,7 +149,13 @@  struct altp2mvcpu {
 
 #define vcpu_altp2m(v) ((v)->arch.hvm.avcpu)
 
-struct hvm_vcpu {
+typedef
+#ifdef CONFIG_HVM
+struct
+#else
+union
+#endif
+hvm_vcpu {
     /* Guest control-register and EFER values, just as the guest sees them. */
     unsigned long       guest_cr[5];
     unsigned long       guest_efer;
@@ -213,7 +219,7 @@  struct hvm_vcpu {
     struct x86_event     inject_event;
 
     struct viridian_vcpu *viridian;
-};
+} hvm_vcpu_t;
 
 #endif /* __ASM_X86_HVM_VCPU_H__ */