
[for-4.19,v3,4/4] x86/shadow: Don't leave trace record field uninitialized

Message ID 20240621180658.92831-5-andrew.cooper3@citrix.com (mailing list archive)
State New, archived
Series x86/shadow: Trace fixes and cleanup

Commit Message

Andrew Cooper June 21, 2024, 6:06 p.m. UTC
From: Jan Beulich <jbeulich@suse.com>

The emulation_count field is currently set only conditionally. Convert all
of the field assignments to an initializer, which guarantees that the field
is set to 0 (default initialized) when GUEST_PAGING_LEVELS != 3.

Rework trace_shadow_emulate(), renaming it to sh_trace_emulate(), to be
consistent with the other trace helpers.
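
For context, a minimal standalone sketch of the C rule the fix relies on
(the struct below is illustrative only, not the patch's real record layout):
members omitted from a designated initializer are zero-initialized, so
emulation_count no longer needs an explicit assignment outside
GUEST_PAGING_LEVELS == 3 builds.

    #include <stdint.h>

    /* Illustrative record only; field types simplified. */
    struct rec {
        uint64_t gl1e;
        uint32_t va;
        uint32_t flags:29, emulation_count:3;
    };

    /*
     * Members not named in a designated initializer are zero-initialized
     * (C99 6.7.8), so emulation_count is guaranteed to be 0 here.
     */
    struct rec d = {
        .gl1e  = 0x1234,
        .va    = 0x5678,
        .flags = 0x1,
    };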

Coverity-ID: 1598430
Fixes: 9a86ac1aa3d2 ("xentrace 5/7: Additional tracing for the shadow code")
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Release-acked-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: George Dunlap <george.dunlap@citrix.com>
CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>

v2:
 * Rebase over packing/sh_trace() cleanup.
---
 xen/arch/x86/mm/shadow/multi.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

Patch

diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 7f95d50be397..71a2673682f4 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2063,30 +2063,29 @@  static void cf_check trace_emulate_write_val(
 #endif
 }
 
-static inline void trace_shadow_emulate(guest_l1e_t gl1e, unsigned long va)
+static inline void sh_trace_emulate(guest_l1e_t gl1e, unsigned long va)
 {
     if ( tb_init_done )
     {
         struct __packed {
-            /* for PAE, guest_l1e may be 64 while guest_va may be 32;
-               so put it first for alignment sake. */
+            /*
+             * For GUEST_PAGING_LEVELS=3 (PAE paging), guest_l1e is 64 while
+             * guest_va is 32.  Put it first to avoid padding.
+             */
             guest_l1e_t gl1e, write_val;
             guest_va_t va;
             uint32_t flags:29, emulation_count:3;
-        } d;
-        u32 event;
-
-        event = TRC_SHADOW_EMULATE | ((GUEST_PAGING_LEVELS-2)<<8);
-
-        d.gl1e = gl1e;
-        d.write_val.l1 = this_cpu(trace_emulate_write_val);
-        d.va = va;
+        } d = {
+            .gl1e = gl1e,
+            .write_val.l1 = this_cpu(trace_emulate_write_val),
+            .va = va,
 #if GUEST_PAGING_LEVELS == 3
-        d.emulation_count = this_cpu(trace_extra_emulation_count);
+            .emulation_count = this_cpu(trace_extra_emulation_count),
 #endif
-        d.flags = this_cpu(trace_shadow_path_flags);
+            .flags = this_cpu(trace_shadow_path_flags),
+        };
 
-        trace(event, sizeof(d), &d);
+        sh_trace(TRC_SHADOW_EMULATE, sizeof(d), &d);
     }
 }
 #endif /* CONFIG_HVM */
@@ -2815,7 +2814,7 @@  static int cf_check sh_page_fault(
     }
 #endif /* PAE guest */
 
-    trace_shadow_emulate(gw.l1e, va);
+    sh_trace_emulate(gw.l1e, va);
  emulate_done:
     SHADOW_PRINTK("emulated\n");
     return EXCRET_fault_fixed;
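
For reference, a sketch of what the sh_trace() wrapper used above presumably
does, inferred from the open-coded sequence this patch removes (the real
helper was introduced earlier in this series and may differ in detail):

    static void sh_trace(uint32_t event, unsigned int extra,
                         const void *extra_data)
    {
        /*
         * Fold the guest paging level into the event number, as the removed
         * code did with TRC_SHADOW_EMULATE | ((GUEST_PAGING_LEVELS - 2) << 8),
         * then emit the trace record.
         */
        trace(event | ((GUEST_PAGING_LEVELS - 2) << 8), extra, extra_data);
    }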