
[4/5] KVM: x86: Shrink struct kvm_vcpu_arch

Message ID 20230213163351.30704-5-minipli@grsecurity.net (mailing list archive)
State New, archived
Series [1/5] KVM: x86: Shrink struct kvm_pmu

Commit Message

Mathias Krause Feb. 13, 2023, 4:33 p.m. UTC
Reshuffle the members of struct kvm_vcpu_arch to make use of otherwise
unused padding holes, allowing denser packing without disrupting their
grouping.

This allows us to shrink the object size by 48 bytes for 64-bit builds.

Signed-off-by: Mathias Krause <minipli@grsecurity.net>
---
Instead of attempting an optimal shuffle by sorting members by their
alignment constraints, I kept the members grouped by meaning to preserve
the maintainability of the code.
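
As a minimal illustration of the principle (a toy struct mirroring the
first hunk, not the actual KVM layout), keeping the two 4-byte members
adjacent lets them share one 8-byte slot instead of each dragging a
4-byte padding hole around the pointer:

#include <stdint.h>
#include <stdio.h>

struct before {                  /* 24 bytes on x86-64 */
	int cpuid_nent;          /* 4 bytes + 4-byte hole */
	void *cpuid_entries;     /* 8 bytes */
	uint32_t kvm_cpuid_base; /* 4 bytes + 4 bytes tail padding */
};

struct after {                   /* 16 bytes on x86-64 */
	uint32_t kvm_cpuid_base; /* 4 bytes */
	int cpuid_nent;          /* 4 bytes, fills the former hole */
	void *cpuid_entries;     /* 8 bytes */
};

int main(void)
{
	printf("before: %zu, after: %zu\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}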

 arch/x86/include/asm/kvm_host.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 040eee3e9583..5036456b05b0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -824,18 +824,18 @@  struct kvm_vcpu_arch {
 
 	int halt_request; /* real mode on Intel only */
 
+	u32 kvm_cpuid_base;
 	int cpuid_nent;
 	struct kvm_cpuid_entry2 *cpuid_entries;
-	u32 kvm_cpuid_base;
 
 	u64 reserved_gpa_bits;
 	int maxphyaddr;
 
 	/* emulate context */
 
-	struct x86_emulate_ctxt *emulate_ctxt;
 	bool emulate_regs_need_sync_to_vcpu;
 	bool emulate_regs_need_sync_from_vcpu;
+	struct x86_emulate_ctxt *emulate_ctxt;
 	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
 
 	gpa_t time;
@@ -916,17 +916,17 @@  struct kvm_vcpu_arch {
 	unsigned long last_retry_addr;
 
 	struct {
-		bool halted;
 		gfn_t gfns[ASYNC_PF_PER_VCPU];
 		struct gfn_to_hva_cache data;
 		u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
 		u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
-		u16 vec;
 		u32 id;
+		u16 vec;
 		bool send_user_only;
 		u32 host_apf_flags;
 		bool delivery_as_pf_vmexit;
 		bool pageready_pending;
+		bool halted;
 	} apf;
 
 	/* OSVW MSRs (AMD only) */
@@ -942,6 +942,9 @@  struct kvm_vcpu_arch {
 
 	u64 msr_kvm_poll_control;
 
+	/* set at EPT violation at this point */
+	unsigned long exit_qualification;
+
 	/*
 	 * Indicates the guest is trying to write a gfn that contains one or
 	 * more of the PTEs used to translate the write itself, i.e. the access
@@ -959,9 +962,6 @@  struct kvm_vcpu_arch {
 	 */
 	bool write_fault_to_shadow_pgtable;
 
-	/* set at EPT violation at this point */
-	unsigned long exit_qualification;
-
 	/* pv related host specific info */
 	struct {
 		bool pv_unhalted;
@@ -979,9 +979,6 @@  struct kvm_vcpu_arch {
 	/* Host CPU on which VM-entry was most recently attempted */
 	int last_vmentry_cpu;
 
-	/* AMD MSRC001_0015 Hardware Configuration */
-	u64 msr_hwcr;
-
 	/* pv related cpuid info */
 	struct {
 		/*
@@ -1006,6 +1003,9 @@  struct kvm_vcpu_arch {
 	 */
 	bool pdptrs_from_userspace;
 
+	/* AMD MSRC001_0015 Hardware Configuration */
+	u64 msr_hwcr;
+
 #if IS_ENABLED(CONFIG_HYPERV)
 	hpa_t hv_root_tdp;
 #endif
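
For verification, the before/after holes can be inspected with pahole on
an object file built with debug info, e.g. (the object path below is
just an example):

  $ pahole -C kvm_vcpu_arch arch/x86/kvm/x86.o | grep -E 'hole|size:'

With the reordering applied, the "size:" line should report 48 bytes
less on 64-bit builds, with correspondingly fewer "hole" annotations.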