@@ -824,18 +824,18 @@ struct kvm_vcpu_arch {
int halt_request; /* real mode on Intel only */
+ u32 kvm_cpuid_base;
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
- u32 kvm_cpuid_base;
u64 reserved_gpa_bits;
int maxphyaddr;
/* emulate context */
- struct x86_emulate_ctxt *emulate_ctxt;
bool emulate_regs_need_sync_to_vcpu;
bool emulate_regs_need_sync_from_vcpu;
+ struct x86_emulate_ctxt *emulate_ctxt;
int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
gpa_t time;
@@ -916,17 +916,17 @@ struct kvm_vcpu_arch {
unsigned long last_retry_addr;
struct {
- bool halted;
gfn_t gfns[ASYNC_PF_PER_VCPU];
struct gfn_to_hva_cache data;
u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
- u16 vec;
u32 id;
+ u16 vec;
bool send_user_only;
u32 host_apf_flags;
bool delivery_as_pf_vmexit;
bool pageready_pending;
+ bool halted;
} apf;
/* OSVW MSRs (AMD only) */
@@ -942,6 +942,9 @@ struct kvm_vcpu_arch {
u64 msr_kvm_poll_control;
+ /* set at EPT violation at this point */
+ unsigned long exit_qualification;
+
/*
* Indicates the guest is trying to write a gfn that contains one or
* more of the PTEs used to translate the write itself, i.e. the access
@@ -959,9 +962,6 @@ struct kvm_vcpu_arch {
*/
bool write_fault_to_shadow_pgtable;
- /* set at EPT violation at this point */
- unsigned long exit_qualification;
-
/* pv related host specific info */
struct {
bool pv_unhalted;
@@ -979,9 +979,6 @@ struct kvm_vcpu_arch {
/* Host CPU on which VM-entry was most recently attempted */
int last_vmentry_cpu;
- /* AMD MSRC001_0015 Hardware Configuration */
- u64 msr_hwcr;
-
/* pv related cpuid info */
struct {
/*
@@ -1006,6 +1003,9 @@ struct kvm_vcpu_arch {
*/
bool pdptrs_from_userspace;
+ /* AMD MSRC001_0015 Hardware Configuration */
+ u64 msr_hwcr;
+
#if IS_ENABLED(CONFIG_HYPERV)
hpa_t hv_root_tdp;
#endif
Reshuffle the members of struct kvm_vcpu_arch to make use of otherwise unused padding holes, allowing denser packing without disrupting their grouping. This allows us to shrink the object size by 48 bytes for 64-bit builds.

Signed-off-by: Mathias Krause <minipli@grsecurity.net>
---
Instead of attempting to create an optimal shuffle by sorting members by their alignment constraints, I intended to keep the members grouped by their meaning, to preserve the maintainability of the code.

arch/x86/include/asm/kvm_host.h | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)