@@ -330,8 +330,8 @@ struct kvm_vcpu {
int srcu_depth;
#endif
int mode;
+ unsigned int guest_debug;
u64 requests;
- unsigned long guest_debug;

struct mutex mutex;
struct kvm_run *run;
@@ -340,8 +340,8 @@ struct kvm_vcpu {
struct rcuwait wait;
#endif
struct pid __rcu *pid;
- int sigset_active;
sigset_t sigset;
+ int sigset_active;
unsigned int halt_poll_ns;
bool valid_wakeup;

@@ -356,10 +356,10 @@ struct kvm_vcpu {

#ifdef CONFIG_KVM_ASYNC_PF
struct {
- u32 queued;
struct list_head queue;
struct list_head done;
spinlock_t lock;
+ u32 queued;
} async_pf;
#endif

Reshuffle the members of struct kvm_vcpu to make use of otherwise
unused padding holes, allowing denser packing without disrupting the
grouping or introducing wrong cacheline sharing. This includes
narrowing guest_debug to unsigned int, which is sufficient for the
KVM_GUESTDBG_* flags it carries and lets it fill the padding hole
after 'mode'. The embedded mutex and spinlocks still do not share
cachelines, so no regressions due to lock contention causing
cacheline thrashing are expected. This saves 40 bytes on 64-bit
builds.

Signed-off-by: Mathias Krause <minipli@grsecurity.net>
---
 include/linux/kvm_host.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
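
A minimal userspace sketch of the packing effect behind the async_pf
hunk, using simplified stand-in types (assumptions: an LP64 target and
a 4-byte spinlock_t, i.e. lock debugging disabled) -- not kernel code:

#include <stdio.h>

struct list_head { void *next, *prev; };	/* 16 bytes, 8-byte aligned */

struct async_pf_old {			/* member order before the patch */
	unsigned int queued;		/* 4 bytes, then a 4-byte hole */
	struct list_head queue;		/* needs 8-byte alignment */
	struct list_head done;
	unsigned int lock;		/* stand-in for spinlock_t */
};					/* 4 bytes tail padding -> 48 bytes */

struct async_pf_new {			/* member order after the patch */
	struct list_head queue;
	struct list_head done;
	unsigned int lock;
	unsigned int queued;		/* reuses the former tail padding */
};					/* no holes -> 40 bytes */

int main(void)
{
	printf("old: %zu bytes\n", sizeof(struct async_pf_old));
	printf("new: %zu bytes\n", sizeof(struct async_pf_new));
	return 0;
}

The actual before/after layout of struct kvm_vcpu, including all holes
and the total size, can be verified with pahole, e.g.
"pahole -C kvm_vcpu vmlinux" on a build of each tree.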