[RFC,v3,4/7] KVM: x86: Let userspace re-enable previously disabled exits

Message ID 20220615011622.136646-5-kechenl@nvidia.com (mailing list archive)
State New, archived
Series KVM: x86: add per-vCPU exits disable capability

Commit Message

Kechen Lu June 15, 2022, 1:16 a.m. UTC
From: Sean Christopherson <seanjc@google.com>

Add an OVERRIDE flag to KVM_CAP_X86_DISABLE_EXITS to allow userspace to
re-enable exits and/or override previous settings.  There's no real use
case for the per-VM ioctl, but a future per-vCPU variant wants to let
userspace toggle interception while the vCPU is running; add the OVERRIDE
functionality now to provide consistency between the per-VM and
per-vCPU variants.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 Documentation/virt/kvm/api.rst |  5 +++++
 arch/x86/kvm/x86.c             | 39 +++++++++++++++++++++++-----------
 include/uapi/linux/kvm.h       |  4 +++-
 3 files changed, 35 insertions(+), 13 deletions(-)
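
As an aside (not part of the posted patch), a minimal userspace sketch of how
the OVERRIDE flag could be exercised through KVM_ENABLE_CAP; the
set_disabled_exits() helper and vm_fd are hypothetical, and the per-VM
capability still has to be enabled before any vCPUs are created:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch: program the set of disabled exits for a VM.  With OVERRIDE set,
 * a '1' bit disables the corresponding exit and a '0' bit re-enables it;
 * without OVERRIDE, only the '1' bits take effect and prior settings persist.
 */
static int set_disabled_exits(int vm_fd, __u64 disable_mask)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_X86_DISABLE_EXITS;
	cap.args[0] = KVM_X86_DISABLE_EXITS_OVERRIDE | disable_mask;

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

/* e.g. keep only HLT exits disabled, re-enabling everything else: */
/*	set_disabled_exits(vm_fd, KVM_X86_DISABLE_EXITS_HLT);	*/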

Comments

Chao Gao June 15, 2022, 2:51 a.m. UTC | #1
On Tue, Jun 14, 2022 at 06:16:19PM -0700, Kechen Lu wrote:
>From: Sean Christopherson <seanjc@google.com>
>
>Add an OVERRIDE flag to KVM_CAP_X86_DISABLE_EXITS to allow userspace to
>re-enable exits and/or override previous settings.  There's no real use
>case for the per-VM ioctl, but a future per-vCPU variant wants to let
>userspace toggle interception while the vCPU is running; add the OVERRIDE
>functionality now to provide consistency between the per-VM and
>per-vCPU variants.
>
>Signed-off-by: Sean Christopherson <seanjc@google.com>
>---
> Documentation/virt/kvm/api.rst |  5 +++++
> arch/x86/kvm/x86.c             | 39 +++++++++++++++++++++++-----------
> include/uapi/linux/kvm.h       |  4 +++-
> 3 files changed, 35 insertions(+), 13 deletions(-)
>
>diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
>index d0d8749591a8..89e13b6783b5 100644
>--- a/Documentation/virt/kvm/api.rst
>+++ b/Documentation/virt/kvm/api.rst
>@@ -6941,6 +6941,7 @@ Valid bits in args[0] are::
>   #define KVM_X86_DISABLE_EXITS_HLT              (1 << 1)
>   #define KVM_X86_DISABLE_EXITS_PAUSE            (1 << 2)
>   #define KVM_X86_DISABLE_EXITS_CSTATE           (1 << 3)
>+  #define KVM_X86_DISABLE_EXITS_OVERRIDE         (1ull << 63)
> 
> Enabling this capability on a VM provides userspace with a way to no
> longer intercept some instructions for improved latency in some
>@@ -6949,6 +6950,10 @@ physical CPUs.  More bits can be added in the future; userspace can
> just pass the KVM_CHECK_EXTENSION result to KVM_ENABLE_CAP to disable
> all such vmexits.
> 
>+By default, this capability only disables exits.  To re-enable an exit, or to
>+override previous settings, userspace can set KVM_X86_DISABLE_EXITS_OVERRIDE,
>+in which case KVM will enable/disable according to the mask (a '1' == disable).
>+
> Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
> 
> 7.14 KVM_CAP_S390_HPAGE_1M
>diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>index f31ebbb1b94f..7cc8ac550bc7 100644
>--- a/arch/x86/kvm/x86.c
>+++ b/arch/x86/kvm/x86.c
>@@ -4201,11 +4201,10 @@ static inline bool kvm_can_mwait_in_guest(void)
> 
> static u64 kvm_get_allowed_disable_exits(void)
> {
>-	u64 r = KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
>-		KVM_X86_DISABLE_EXITS_CSTATE;
>+	u64 r = KVM_X86_DISABLE_VALID_EXITS;
> 
>-	if(kvm_can_mwait_in_guest())
>-		r |= KVM_X86_DISABLE_EXITS_MWAIT;
>+	if (!kvm_can_mwait_in_guest())
>+		r &= ~KVM_X86_DISABLE_EXITS_MWAIT;

This hunk looks like a fix to patch 3; it can be squashed into that patch.
Sean Christopherson July 20, 2022, 5:18 p.m. UTC | #2
On Wed, Jun 15, 2022, Chao Gao wrote:
> On Tue, Jun 14, 2022 at 06:16:19PM -0700, Kechen Lu wrote:
> > 7.14 KVM_CAP_S390_HPAGE_1M
> >diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> >index f31ebbb1b94f..7cc8ac550bc7 100644
> >--- a/arch/x86/kvm/x86.c
> >+++ b/arch/x86/kvm/x86.c
> >@@ -4201,11 +4201,10 @@ static inline bool kvm_can_mwait_in_guest(void)
> > 
> > static u64 kvm_get_allowed_disable_exits(void)
> > {
> >-	u64 r = KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
> >-		KVM_X86_DISABLE_EXITS_CSTATE;
> >+	u64 r = KVM_X86_DISABLE_VALID_EXITS;
> > 
> >-	if(kvm_can_mwait_in_guest())
> >-		r |= KVM_X86_DISABLE_EXITS_MWAIT;
> >+	if (!kvm_can_mwait_in_guest())
> >+		r &= ~KVM_X86_DISABLE_EXITS_MWAIT;
> 
> This hunk looks like a fix to patch 3; it can be squashed into that patch.

It's not a fix, just an inversion of the logic to make it easier to maintain
going forward.  I intentionally made the change in patch 4 so that adding the
kvm_get_allowed_disable_exits() is a more "pure" movement of code from the "check"
path to a common helper.

I agree it's kinda odd, but I still think splitting the changes is desirable.
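
For illustration only (not from the thread; the _old/_new helper names are
made up and the bodies mirror the hunk quoted above): the old form builds the
allowed mask up bit by bit, while the new form starts from
KVM_X86_DISABLE_VALID_EXITS and strips what the host can't support, so exit
bits added to the VALID mask later are picked up without touching this helper.

static u64 kvm_get_allowed_disable_exits_old(void)
{
	u64 r = KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
		KVM_X86_DISABLE_EXITS_CSTATE;

	if (kvm_can_mwait_in_guest())
		r |= KVM_X86_DISABLE_EXITS_MWAIT;

	return r;
}

static u64 kvm_get_allowed_disable_exits_new(void)
{
	u64 r = KVM_X86_DISABLE_VALID_EXITS;

	if (!kvm_can_mwait_in_guest())
		r &= ~KVM_X86_DISABLE_EXITS_MWAIT;

	return r;
}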

Patch

diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index d0d8749591a8..89e13b6783b5 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -6941,6 +6941,7 @@  Valid bits in args[0] are::
   #define KVM_X86_DISABLE_EXITS_HLT              (1 << 1)
   #define KVM_X86_DISABLE_EXITS_PAUSE            (1 << 2)
   #define KVM_X86_DISABLE_EXITS_CSTATE           (1 << 3)
+  #define KVM_X86_DISABLE_EXITS_OVERRIDE         (1ull << 63)
 
 Enabling this capability on a VM provides userspace with a way to no
 longer intercept some instructions for improved latency in some
@@ -6949,6 +6950,10 @@  physical CPUs.  More bits can be added in the future; userspace can
 just pass the KVM_CHECK_EXTENSION result to KVM_ENABLE_CAP to disable
 all such vmexits.
 
+By default, this capability only disables exits.  To re-enable an exit, or to
+override previous settings, userspace can set KVM_X86_DISABLE_EXITS_OVERRIDE,
+in which case KVM will enable/disable according to the mask (a '1' == disable).
+
 Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
 
 7.14 KVM_CAP_S390_HPAGE_1M
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f31ebbb1b94f..7cc8ac550bc7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4201,11 +4201,10 @@  static inline bool kvm_can_mwait_in_guest(void)
 
 static u64 kvm_get_allowed_disable_exits(void)
 {
-	u64 r = KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
-		KVM_X86_DISABLE_EXITS_CSTATE;
+	u64 r = KVM_X86_DISABLE_VALID_EXITS;
 
-	if(kvm_can_mwait_in_guest())
-		r |= KVM_X86_DISABLE_EXITS_MWAIT;
+	if (!kvm_can_mwait_in_guest())
+		r &= ~KVM_X86_DISABLE_EXITS_MWAIT;
 
 	return r;
 }
@@ -5264,6 +5263,28 @@  static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
 	return r;
 }
 
+
+#define kvm_ioctl_disable_exits(a, mask)				     \
+({									     \
+	if (!kvm_can_mwait_in_guest())                                       \
+		(mask) &= ~KVM_X86_DISABLE_EXITS_MWAIT;                      \
+	if ((mask) & KVM_X86_DISABLE_EXITS_OVERRIDE) {			     \
+		(a).mwait_in_guest = (mask) & KVM_X86_DISABLE_EXITS_MWAIT;   \
+		(a).hlt_in_guest = (mask) & KVM_X86_DISABLE_EXITS_HLT;	     \
+		(a).pause_in_guest = (mask) & KVM_X86_DISABLE_EXITS_PAUSE;   \
+		(a).cstate_in_guest = (mask) & KVM_X86_DISABLE_EXITS_CSTATE; \
+	} else {							     \
+		if ((mask) & KVM_X86_DISABLE_EXITS_MWAIT)		     \
+			(a).mwait_in_guest = true;			     \
+		if ((mask) & KVM_X86_DISABLE_EXITS_HLT)			     \
+			(a).hlt_in_guest = true;			     \
+		if ((mask) & KVM_X86_DISABLE_EXITS_PAUSE)		     \
+			(a).pause_in_guest = true;			     \
+		if ((mask) & KVM_X86_DISABLE_EXITS_CSTATE)		     \
+			(a).cstate_in_guest = true;			     \
+	}								     \
+})
+
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 				     struct kvm_enable_cap *cap)
 {
@@ -6018,14 +6039,8 @@  int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 		if (kvm->created_vcpus)
 			goto disable_exits_unlock;
 
-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT)
-			kvm->arch.mwait_in_guest = true;
-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
-			kvm->arch.hlt_in_guest = true;
-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
-			kvm->arch.pause_in_guest = true;
-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
-			kvm->arch.cstate_in_guest = true;
+		kvm_ioctl_disable_exits(kvm->arch, cap->args[0]);
+
 		r = 0;
 disable_exits_unlock:
 		mutex_unlock(&kvm->lock);
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 5088bd9f1922..f2e76e436be5 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -814,10 +814,12 @@  struct kvm_ioeventfd {
 #define KVM_X86_DISABLE_EXITS_HLT            (1 << 1)
 #define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
 #define KVM_X86_DISABLE_EXITS_CSTATE         (1 << 3)
+#define KVM_X86_DISABLE_EXITS_OVERRIDE	     (1ull << 63)
 #define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
                                               KVM_X86_DISABLE_EXITS_HLT | \
                                               KVM_X86_DISABLE_EXITS_PAUSE | \
-                                              KVM_X86_DISABLE_EXITS_CSTATE)
+                                              KVM_X86_DISABLE_EXITS_CSTATE | \
+					      KVM_X86_DISABLE_EXITS_OVERRIDE)
 
 /* for KVM_ENABLE_CAP */
 struct kvm_enable_cap {