[v2,1/4] KVM: add spinlock optimization framework

Message ID 1502165135-4784-2-git-send-email-longpeng2@huawei.com (mailing list archive)
State New, archived

Commit Message

Longpeng(Mike) Aug. 8, 2017, 4:05 a.m. UTC
If the vcpu (me) exits while requesting a usermode spinlock, then
the spinlock holder may have been preempted in usermode or kernmode.

But if the vcpu (me) is in kernmode, then the holder must have been
preempted in kernmode, so we should choose a vcpu in kernmode as the
most eligible candidate.

This patch introduces kvm_arch_vcpu_in_kernel() to decide whether a
vcpu is in kernel mode when it is preempted or exits due to spinning.

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
---
 arch/arm/kvm/handle_exit.c   | 2 +-
 arch/arm64/kvm/handle_exit.c | 2 +-
 arch/mips/kvm/mips.c         | 6 ++++++
 arch/powerpc/kvm/powerpc.c   | 6 ++++++
 arch/s390/kvm/diag.c         | 2 +-
 arch/s390/kvm/kvm-s390.c     | 6 ++++++
 arch/x86/kvm/hyperv.c        | 2 +-
 arch/x86/kvm/svm.c           | 4 +++-
 arch/x86/kvm/vmx.c           | 2 +-
 arch/x86/kvm/x86.c           | 6 ++++++
 include/linux/kvm_host.h     | 3 ++-
 virt/kvm/arm/arm.c           | 5 +++++
 virt/kvm/kvm_main.c          | 4 +++-
 13 files changed, 42 insertions(+), 8 deletions(-)
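
In outline, the heuristic works as below (a simplified sketch of the
intended behaviour, not the literal patch; the real kvm_vcpu_on_spin()
performs additional eligibility checks and round-robin bookkeeping):

/*
 * Directed yield on spin: if the spinning vcpu was in kernel mode,
 * the lock holder must have been preempted in kernel mode as well,
 * so skip yield candidates that are currently in user mode.
 */
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool me_in_kern)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, me->kvm) {
		if (vcpu == me)
			continue;
		if (me_in_kern && !kvm_arch_vcpu_in_kernel(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}
}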

Comments

Paolo Bonzini Aug. 8, 2017, 7:34 a.m. UTC | #1
On 08/08/2017 06:05, Longpeng(Mike) wrote:
>  	return 1;
> diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
> index ce865bd..4ea8c38 100644
> --- a/arch/s390/kvm/diag.c
> +++ b/arch/s390/kvm/diag.c
> @@ -150,7 +150,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
>  {
>  	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
>  	vcpu->stat.diagnose_44++;
> -	kvm_vcpu_on_spin(vcpu);
> +	kvm_vcpu_on_spin(vcpu, kvm_arch_vcpu_in_kernel(vcpu));
>  	return 0;
>  }
>  

IIUC, diag is a privileged instruction, so this can also be "true".

Paolo
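
For reference, Paolo's suggestion applied to the diag call site would look
like this (a sketch only; diagnose 0x44 can only be issued from the guest's
supervisor state, so the yielding vcpu is known to be in kernel mode):

	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	kvm_vcpu_on_spin(vcpu, true);
	return 0;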
Cornelia Huck Aug. 8, 2017, 7:43 a.m. UTC | #2
On Tue, 8 Aug 2017 09:34:14 +0200
Paolo Bonzini <pbonzini@redhat.com> wrote:

> On 08/08/2017 06:05, Longpeng(Mike) wrote:
> >  	return 1;
> > diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
> > index ce865bd..4ea8c38 100644
> > --- a/arch/s390/kvm/diag.c
> > +++ b/arch/s390/kvm/diag.c
> > @@ -150,7 +150,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
> >  {
> >  	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
> >  	vcpu->stat.diagnose_44++;
> > -	kvm_vcpu_on_spin(vcpu);
> > +	kvm_vcpu_on_spin(vcpu, kvm_arch_vcpu_in_kernel(vcpu));
> >  	return 0;
> >  }
> >    
> 
> IIUC, diag is a privileged instruction, so this can also be "true".
> 
> Paolo

Yes, indeed.
David Hildenbrand Aug. 8, 2017, 8:42 a.m. UTC | #3
> +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
> +{
> +	return false;
> +}

why don't we need an EXPORT_SYMBOL here?

> +
>  /* Just ensure a guest exit from a particular CPU */
>  static void exit_vm_noop(void *info)
>  {
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 15252d7..e7720d2 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2317,7 +2317,7 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
>  #endif
>  }
>  
> -void kvm_vcpu_on_spin(struct kvm_vcpu *me)
> +void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool me_in_kern)
>  {
>  	struct kvm *kvm = me->kvm;
>  	struct kvm_vcpu *vcpu;
> @@ -2348,6 +2348,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
>  				continue;
>  			if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
>  				continue;
> +			if (me_in_kern && !kvm_arch_vcpu_in_kernel(vcpu))
> +				continue;


hm, does this patch compile? (me_in_kern)

I would even move this to another patch.

Maybe even split into

a) introducing kvm_arch_vcpu_in_kernel() for all archs
b) modifying kvm_vcpu_on_spin(), passing the result from
kvm_arch_vcpu_in_kernel()
c) filling kvm_arch_vcpu_in_kernel() with life for different archs
(multiple patches)
d) pimping kvm_vcpu_on_spin()

>  			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
>  				continue;
>  
>
David Hildenbrand Aug. 8, 2017, 8:44 a.m. UTC | #4
On 08.08.2017 10:42, David Hildenbrand wrote:
> 
>> +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
>> +{
>> +	return false;
>> +}
> 
> why don't we need an EXPORT_SYMBOL here?
> 
>> +
>>  /* Just ensure a guest exit from a particular CPU */
>>  static void exit_vm_noop(void *info)
>>  {
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index 15252d7..e7720d2 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -2317,7 +2317,7 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
>>  #endif
>>  }
>>  
>> -void kvm_vcpu_on_spin(struct kvm_vcpu *me)
>> +void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool me_in_kern)
>>  {
>>  	struct kvm *kvm = me->kvm;
>>  	struct kvm_vcpu *vcpu;
>> @@ -2348,6 +2348,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
>>  				continue;
>>  			if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
>>  				continue;
>> +			if (me_in_kern && !kvm_arch_vcpu_in_kernel(vcpu))
>> +				continue;
> 
> 
> hm, does this patch compile? (me_in_kern)

pardon me, missed the parameter, so ignore this comment. comment
regarding splitting up below still holds :)

> 
> I would even move this to another patch.
> 
> Maybe even split into
> 
> a) introducing kvm_arch_vcpu_in_kernel() for all archs
> b) modifying kvm_vcpu_on_spin(), passing the result from
> kvm_arch_vcpu_in_kernel()
> c) filling kvm_arch_vcpu_in_kernel() with life for different archs
> (multiple patches)
> d) pimping kvm_vcpu_on_spin()
> 
>>  			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
>>  				continue;
>>  
>>
> 
>

Patch

diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 54442e3..a7ea5db 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -67,7 +67,7 @@  static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
 		trace_kvm_wfx(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, kvm_arch_vcpu_in_kernel(vcpu));
 	} else {
 		trace_kvm_wfx(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 17d8a16..d6c8cb6 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -84,7 +84,7 @@  static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, kvm_arch_vcpu_in_kernel(vcpu));
 	} else {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index d4b2ad1..70208be 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -98,6 +98,12 @@  int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return !!(vcpu->arch.pending_exceptions);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+EXPORT_SYMBOL_GPL(kvm_arch_vcpu_in_kernel);
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return 1;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1a75c0b..6184c45 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -58,6 +58,12 @@  int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+EXPORT_SYMBOL_GPL(kvm_arch_vcpu_in_kernel);
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return 1;
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index ce865bd..4ea8c38 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -150,7 +150,7 @@  static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 	vcpu->stat.diagnose_44++;
-	kvm_vcpu_on_spin(vcpu);
+	kvm_vcpu_on_spin(vcpu, kvm_arch_vcpu_in_kernel(vcpu));
 	return 0;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index af09d34..0b0c689 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2447,6 +2447,12 @@  int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+EXPORT_SYMBOL_GPL(kvm_arch_vcpu_in_kernel);
+
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
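
(A later patch in this series presumably fills in the stub above. On s390,
kernel mode can be derived from the problem-state bit of the guest PSW,
along the lines of:

	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);

This is a sketch based on how s390 distinguishes supervisor and problem
state, not a quote from this patch.)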
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 337b6d2..cd0e6e6 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1268,7 +1268,7 @@  int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 
 	switch (code) {
 	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, kvm_arch_vcpu_in_kernel(vcpu));
 		break;
 	case HVCALL_POST_MESSAGE:
 	case HVCALL_SIGNAL_EVENT:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1107626..e6ed24e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3749,7 +3749,9 @@  static int interrupt_window_interception(struct vcpu_svm *svm)
 
 static int pause_interception(struct vcpu_svm *svm)
 {
-	kvm_vcpu_on_spin(&(svm->vcpu));
+	struct kvm_vcpu *vcpu = &(svm->vcpu);
+
+	kvm_vcpu_on_spin(vcpu, kvm_arch_vcpu_in_kernel(vcpu));
 	return 1;
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9b21b12..9d6223a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6761,7 +6761,7 @@  static int handle_pause(struct kvm_vcpu *vcpu)
 	if (ple_gap)
 		grow_ple_window(vcpu);
 
-	kvm_vcpu_on_spin(vcpu);
+	kvm_vcpu_on_spin(vcpu, kvm_arch_vcpu_in_kernel(vcpu));
 	return kvm_skip_emulated_instruction(vcpu);
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d734aa8..4430be6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8439,6 +8439,12 @@  int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+EXPORT_SYMBOL_GPL(kvm_arch_vcpu_in_kernel);
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
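
(A later patch in this series presumably fills in the stub above. On x86,
the natural check is the guest's current privilege level via the existing
kvm_x86_ops->get_cpl() hook, along the lines of:

	return kvm_x86_ops->get_cpl(vcpu) == 0;

CPL 0 means the guest is executing in kernel mode; this is a sketch, not a
quote from this patch.)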
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 21a6fd6..91460aa 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -720,7 +720,7 @@  int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool me_in_kern);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
@@ -800,6 +800,7 @@  int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a39a1e1..862f820 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -416,6 +416,11 @@  int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 		&& !v->arch.power_off && !v->arch.pause);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 /* Just ensure a guest exit from a particular CPU */
 static void exit_vm_noop(void *info)
 {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 15252d7..e7720d2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2317,7 +2317,7 @@  static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }
 
-void kvm_vcpu_on_spin(struct kvm_vcpu *me)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool me_in_kern)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;
@@ -2348,6 +2348,8 @@  void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 				continue;
 			if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
 				continue;
+			if (me_in_kern && !kvm_arch_vcpu_in_kernel(vcpu))
+				continue;
 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 				continue;