
[3/3] KVM: implement spinlock optimization logic for arm/s390

Message ID 1502095466-21312-4-git-send-email-longpeng2@huawei.com (mailing list archive)
State New, archived

Commit Message

Longpeng(Mike) Aug. 7, 2017, 8:44 a.m. UTC
Implement kvm_arch_vcpu_spin/preempt_in_kernel() for arm/s390; these
architectures do not need to cache the result.

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
---
 arch/s390/kvm/kvm-s390.c | 4 ++--
 virt/kvm/arm/arm.c       | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
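
These hooks feed the directed-yield heuristic in KVM's generic spin-loop
handling, wired up in patch 1/3 of this series. As a rough illustration
only (a simplified sketch, not the actual patch), the candidate filtering
might look like the following, using the real helpers kvm_for_each_vcpu()
and kvm_vcpu_yield_to():

/* Hypothetical sketch, not the code from patch 1/3: it assumes the
 * generic spin-loop handler filters directed-yield candidates with
 * the two new predicates.
 */
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	bool me_in_kernel = kvm_arch_vcpu_spin_in_kernel(me);
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me)
			continue;
		/* Heuristic: if the spinning vCPU is in guest kernel
		 * mode, the lock holder is most likely another vCPU
		 * that was preempted while in kernel mode, so skip
		 * candidates that were preempted in user space.
		 */
		if (me_in_kernel && !kvm_arch_vcpu_preempt_in_kernel(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}
}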

Comments

David Hildenbrand Aug. 7, 2017, 8:52 a.m. UTC | #1
On 07.08.2017 10:44, Longpeng(Mike) wrote:
> Implement kvm_arch_vcpu_spin/preempt_in_kernel() for arm/s390; these
> architectures do not need to cache the result.
> 
> Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
> ---
>  arch/s390/kvm/kvm-s390.c | 4 ++--
>  virt/kvm/arm/arm.c       | 4 ++--
>  2 files changed, 4 insertions(+), 4 deletions(-)
> 
> [...]

Can you split that into two parts? (arm and s390x?)
Longpeng(Mike) Aug. 7, 2017, 8:54 a.m. UTC | #2
On 2017/8/7 16:52, David Hildenbrand wrote:

> On 07.08.2017 10:44, Longpeng(Mike) wrote:
>> Implement kvm_arch_vcpu_spin/preempt_in_kernel() for arm/s390; these
>> architectures do not need to cache the result.
>>
>> [...]
> 
> Can you split that into two parts? (arm and s390x?)


OK, I'll split in V2. :)


Patch

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f78cdc2..49b9178 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2449,12 +2449,12 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 bool kvm_arch_vcpu_spin_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return false;
+	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
 }
 
 bool kvm_arch_vcpu_preempt_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return false;
+	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
 }
 
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index e45f780..956f025 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -418,12 +418,12 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 
 bool kvm_arch_vcpu_spin_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return false;
+	return vcpu_mode_priv(vcpu);
 }
 
 bool kvm_arch_vcpu_preempt_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return false;
+	return vcpu_mode_priv(vcpu);
 }
 
 /* Just ensure a guest exit from a particular CPU */
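
Background on the two arch-specific checks: on s390 the PSW problem-state
bit (PSW_MASK_PSTATE) is set while the guest runs in problem state (user
space) and clear in supervisor state, so the predicate is the negated bit
test; on arm, vcpu_mode_priv() reports whether the vCPU is in a privileged
(non-user) mode. Below is a minimal, self-contained userspace sketch of the
s390-style test with mocked types; the mask value is illustrative, and the
real definition lives in arch/s390/include/asm/ptrace.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mock stand-in for the kernel definition; PSW bit 15 is the
 * problem-state bit, shown here for illustration only.
 */
#define PSW_MASK_PSTATE 0x0001000000000000UL

struct mock_sie_block {
	struct { uint64_t mask; } gpsw;
};

/* Mirrors the predicate from the patch: the guest is "in kernel"
 * when the problem-state bit is clear (supervisor state).
 */
static bool guest_in_kernel(const struct mock_sie_block *sie)
{
	return !(sie->gpsw.mask & PSW_MASK_PSTATE);
}

int main(void)
{
	struct mock_sie_block supervisor = { .gpsw = { .mask = 0 } };
	struct mock_sie_block problem = { .gpsw = { .mask = PSW_MASK_PSTATE } };

	printf("supervisor state -> in kernel: %d\n", guest_in_kernel(&supervisor)); /* 1 */
	printf("problem state    -> in kernel: %d\n", guest_in_kernel(&problem));    /* 0 */
	return 0;
}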