
[08/11] qspinlock: Revert to test-and-set on hypervisors

Message ID 20140615130153.940699466@chello.nl (mailing list archive)
State New, archived

Commit Message

Peter Zijlstra June 15, 2014, 12:47 p.m. UTC
When we detect a hypervisor (!paravirt, see later patches), revert to
a simple test-and-set lock to avoid the horrors of queue preemption.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
---
 arch/x86/include/asm/qspinlock.h |   14 ++++++++++++++
 include/asm-generic/qspinlock.h  |    7 +++++++
 kernel/locking/qspinlock.c       |    3 +++
 3 files changed, 24 insertions(+)
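
The fallback installed here is the classic test-and-set spinlock: any CPU that
sees the lock free may take it, so a waiter whose vCPU has been preempted
cannot stall the strict FIFO hand-off the way a queued waiter can. As a rough
illustration only (not part of the patch), the same scheme can be written as a
standalone C11 sketch using standard atomics in place of the kernel's
atomic_cmpxchg()/cpu_relax() helpers; all names below are invented for the
example:

	#include <stdatomic.h>

	/* Illustrative test-and-set lock, mirroring virt_queue_spin_lock(). */
	struct tas_lock {
		atomic_int val;		/* 0 = unlocked, 1 = locked */
	};

	static void tas_lock_acquire(struct tas_lock *lock)
	{
		int expected = 0;

		/* Keep retrying the 0 -> 1 transition until we win it. */
		while (!atomic_compare_exchange_weak_explicit(&lock->val,
				&expected, 1,
				memory_order_acquire, memory_order_relaxed)) {
			expected = 0;	/* CAS rewrites 'expected' on failure */
			/* the kernel version spins with cpu_relax() here */
		}
	}

	static void tas_lock_release(struct tas_lock *lock)
	{
		atomic_store_explicit(&lock->val, 0, memory_order_release);
	}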




Comments

Waiman Long June 16, 2014, 9:57 p.m. UTC | #1
On 06/15/2014 08:47 AM, Peter Zijlstra wrote:
> When we detect a hypervisor (!paravirt, see later patches), revert to
> a simple test-and-set lock to avoid the horrors of queue preemption.
>
> Signed-off-by: Peter Zijlstra <peterz@infradead.org>
> ---
>   arch/x86/include/asm/qspinlock.h |   14 ++++++++++++++
>   include/asm-generic/qspinlock.h  |    7 +++++++
>   kernel/locking/qspinlock.c       |    3 +++
>   3 files changed, 24 insertions(+)
>
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -1,6 +1,7 @@
>   #ifndef _ASM_X86_QSPINLOCK_H
>   #define _ASM_X86_QSPINLOCK_H
>
> +#include <asm/cpufeature.h>
>   #include <asm-generic/qspinlock_types.h>
>
>   #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
> @@ -20,6 +21,19 @@ static inline void queue_spin_unlock(str
>
>   #endif /* !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE */
>
> +#define virt_queue_spin_lock virt_queue_spin_lock
> +
> +static inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +		return false;
> +
> +	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
> +		cpu_relax();
> +
> +	return true;
> +}
> +
>   #include <asm-generic/qspinlock.h>
>
>   #endif /* _ASM_X86_QSPINLOCK_H */
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -98,6 +98,13 @@ static __always_inline void queue_spin_u
>   }
>   #endif
>
> +#ifndef virt_queue_spin_lock
> +static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	return false;
> +}
> +#endif
> +
>   /*
>    * Initializier
>    */
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -247,6 +247,9 @@ void queue_spin_lock_slowpath(struct qsp
>
>   	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>
> +	if (virt_queue_spin_lock(lock))
> +		return;
> +
>   	/*
>   	 * wait for in-progress pending->locked hand-overs
>   	 *

I just wonder if it would be better to let the kernel distributors decide
whether the unfair lock should be the default for virtual guests. Anyway, I
have no objection to that myself.

-Longman
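
One hypothetical way to give distributions or administrators the choice Waiman
raises above would be a boot-time switch gating the fallback, instead of keying
it purely off X86_FEATURE_HYPERVISOR. The sketch below is illustrative only;
the parameter and variable names are invented and are not part of this series:

	/* Hypothetical knob, e.g. in kernel/locking/qspinlock.c. */
	static bool virt_unfair_lock __read_mostly = true;

	static int __init parse_no_virt_unfair_lock(char *arg)
	{
		virt_unfair_lock = false;
		return 0;
	}
	early_param("no_virt_unfair_lock", parse_no_virt_unfair_lock);

	static inline bool virt_queue_spin_lock(struct qspinlock *lock)
	{
		if (!virt_unfair_lock || !static_cpu_has(X86_FEATURE_HYPERVISOR))
			return false;

		while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
			cpu_relax();

		return true;
	}

A Kconfig default or a static key toggle would be alternative ways to expose
the same policy.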
Konrad Rzeszutek Wilk June 18, 2014, 4:40 p.m. UTC | #2
On Sun, Jun 15, 2014 at 02:47:05PM +0200, Peter Zijlstra wrote:
> When we detect a hypervisor (!paravirt, see later patches), revert to

Please spell out the names of the patches.

> a simple test-and-set lock to avoid the horrors of queue preemption.

Heheh.
> 
> Signed-off-by: Peter Zijlstra <peterz@infradead.org>
> ---
>  arch/x86/include/asm/qspinlock.h |   14 ++++++++++++++
>  include/asm-generic/qspinlock.h  |    7 +++++++
>  kernel/locking/qspinlock.c       |    3 +++
>  3 files changed, 24 insertions(+)
> 
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -1,6 +1,7 @@
>  #ifndef _ASM_X86_QSPINLOCK_H
>  #define _ASM_X86_QSPINLOCK_H
>  
> +#include <asm/cpufeature.h>
>  #include <asm-generic/qspinlock_types.h>
>  
>  #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
> @@ -20,6 +21,19 @@ static inline void queue_spin_unlock(str
>  
>  #endif /* !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE */
>  
> +#define virt_queue_spin_lock virt_queue_spin_lock
> +
> +static inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +		return false;
> +
> +	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
> +		cpu_relax();
> +
> +	return true;
> +}
> +
>  #include <asm-generic/qspinlock.h>
>  
>  #endif /* _ASM_X86_QSPINLOCK_H */
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -98,6 +98,13 @@ static __always_inline void queue_spin_u
>  }
>  #endif
>  
> +#ifndef virt_queue_spin_lock
> +static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	return false;
> +}
> +#endif
> +
>  /*
>   * Initializier
>   */
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -247,6 +247,9 @@ void queue_spin_lock_slowpath(struct qsp
>  
>  	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>  
> +	if (virt_queue_spin_lock(lock))
> +		return;
> +
>  	/*
>  	 * wait for in-progress pending->locked hand-overs
>  	 *
> 
> 

Patch

--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@ 
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
 
+#include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
@@ -20,6 +21,19 @@  static inline void queue_spin_unlock(str
 
 #endif /* !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE */
 
+#define virt_queue_spin_lock virt_queue_spin_lock
+
+static inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		return false;
+
+	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+		cpu_relax();
+
+	return true;
+}
+
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -98,6 +98,13 @@  static __always_inline void queue_spin_u
 }
 #endif
 
+#ifndef virt_queue_spin_lock
+static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+	return false;
+}
+#endif
+
 /*
  * Initializier
  */
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -247,6 +247,9 @@  void queue_spin_lock_slowpath(struct qsp
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+	if (virt_queue_spin_lock(lock))
+		return;
+
 	/*
 	 * wait for in-progress pending->locked hand-overs
 	 *