
[11/11] qspinlock, kvm: Add paravirt support

Message ID 20140615130154.400698797@chello.nl (mailing list archive)
State New, archived

Commit Message

Peter Zijlstra June 15, 2014, 12:47 p.m. UTC
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
---
 arch/x86/kernel/kvm.c |   58 ++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/Kconfig.locks  |    2 -
 2 files changed, 59 insertions(+), 1 deletion(-)



Comments

Raghavendra K T June 22, 2014, 4:36 p.m. UTC | #1
On 06/15/2014 06:17 PM, Peter Zijlstra wrote:
> Signed-off-by: Peter Zijlstra <peterz@infradead.org>
> ---
[...]
> +
> +void kvm_wait(int *ptr, int val)
> +{
> +	unsigned long flags;
> +
> +	if (in_nmi())
> +		return;
> +
> +	/*
> +	 * Make sure an interrupt handler can't upset things in a
> +	 * partially setup state.
> +	 */

I am seeing a hang even with a 2 cpu guest (with the patches on top of 3.15-rc6).
Looking further with gdb, I see one cpu stuck in native_halt() with the
slowpath flag (_Q_LOCKED_SLOW) set when it was called.

(gdb) bt
#0  native_halt () at /test/master/arch/x86/include/asm/irqflags.h:55
#1  0xffffffff81033118 in halt (ptr=0xffffffff81eb0e58, val=524291) at /test/master/arch/x86/include/asm/paravirt.h:116
#2  kvm_wait (ptr=0xffffffff81eb0e58, val=524291) at arch/x86/kernel/kvm.c:835
#3  kvm_wait (ptr=0xffffffff81eb0e58, val=524291) at arch/x86/kernel/kvm.c:809
#4  0xffffffff810a2d8e in pv_wait (lock=0xffffffff81eb0e58) at /test/master/arch/x86/include/asm/paravirt.h:744
#5  __pv_wait_head (lock=0xffffffff81eb0e58) at kernel/locking/qspinlock.c:352

The value of the lock seems to be 524288 (meaning it is already unlocked?).
So apart from the races Waiman mentioned, do we also need an smp_mb()
here and/or in native_queue_unlock()?
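
For reference, a minimal sketch of how the two numbers above decode,
assuming only the usual qspinlock layout with the locked byte in bits
0-7; the interpretation in the final comment is the hypothesis raised
above, not a conclusion.

/*
 * Illustration only: decode the two lock-word values from the gdb
 * backtrace above, assuming bits 0-7 are the locked byte and the
 * remaining bits carry the pending/tail information.
 */
#include <stdio.h>

static void decode(unsigned int val, const char *what)
{
	printf("%-26s = 0x%05x: locked byte = %u, upper bits = 0x%x\n",
	       what, val, val & 0xff, val >> 8);
}

int main(void)
{
	decode(524291, "val passed to kvm_wait()");	/* locked byte = 3 */
	decode(524288, "lock value seen in gdb");	/* locked byte = 0 */
	/*
	 * The upper bits (0x800) are identical; only the locked byte has
	 * been cleared.  That is, the lock was apparently released after
	 * the value was sampled, yet the halted cpu never saw a kick --
	 * which is the window the smp_mb() question above is about.
	 */
	return 0;
}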

Interestingly, I see the other cpu stuck in multi_cpu_stop().

(gdb) thr 1
[Switching to thread 1 (Thread 1)]#0  multi_cpu_stop (data=0xffff8802140d1da0) at kernel/stop_machine.c:192
192			if (msdata->state != curstate) {

Or is it that I am missing something?

Please let me know if the .config needs to be shared.

> +	local_irq_save(flags);
> +
> +	/*
> +	 * check again make sure it didn't become free while
> +	 * we weren't looking.
> +	 */
> +	if (ACCESS_ONCE(*ptr) != val)
> +		goto out;
> +
> +	/*
> +	 * halt until it's our turn and kicked. Note that we do safe halt
> +	 * for irq enabled case to avoid hang when lock info is overwritten
> +	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
> +	 */
> +	if (arch_irqs_disabled_flags(flags))
> +		halt();
> +	else
> +		safe_halt();
> +
> +out:
> +	local_irq_restore(flags);
> +}
> +#endif /* QUEUE_SPINLOCK */

Peter Zijlstra July 7, 2014, 3:23 p.m. UTC | #2
On Sun, Jun 22, 2014 at 10:06:18PM +0530, Raghavendra K T wrote:
> On 06/15/2014 06:17 PM, Peter Zijlstra wrote:
> >Signed-off-by: Peter Zijlstra<peterz@infradead.org>
> >---
> [...]
> >+
> >+void kvm_wait(int *ptr, int val)
> >+{
> >+	unsigned long flags;
> >+
> >+	if (in_nmi())
> >+		return;
> >+
> >+	/*
> >+	 * Make sure an interrupt handler can't upset things in a
> >+	 * partially setup state.
> >+	 */
> 
> I am seeing a hang even with a 2 cpu guest (with the patches on top of 3.15-rc6).
> Looking further with gdb, I see one cpu stuck in native_halt() with the
> slowpath flag (_Q_LOCKED_SLOW) set when it was called.

As said in 0/n, I think: I only booted the kernel in kvm, I didn't
actually do anything with it.

It took me most of the day to figure out how to get paravirt working at
all, so I didn't feel like spending many more hours trying to figure out
how to make the crap thing do actual work.

But I'll see what I can do once we 'conceptually' agree on the
paravirt patch.

Patch

Index: linux-2.6/arch/x86/kernel/kvm.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/kvm.c
+++ linux-2.6/arch/x86/kernel/kvm.c
@@ -569,6 +569,7 @@  static void kvm_kick_cpu(int cpu)
 	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
 
+#ifndef CONFIG_QUEUE_SPINLOCK
 enum kvm_contention_stat {
 	TAKEN_SLOW,
 	TAKEN_SLOW_PICKUP,
@@ -796,6 +797,51 @@  static void kvm_unlock_kick(struct arch_
 		}
 	}
 }
+#else /* QUEUE_SPINLOCK */
+
+#include <asm-generic/qspinlock.h>
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_init_node);
+PV_CALLEE_SAVE_REGS_THUNK(__pv_link_and_wait_node);
+PV_CALLEE_SAVE_REGS_THUNK(__pv_kick_node);
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_wait_head);
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queue_unlock);
+
+void kvm_wait(int *ptr, int val)
+{
+	unsigned long flags;
+
+	if (in_nmi())
+		return;
+
+	/*
+	 * Make sure an interrupt handler can't upset things in a
+	 * partially setup state.
+	 */
+	local_irq_save(flags);
+
+	/*
+	 * check again make sure it didn't become free while
+	 * we weren't looking.
+	 */
+	if (ACCESS_ONCE(*ptr) != val)
+		goto out;
+
+	/*
+	 * halt until it's our turn and kicked. Note that we do safe halt
+	 * for irq enabled case to avoid hang when lock info is overwritten
+	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
+	 */
+	if (arch_irqs_disabled_flags(flags))
+		halt();
+	else
+		safe_halt();
+
+out:
+	local_irq_restore(flags);
+}
+#endif /* QUEUE_SPINLOCK */
 
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
@@ -808,8 +854,20 @@  void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
+#ifdef CONFIG_QUEUE_SPINLOCK
+	pv_lock_ops.init_node = PV_CALLEE_SAVE(__pv_init_node);
+	pv_lock_ops.link_and_wait_node = PV_CALLEE_SAVE(__pv_link_and_wait_node);
+	pv_lock_ops.kick_node = PV_CALLEE_SAVE(__pv_kick_node);
+
+	pv_lock_ops.wait_head = PV_CALLEE_SAVE(__pv_wait_head);
+	pv_lock_ops.queue_unlock = PV_CALLEE_SAVE(__pv_queue_unlock);
+
+	pv_lock_ops.wait = kvm_wait;
+	pv_lock_ops.kick = kvm_kick_cpu;
+#else
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
 	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+#endif
 }
 
 static __init int kvm_spinlock_init_jump(void)
Index: linux-2.6/kernel/Kconfig.locks
===================================================================
--- linux-2.6.orig/kernel/Kconfig.locks
+++ linux-2.6/kernel/Kconfig.locks
@@ -229,7 +229,7 @@  config ARCH_USE_QUEUE_SPINLOCK
 
 config QUEUE_SPINLOCK
 	def_bool y if ARCH_USE_QUEUE_SPINLOCK
-	depends on SMP && !PARAVIRT_SPINLOCKS
+	depends on SMP && !(PARAVIRT_SPINLOCKS && XEN)
 
 config ARCH_USE_QUEUE_RWLOCK
 	bool
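
For context, a rough sketch of the contract behind the .wait/.kick hooks
installed above: the queue head halts via kvm_wait() until the lock word
changes or it is kicked, and the releasing side wakes it via
kvm_kick_cpu(). The function names prefixed example_, the single-bit
lock encoding and the call sites are illustrative assumptions; the real
slowpath (__pv_wait_head and friends) lives in the earlier patches of
this series and is not shown here.

/* Illustrative sketch only -- not code from this series. */
#define EXAMPLE_LOCKED	1	/* assumed: low bit means "locked" */

/* Waiter side (queue head), roughly what a pv wait-head loop does. */
static void example_wait_for_lock(int *lockword)
{
	int val;

	while ((val = ACCESS_ONCE(*lockword)) & EXAMPLE_LOCKED) {
		/*
		 * kvm_wait() disables interrupts, re-checks that
		 * *lockword still equals val and only then halts, so a
		 * release + kick that lands before the halt is not lost.
		 */
		kvm_wait(lockword, val);
	}
}

/* Releasing side, once it knows which cpu is parked at the queue head. */
static void example_unlock_and_kick(int *lockword, int head_cpu)
{
	/*
	 * The real unlock clears only the locked byte and leaves the
	 * queue tail bits intact; in this sketch the word carries
	 * nothing else, so storing 0 suffices.
	 */
	smp_store_release(lockword, 0);
	kvm_kick_cpu(head_cpu);		/* KVM_HC_KICK_CPU hypercall */
}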