diff mbox

x86 spinlock: Fix memory corruption on completing completions

Message ID 1423234148-13886-1-git-send-email-raghavendra.kt@linux.vnet.ibm.com (mailing list archive)
State New, archived
Headers show

Commit Message

Raghavendra K T Feb. 6, 2015, 2:49 p.m. UTC
Paravirt spinlock clears slowpath flag after doing unlock.
As explained by Linus currently it does:
                prev = *lock;
                add_smp(&lock->tickets.head, TICKET_LOCK_INC);

                /* add_smp() is a full mb() */

                if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
                        __ticket_unlock_slowpath(lock, prev);


which is *exactly* the kind of things you cannot do with spinlocks,
because after you've done the "add_smp()" and released the spinlock
for the fast-path, you can't access the spinlock any more.  Exactly
because a fast-path lock might come in, and release the whole data
structure.

Linus suggested that we should not do any writes to lock after unlock(),
and we can move slowpath clearing to fastpath lock.

However it brings additional case to be handled, viz., slowpath still
could be set when somebody does arch_trylock. Handle that too by ignoring
slowpath flag during lock availability check.

Reported-by: Sasha Levin <sasha.levin@oracle.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
---
 arch/x86/include/asm/spinlock.h | 70 ++++++++++++++++++++---------------------
 1 file changed, 34 insertions(+), 36 deletions(-)

Comments

Linus Torvalds Feb. 6, 2015, 4:25 p.m. UTC | #1
On Fri, Feb 6, 2015 at 6:49 AM, Raghavendra K T
<raghavendra.kt@linux.vnet.ibm.com> wrote:
> Paravirt spinlock clears slowpath flag after doing unlock.
[ fix edited out ]

So I'm not going to be applying this for 3.19, because it's much too
late and the patch is too scary. Plus the bug probably effectively
never shows up in real life (it is probably easy to trigger the
speculative *read* but probably never the actual speculative write
after dropping the lock last).

This will need a lot of testing by the paravirt people - both
performance and correctness. So *maybe* for 3.20, but maybe for even
later, and then marked for stable, of course.

Are there any good paravirt stress-tests that people could run for
extended times?

                            Linus
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Sasha Levin Feb. 6, 2015, 6:57 p.m. UTC | #2
On 02/06/2015 09:49 AM, Raghavendra K T wrote:
> Paravirt spinlock clears slowpath flag after doing unlock.
> As explained by Linus currently it does:
>                 prev = *lock;
>                 add_smp(&lock->tickets.head, TICKET_LOCK_INC);
> 
>                 /* add_smp() is a full mb() */
> 
>                 if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
>                         __ticket_unlock_slowpath(lock, prev);
> 
> 
> which is *exactly* the kind of things you cannot do with spinlocks,
> because after you've done the "add_smp()" and released the spinlock
> for the fast-path, you can't access the spinlock any more.  Exactly
> because a fast-path lock might come in, and release the whole data
> structure.
> 
> Linus suggested that we should not do any writes to lock after unlock(),
> and we can move slowpath clearing to fastpath lock.
> 
> However it brings additional case to be handled, viz., slowpath still
> could be set when somebody does arch_trylock. Handle that too by ignoring
> slowpath flag during lock availability check.
> 
> Reported-by: Sasha Levin <sasha.levin@oracle.com>
> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
> Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>

With this patch, my VMs lock up quickly after boot with:

[  161.613469] BUG: spinlock lockup suspected on CPU#31, kworker/31:1/5213
[  161.613469]  lock: purge_lock.28981+0x0/0x40, .magic: dead4ead, .owner: kworker/7:1/6400, .owner_cpu: 7
[  161.613469] CPU: 31 PID: 5213 Comm: kworker/31:1 Not tainted 3.19.0-rc7-next-20150204-sasha-00048-gee3a350 #1869
[  161.613469] Workqueue: events bpf_prog_free_deferred
[  161.613469]  0000000000000000 00000000f03dd27f ffff88056b227a88 ffffffffa1702276
[  161.613469]  0000000000000000 ffff88017cf70000 ffff88056b227aa8 ffffffff9e1d009c
[  161.613469]  ffffffffa3edae60 0000000086c3f830 ffff88056b227ad8 ffffffff9e1d01d7
[  161.613469] Call Trace:
[  161.613469] dump_stack (lib/dump_stack.c:52)
[  161.613469] spin_dump (kernel/locking/spinlock_debug.c:68 (discriminator 8))
[  161.613469] do_raw_spin_lock (include/linux/nmi.h:48 kernel/locking/spinlock_debug.c:119 kernel/locking/spinlock_debug.c:137)
[  161.613469] _raw_spin_lock (kernel/locking/spinlock.c:152)
[  161.613469] ? __purge_vmap_area_lazy (mm/vmalloc.c:615)
[  161.613469] __purge_vmap_area_lazy (mm/vmalloc.c:615)
[  161.613469] ? vm_unmap_aliases (mm/vmalloc.c:1021)
[  161.613469] vm_unmap_aliases (mm/vmalloc.c:1052)
[  161.613469] ? vm_unmap_aliases (mm/vmalloc.c:1021)
[  161.613469] ? __lock_acquire (kernel/locking/lockdep.c:2019 kernel/locking/lockdep.c:3184)
[  161.613469] change_page_attr_set_clr (arch/x86/mm/pageattr.c:1394)
[  161.613469] ? debug_object_deactivate (lib/debugobjects.c:463)
[  161.613469] set_memory_rw (arch/x86/mm/pageattr.c:1662)
[  161.613469] ? __lock_is_held (kernel/locking/lockdep.c:3518)
[  161.613469] bpf_jit_free (include/linux/filter.h:377 arch/x86/net/bpf_jit_comp.c:991)
[  161.613469] bpf_prog_free_deferred (kernel/bpf/core.c:646)
[  161.613469] process_one_work (kernel/workqueue.c:2014 include/linux/jump_label.h:114 include/trace/events/workqueue.h:111 kernel/workqueue.c:2019)
[  161.613469] ? process_one_work (./arch/x86/include/asm/atomic64_64.h:33 include/asm-generic/atomic-long.h:38 kernel/workqueue.c:598 kernel/workqueue.c:625 kernel/workqueue.c:2007)
[  161.613469] worker_thread (include/linux/list.h:189 kernel/workqueue.c:2147)
[  161.613469] ? process_one_work (kernel/workqueue.c:2091)
[  161.613469] kthread (kernel/kthread.c:207)
[  161.613469] ? finish_task_switch (./arch/x86/include/asm/current.h:14 kernel/sched/sched.h:1058 kernel/sched/core.c:2258)
[  161.613469] ? flush_kthread_work (kernel/kthread.c:176)
[  161.613469] ret_from_fork (arch/x86/kernel/entry_64.S:283)
[  161.613469] ? flush_kthread_work (kernel/kthread.c:176)

And a few soft lockup messages inside the scheduler after that.


Thanks,
Sasha


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Davidlohr Bueso Feb. 6, 2015, 7:42 p.m. UTC | #3
On Fri, 2015-02-06 at 08:25 -0800, Linus Torvalds wrote:
> On Fri, Feb 6, 2015 at 6:49 AM, Raghavendra K T
> <raghavendra.kt@linux.vnet.ibm.com> wrote:
> > Paravirt spinlock clears slowpath flag after doing unlock.
> [ fix edited out ]
> 
> So I'm not going to be applying this for 3.19, because it's much too
> late and the patch is too scary. Plus the bug probably effectively
> never shows up in real life (it is probably easy to trigger the
> speculative *read* but probably never the actual speculative write
> after dropping the lock last).
> 
> This will need a lot of testing by the paravirt people - both
> performance and correctness. So *maybe* for 3.20, but maybe for even
> later, and then marked for stable, of course.
> 
> Are there any good paravirt stress-tests that people could run for
> extended times?

locktorture inside a VM should give a proper pounding.

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Sasha Levin Feb. 6, 2015, 9:15 p.m. UTC | #4
On 02/06/2015 02:42 PM, Davidlohr Bueso wrote:
> On Fri, 2015-02-06 at 08:25 -0800, Linus Torvalds wrote:
>> On Fri, Feb 6, 2015 at 6:49 AM, Raghavendra K T
>> <raghavendra.kt@linux.vnet.ibm.com> wrote:
>>> Paravirt spinlock clears slowpath flag after doing unlock.
>> [ fix edited out ]
>>
>> So I'm not going to be applying this for 3.19, because it's much too
>> late and the patch is too scary. Plus the bug probably effectively
>> never shows up in real life (it is probably easy to trigger the
>> speculative *read* but probably never the actual speculative write
>> after dropping the lock last).
>>
>> This will need a lot of testing by the paravirt people - both
>> performance and correctness. So *maybe* for 3.20, but maybe for even
>> later, and then marked for stable, of course.
>>
>> Are there any good paravirt stress-tests that people could run for
>> extended times?
> 
> locktorture inside a VM should give a proper pounding.

Would it catch lifetime issues too? I thought it just tests out correctness.

I tried it and other unrelated stuff broke. I'll send separate mails for that...


Thanks,
Sasha
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Davidlohr Bueso Feb. 6, 2015, 11:24 p.m. UTC | #5
On Fri, 2015-02-06 at 16:15 -0500, Sasha Levin wrote:
> On 02/06/2015 02:42 PM, Davidlohr Bueso wrote:
> > On Fri, 2015-02-06 at 08:25 -0800, Linus Torvalds wrote:
> >> On Fri, Feb 6, 2015 at 6:49 AM, Raghavendra K T
> >> <raghavendra.kt@linux.vnet.ibm.com> wrote:
> >>> Paravirt spinlock clears slowpath flag after doing unlock.
> >> [ fix edited out ]
> >>
> >> So I'm not going to be applying this for 3.19, because it's much too
> >> late and the patch is too scary. Plus the bug probably effectively
> >> never shows up in real life (it is probably easy to trigger the
> >> speculative *read* but probably never the actual speculative write
> >> after dropping the lock last).
> >>
> >> This will need a lot of testing by the paravirt people - both
> >> performance and correctness. So *maybe* for 3.20, but maybe for even
> >> later, and then marked for stable, of course.
> >>
> >> Are there any good paravirt stress-tests that people could run for
> >> extended times?
> > 
> > locktorture inside a VM should give a proper pounding.
> 
> Would it catch lifetime issues too? I thought it just tests out correctness.

Given enough contention it should, yeah. The spinlock can be acquired by
another thread right after releasing the lock in the unlock's slowpath.
And all locktorture does is pound on locks with random hold times.

Thanks,
Davidlohr

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Raghavendra K T Feb. 8, 2015, 5:49 p.m. UTC | #6
On 02/06/2015 09:55 PM, Linus Torvalds wrote:
> On Fri, Feb 6, 2015 at 6:49 AM, Raghavendra K T
> <raghavendra.kt@linux.vnet.ibm.com> wrote:
>> Paravirt spinlock clears slowpath flag after doing unlock.
> [ fix edited out ]
>
> So I'm not going to be applying this for 3.19, because it's much too
> late and the patch is too scary. Plus the bug probably effectively
> never shows up in real life (it is probably easy to trigger the
> speculative *read* but probably never the actual speculative write
> after dropping the lock last).
>

Understood and agreed.

> This will need a lot of testing by the paravirt people - both
> performance and correctness. So *maybe* for 3.20, but maybe for even
> later, and then marked for stable, of course.
>
> Are there any good paravirt stress-tests that people could run for
> extended times?
>

I have been running several benchmarks (kern, sys, hack, ebizzy etc.)
in 1x,2x scenarios. I run them for performance tests as well.
(In the current patch I did not get kvm hang in normal run, But
overcommit reproduced it).

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Raghavendra K T Feb. 8, 2015, 5:57 p.m. UTC | #7
On 02/07/2015 12:27 AM, Sasha Levin wrote:
> On 02/06/2015 09:49 AM, Raghavendra K T wrote:
>> Paravirt spinlock clears slowpath flag after doing unlock.
>> As explained by Linus currently it does:
>>                  prev = *lock;
>>                  add_smp(&lock->tickets.head, TICKET_LOCK_INC);
>>
>>                  /* add_smp() is a full mb() */
>>
>>                  if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
>>                          __ticket_unlock_slowpath(lock, prev);
>>
>>
>> which is *exactly* the kind of things you cannot do with spinlocks,
>> because after you've done the "add_smp()" and released the spinlock
>> for the fast-path, you can't access the spinlock any more.  Exactly
>> because a fast-path lock might come in, and release the whole data
>> structure.
>>
>> Linus suggested that we should not do any writes to lock after unlock(),
>> and we can move slowpath clearing to fastpath lock.
>>
>> However it brings additional case to be handled, viz., slowpath still
>> could be set when somebody does arch_trylock. Handle that too by ignoring
>> slowpath flag during lock availability check.
>>
>> Reported-by: Sasha Levin <sasha.levin@oracle.com>
>> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
>> Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
>
> With this patch, my VMs lock up quickly after boot with:

Tried to reproduce the hang myself, and there still seems to be a
missing barrier (or some logic I am missing).

Looking closely below, unlock_kick got missed though we see
that SLOWPATH_FLAG is still set:

/me goes back to look closely

(gdb) bt
#0  native_halt () at ./arch/x86/include/asm/irqflags.h:55
#1  0xffffffff81037c27 in halt () at ./arch/x86/include/asm/paravirt.h:116
#2  kvm_lock_spinning (lock=0xffff88023ffe8240, want=52504) at 
arch/x86/kernel/kvm.c:786
#3  0xffffffff81037251 in __raw_callee_save_kvm_lock_spinning ()
#4  0xffff88023fc0edb0 in ?? ()
#5  0x0000000000000000 in ?? ()

(gdb) p *(arch_spinlock_t *)0xffff88023ffe8240
$1 = {{head_tail = 3441806612, tickets = {head = 52500, tail = 52517}}}
(gdb) t 2
[Switching to thread 2 (Thread 2)]
#0  native_halt () at ./arch/x86/include/asm/irqflags.h:55
55	}
(gdb) bt
#0  native_halt () at ./arch/x86/include/asm/irqflags.h:55
#1  0xffffffff81037c27 in halt () at ./arch/x86/include/asm/paravirt.h:116
#2  kvm_lock_spinning (lock=0xffff88023ffe8240, want=52502) at 
arch/x86/kernel/kvm.c:786
#3  0xffffffff81037251 in __raw_callee_save_kvm_lock_spinning ()
#4  0x0000000000000246 in irq_stack_union ()
#5  0x0000000000080750 in ?? ()
#6  0x0000000000020000 in ?? ()
#7  0x0000000000000004 in irq_stack_union ()
#8  0x000000000000cd16 in nmi_print_seq ()
Cannot access memory at address 0xbfc0
(gdb) t 3
[Switching to thread 3 (Thread 3)]
#0  native_halt () at ./arch/x86/include/asm/irqflags.h:55
55	}
(gdb) bt
#0  native_halt () at ./arch/x86/include/asm/irqflags.h:55
#1  0xffffffff81037c27 in halt () at ./arch/x86/include/asm/paravirt.h:116
#2  kvm_lock_spinning (lock=0xffff88023ffe8240, want=52512) at 
arch/x86/kernel/kvm.c:786
#3  0xffffffff81037251 in __raw_callee_save_kvm_lock_spinning ()
#4  0xffff88023fc8edb0 in ?? ()
#5  0x0000000000000000 in ?? ()

[...] //other threads with similar output

(gdb) t 8
[Switching to thread 8 (Thread 8)]
#0  native_halt () at ./arch/x86/include/asm/irqflags.h:55
55	}
(gdb) bt
#0  native_halt () at ./arch/x86/include/asm/irqflags.h:55
#1  0xffffffff81037c27 in halt () at ./arch/x86/include/asm/paravirt.h:116
#2  kvm_lock_spinning (lock=0xffff88023ffe8240, want=52500) at 
arch/x86/kernel/kvm.c:786
#3  0xffffffff81037251 in __raw_callee_save_kvm_lock_spinning ()
#4  0xffff88023fdcedb0 in ?? ()
#5  0x0000000000000000 in ?? ()

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jeremy Fitzhardinge Feb. 8, 2015, 9:14 p.m. UTC | #8
On 02/06/2015 06:49 AM, Raghavendra K T wrote:
> Paravirt spinlock clears slowpath flag after doing unlock.
> As explained by Linus currently it does:
>                 prev = *lock;
>                 add_smp(&lock->tickets.head, TICKET_LOCK_INC);
>
>                 /* add_smp() is a full mb() */
>
>                 if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
>                         __ticket_unlock_slowpath(lock, prev);
>
>
> which is *exactly* the kind of things you cannot do with spinlocks,
> because after you've done the "add_smp()" and released the spinlock
> for the fast-path, you can't access the spinlock any more.  Exactly
> because a fast-path lock might come in, and release the whole data
> structure.

Yeah, that's an embarrassingly obvious bug in retrospect.

> Linus suggested that we should not do any writes to lock after unlock(),
> and we can move slowpath clearing to fastpath lock.

Yep, that seems like a sound approach.

> However it brings additional case to be handled, viz., slowpath still
> could be set when somebody does arch_trylock. Handle that too by ignoring
> slowpath flag during lock availability check.
>
> Reported-by: Sasha Levin <sasha.levin@oracle.com>
> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
> Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
> ---
>  arch/x86/include/asm/spinlock.h | 70 ++++++++++++++++++++---------------------
>  1 file changed, 34 insertions(+), 36 deletions(-)
>
> diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
> index 625660f..0829f86 100644
> --- a/arch/x86/include/asm/spinlock.h
> +++ b/arch/x86/include/asm/spinlock.h
> @@ -49,6 +49,23 @@ static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
>  	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
>  }
>  
> +static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
> +{
> +	arch_spinlock_t old, new;
> +	__ticket_t diff;
> +
> +	old.tickets = READ_ONCE(lock->tickets);

Couldn't the caller pass in the lock state that it read rather than
re-reading it?

> +	diff = (old.tickets.tail & ~TICKET_SLOWPATH_FLAG) - old.tickets.head;
> +
> +	/* try to clear slowpath flag when there are no contenders */
> +	if ((old.tickets.tail & TICKET_SLOWPATH_FLAG) &&
> +		(diff == TICKET_LOCK_INC)) {
> +		new = old;
> +		new.tickets.tail &= ~TICKET_SLOWPATH_FLAG;
> +		cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
> +	}
> +}
> +
>  #else  /* !CONFIG_PARAVIRT_SPINLOCKS */
>  static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
>  							__ticket_t ticket)
> @@ -59,6 +76,10 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
>  {
>  }
>  
> +static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
> +{
> +}
> +
>  #endif /* CONFIG_PARAVIRT_SPINLOCKS */
>  
>  static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
> @@ -84,7 +105,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
>  	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
>  
>  	inc = xadd(&lock->tickets, inc);
> -	if (likely(inc.head == inc.tail))
> +	if (likely(inc.head == (inc.tail & ~TICKET_SLOWPATH_FLAG)))

The intent of this conditional was to be the quickest possible path when
taking a fastpath lock, with the code below being used for all slowpath
locks (free or taken). So I don't think masking out SLOWPATH_FLAG is
necessary here.

>  		goto out;
>  
>  	inc.tail &= ~TICKET_SLOWPATH_FLAG;
> @@ -98,7 +119,10 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
>  		} while (--count);
>  		__ticket_lock_spinning(lock, inc.tail);
>  	}
> -out:	barrier();	/* make sure nothing creeps before the lock is taken */
> +out:
> +	__ticket_check_and_clear_slowpath(lock);
> +
> +	barrier();	/* make sure nothing creeps before the lock is taken */

Which means that if "goto out" path is only ever used for fastpath
locks, you can limit calling __ticket_check_and_clear_slowpath() to the
slowpath case.

>  }
>  
>  static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
> @@ -115,47 +139,21 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
>  	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
>  }
>  
> -static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
> -					    arch_spinlock_t old)
> -{
> -	arch_spinlock_t new;
> -
> -	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
> -
> -	/* Perform the unlock on the "before" copy */
> -	old.tickets.head += TICKET_LOCK_INC;

NB (see below)

> -
> -	/* Clear the slowpath flag */
> -	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
> -
> -	/*
> -	 * If the lock is uncontended, clear the flag - use cmpxchg in
> -	 * case it changes behind our back though.
> -	 */
> -	if (new.tickets.head != new.tickets.tail ||
> -	    cmpxchg(&lock->head_tail, old.head_tail,
> -					new.head_tail) != old.head_tail) {
> -		/*
> -		 * Lock still has someone queued for it, so wake up an
> -		 * appropriate waiter.
> -		 */
> -		__ticket_unlock_kick(lock, old.tickets.head);
> -	}
> -}
> -
>  static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
>  {
>  	if (TICKET_SLOWPATH_FLAG &&
> -	    static_key_false(&paravirt_ticketlocks_enabled)) {
> -		arch_spinlock_t prev;
> +		static_key_false(&paravirt_ticketlocks_enabled)) {
> +		__ticket_t prev_head;
>  
> -		prev = *lock;
> +		prev_head = lock->tickets.head;
>  		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
>  
>  		/* add_smp() is a full mb() */
>  
> -		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
> -			__ticket_unlock_slowpath(lock, prev);
> +		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG)) {

So we're OK with still having a ("speculative"?) read-after-unlock here?
I guess the only way to avoid it is to make the add_smp an xadd, but
that's pretty expensive even compared to a locked add (at least last
time I checked, which was at least a couple of microarchitectures ago).
An unlocked add followed by lfence should also do the trick, but that
was also much worse in practice.

> +			BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
> +			__ticket_unlock_kick(lock, prev_head);

Should be "prev_head + TICKET_LOCK_INC" to match the previous code,
otherwise it won't find the CPU waiting for the new head.

    J

> +		}
>  	} else
>  		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
>  }
> @@ -164,7 +162,7 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
>  {
>  	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
>  
> -	return tmp.tail != tmp.head;
> +	return (tmp.tail & ~TICKET_SLOWPATH_FLAG) != tmp.head;
>  }
>  
>  static inline int arch_spin_is_contended(arch_spinlock_t *lock)

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Raghavendra K T Feb. 9, 2015, 9:34 a.m. UTC | #9
On 02/09/2015 02:44 AM, Jeremy Fitzhardinge wrote:
> On 02/06/2015 06:49 AM, Raghavendra K T wrote:
[...]
>
>> Linus suggested that we should not do any writes to lock after unlock(),
>> and we can move slowpath clearing to fastpath lock.
>
> Yep, that seems like a sound approach.

The current approach seems to be working now (though we could not avoid the read).
Related question: Do you think we could avoid SLOWPATH_FLAG itself by
checking head and tail difference. or is it costly because it may
result in unnecessary unlock_kicks?

>> However it brings additional case to be handled, viz., slowpath still
>> could be set when somebody does arch_trylock. Handle that too by ignoring
>> slowpath flag during lock availability check.
>>
>> Reported-by: Sasha Levin <sasha.levin@oracle.com>
>> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
>> Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
>> ---
>>   arch/x86/include/asm/spinlock.h | 70 ++++++++++++++++++++---------------------
>>   1 file changed, 34 insertions(+), 36 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
>> index 625660f..0829f86 100644
>> --- a/arch/x86/include/asm/spinlock.h
>> +++ b/arch/x86/include/asm/spinlock.h
>> @@ -49,6 +49,23 @@ static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
>>   	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
>>   }
>>
>> +static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
>> +{
>> +	arch_spinlock_t old, new;
>> +	__ticket_t diff;
>> +
>> +	old.tickets = READ_ONCE(lock->tickets);
>
> Couldn't the caller pass in the lock state that it read rather than
> re-reading it?
>

Yes, we could. Do you mean we could pass an additional read value apart
from the lock (because the lock will be needed for cmpxchg anyway)?

>>
>> +static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
>> +{
>> +}
>> +
>>   #endif /* CONFIG_PARAVIRT_SPINLOCKS */
>>
>>   static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
>> @@ -84,7 +105,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
>>   	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
>>
>>   	inc = xadd(&lock->tickets, inc);
>> -	if (likely(inc.head == inc.tail))
>> +	if (likely(inc.head == (inc.tail & ~TICKET_SLOWPATH_FLAG)))
>

good point, we can get rid of this as well.

> The intent of this conditional was to be the quickest possible path when
> taking a fastpath lock, with the code below being used for all slowpath
> locks (free or taken). So I don't think masking out SLOWPATH_FLAG is
> necessary here.
>
>>   		goto out;
>>
>>   	inc.tail &= ~TICKET_SLOWPATH_FLAG;
>> @@ -98,7 +119,10 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
>>   		} while (--count);
>>   		__ticket_lock_spinning(lock, inc.tail);
>>   	}
>> -out:	barrier();	/* make sure nothing creeps before the lock is taken */
>> +out:
>> +	__ticket_check_and_clear_slowpath(lock);
>> +
>> +	barrier();	/* make sure nothing creeps before the lock is taken */
>
> Which means that if "goto out" path is only ever used for fastpath
> locks, you can limit calling __ticket_check_and_clear_slowpath() to the
> slowpath case.
>

Yes, I ll move that call up.

>>   }
>>
>>   static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
>> @@ -115,47 +139,21 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
>>   	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
>>   }
>>
>> -static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
>> -					    arch_spinlock_t old)
>> -{
>> -	arch_spinlock_t new;
>> -
>> -	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
>> -
>> -	/* Perform the unlock on the "before" copy */
>> -	old.tickets.head += TICKET_LOCK_INC;
>
> NB (see below)

Thanks for pointing, this solved the hang issue. I
missed this exact addition.

>
>> -
>> -	/* Clear the slowpath flag */
>> -	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
>> -
>> -	/*
>> -	 * If the lock is uncontended, clear the flag - use cmpxchg in
>> -	 * case it changes behind our back though.
>> -	 */
>> -	if (new.tickets.head != new.tickets.tail ||
>> -	    cmpxchg(&lock->head_tail, old.head_tail,
>> -					new.head_tail) != old.head_tail) {
>> -		/*
>> -		 * Lock still has someone queued for it, so wake up an
>> -		 * appropriate waiter.
>> -		 */
>> -		__ticket_unlock_kick(lock, old.tickets.head);
>> -	}
>> -}
>> -
>>   static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
>>   {
>>   	if (TICKET_SLOWPATH_FLAG &&
>> -	    static_key_false(&paravirt_ticketlocks_enabled)) {
>> -		arch_spinlock_t prev;
>> +		static_key_false(&paravirt_ticketlocks_enabled)) {
>> +		__ticket_t prev_head;
>>
>> -		prev = *lock;
>> +		prev_head = lock->tickets.head;
>>   		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
>>
>>   		/* add_smp() is a full mb() */
>>
>> -		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
>> -			__ticket_unlock_slowpath(lock, prev);
>> +		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG)) {
>
> So we're OK with still having a ("speculative"?) read-after-unlock here?
> I guess the only way to avoid it is to make the add_smp an xadd, but
> that's pretty expensive even compared to a locked add (at least last
> time I checked, which was at least a couple of microarchitectures ago).
> An unlocked add followed by lfence should also do the trick, but that
> was also much worse in practice.

So we have 3 choices,
1. xadd
2. continue with current approach.
3. a read before unlock and also after that.

>
>> +			BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
>> +			__ticket_unlock_kick(lock, prev_head);
>
> Should be "prev_head + TICKET_LOCK_INC" to match the previous code,
> otherwise it won't find the CPU waiting for the new head.

Yes it is :)


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Peter Zijlstra Feb. 9, 2015, 12:02 p.m. UTC | #10
On Mon, Feb 09, 2015 at 03:04:22PM +0530, Raghavendra K T wrote:
> So we have 3 choices,
> 1. xadd
> 2. continue with current approach.
> 3. a read before unlock and also after that.

For the truly paranoid we have probe_kernel_address(), suppose the lock
was in module space and the module just got unloaded under us.


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Raghavendra K T Feb. 9, 2015, 12:52 p.m. UTC | #11
On 02/09/2015 05:32 PM, Peter Zijlstra wrote:
> On Mon, Feb 09, 2015 at 03:04:22PM +0530, Raghavendra K T wrote:
>> So we have 3 choices,
>> 1. xadd
>> 2. continue with current approach.
>> 3. a read before unlock and also after that.
>
> For the truly paranoid we have probe_kernel_address(), suppose the lock
> was in module space and the module just got unloaded under us.
>

Thanks. Good idea — how costly is it?
At least we could do probe_kernel_address() and check the value of the
slowpath flag if people ask us to address the invalid read problem.

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Linus Torvalds Feb. 10, 2015, 12:53 a.m. UTC | #12
On Mon, Feb 9, 2015 at 4:02 AM, Peter Zijlstra <peterz@infradead.org> wrote:
> On Mon, Feb 09, 2015 at 03:04:22PM +0530, Raghavendra K T wrote:
>> So we have 3 choices,
>> 1. xadd
>> 2. continue with current approach.
>> 3. a read before unlock and also after that.
>
> For the truly paranoid we have probe_kernel_address(), suppose the lock
> was in module space and the module just got unloaded under us.

That's much too expensive.

The xadd shouldn't be noticeably more expensive than the current
"add_smp()". Yes, "lock xadd" used to be several cycles slower than
just "lock add" on some early cores, but I think these days it's down
to a single-cycle difference, which is not really different from doing
a separate load after the add.

The real problem with xadd used to be that we always had to do magic
special-casing for i386, but that's one of the reasons we dropped
support for original 80386.

So I think Raghavendra's last version (which hopefully fixes the
lockup problem that Sasha reported) together with changing that

        add_smp(&lock->tickets.head, TICKET_LOCK_INC);
        if (READ_ONCE(lock->tickets.tail) & TICKET_SLOWPATH_FLAG) ..

into something like

        val = xadd(&lock->ticket.head_tail, TICKET_LOCK_INC << TICKET_SHIFT);
        if (unlikely(val & TICKET_SLOWPATH_FLAG)) ...

would be the right thing to do. Somebody should just check that I got
that shift right, and that the tail is in the high bytes (head really
needs to be high to work, if it's in the low byte(s) the xadd would
overflow from head into tail which would be wrong).

                     Linus
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Raghavendra K T Feb. 10, 2015, 9:30 a.m. UTC | #13
On 02/10/2015 06:23 AM, Linus Torvalds wrote:
> On Mon, Feb 9, 2015 at 4:02 AM, Peter Zijlstra <peterz@infradead.org> wrote:
>> On Mon, Feb 09, 2015 at 03:04:22PM +0530, Raghavendra K T wrote:
>>> So we have 3 choices,
>>> 1. xadd
>>> 2. continue with current approach.
>>> 3. a read before unlock and also after that.
>>
>> For the truly paranoid we have probe_kernel_address(), suppose the lock
>> was in module space and the module just got unloaded under us.
>
> That's much too expensive.
>
> The xadd shouldn't be noticeably more expensive than the current
> "add_smp()". Yes, "lock xadd" used to be several cycles slower than
> just "lock add" on some early cores, but I think these days it's down
> to a single-cycle difference, which is not really different from doing
> a separate load after the add.
>
> The real problem with xadd used to be that we always had to do magic
> special-casing for i386, but that's one of the reasons we dropped
> support for original 80386.
>
> So I think Raghavendra's last version (which hopefully fixes the
> lockup problem that Sasha reported) together with changing that

V2 did pass the stress test, but getting confirmation from Sasha would help.


>          add_smp(&lock->tickets.head, TICKET_LOCK_INC);
>          if (READ_ONCE(lock->tickets.tail) & TICKET_SLOWPATH_FLAG) ..
>
> into something like
>
>          val = xadd((&lock->ticket.head_tail, TICKET_LOCK_INC << TICKET_SHIFT);
>          if (unlikely(val & TICKET_SLOWPATH_FLAG)) ...
>
> would be the right thing to do. Somebody should just check that I got
> that shift right, and that the tail is in the high bytes (head really
> needs to be high to work, if it's in the low byte(s) the xadd would
> overflow from head into tail which would be wrong).

Unfortunately xadd could result in head overflow as tail is high.

The other option was repeated cmpxchg which is bad I believe.
Any suggestions?

( I have the V3 with Oleg's suggestion and performance numbers but
without this getting resolved, It will be one unnecessary iteration).

How about getting rid of SLOW_PATH_FLAG in the spinlock (i.e. use it only
as a hint for paravirt), but doing an unlock_kick whenever we see that
(tail - head) > TICKET_LOCK_INC? (But this also may need a cmpxchg loop
in unlock; on the other hand we would be able to get rid of the
clear-slowpath logic.)

Only problem is we may do unnecessary kicks even in 1x load.








--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Denys Vlasenko Feb. 10, 2015, 1:18 p.m. UTC | #14
On Tue, Feb 10, 2015 at 10:30 AM, Raghavendra K T
<raghavendra.kt@linux.vnet.ibm.com> wrote:
> On 02/10/2015 06:23 AM, Linus Torvalds wrote:
>>          add_smp(&lock->tickets.head, TICKET_LOCK_INC);
>>          if (READ_ONCE(lock->tickets.tail) & TICKET_SLOWPATH_FLAG) ..
>>
>> into something like
>>
>>          val = xadd((&lock->ticket.head_tail, TICKET_LOCK_INC <<
>> TICKET_SHIFT);
>>          if (unlikely(val & TICKET_SLOWPATH_FLAG)) ...
>>
>> would be the right thing to do. Somebody should just check that I got
>> that shift right, and that the tail is in the high bytes (head really
>> needs to be high to work, if it's in the low byte(s) the xadd would
>> overflow from head into tail which would be wrong).
>
> Unfortunately xadd could result in head overflow as tail is high.

xadd can overflow, but is this really a problem?

# define HEAD_MASK (TICKET_SLOWPATH_FLAG-1)

...
unlock_again:

val = xadd((&lock->ticket.head_tail, TICKET_LOCK_INC);
if (unlikely(!(val & HEAD_MASK))) {
    /* overflow. we inadvertently incremented the tail word.
     * tail's lsb is TICKET_SLOWPATH_FLAG.
     * Increment inverted this bit, fix it up.
     * (inc _may_ have messed up tail counter too,
     * will deal with it after kick.)
    */
    val ^= TICKET_SLOWPATH_FLAG;
}

if (unlikely(val & TICKET_SLOWPATH_FLAG)) {
    ...kick the waiting task...

   val -= TICKET_SLOWPATH_FLAG;
   if (unlikely(!(val & HEAD_MASK))) {
        /* overflow. we inadvertently incremented tail word, *and*
         * TICKET_SLOWPATH_FLAG was set, increment overflowed
         * that bit too and incremented tail counter.
         * This means we (inadvertently) taking the lock again!
         * Oh well. Take it, and unlock it again...
         */
        while (1) {
            if (READ_ONCE(lock->tickets.head) != TICKET_TAIL(val))
                cpu_relax();
        }
        goto unlock_again;
}


Granted, this looks ugly.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Denys Vlasenko Feb. 10, 2015, 1:20 p.m. UTC | #15
On Tue, Feb 10, 2015 at 2:18 PM, Denys Vlasenko
<vda.linux@googlemail.com> wrote:
>         while (1) {
>             if (READ_ONCE(lock->tickets.head) != TICKET_TAIL(val))
>                 cpu_relax();
>         }

Doh.... should be

         while (READ_ONCE(lock->tickets.head) != TICKET_TAIL(val)
             cpu_relax();
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Sasha Levin Feb. 10, 2015, 1:23 p.m. UTC | #16
On 02/10/2015 04:30 AM, Raghavendra K T wrote:
>>
>> So I think Raghavendra's last version (which hopefully fixes the
>> lockup problem that Sasha reported) together with changing that
> 
> V2 did pass the stress, but getting confirmation Sasha would help.

I've been running it for the last two days, and didn't see any lockups
or other strange behaviour aside from some invalid reads which we
expected.


Thanks,
Sasha
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Oleg Nesterov Feb. 10, 2015, 2:24 p.m. UTC | #17
On 02/10, Denys Vlasenko wrote:
>
> # define HEAD_MASK (TICKET_SLOWPATH_FLAG-1)
>
> ...
> unlock_again:
>
> val = xadd((&lock->ticket.head_tail, TICKET_LOCK_INC);
> if (unlikely(!(val & HEAD_MASK))) {
>     /* overflow. we inadvertently incremented the tail word.
>      * tail's lsb is TICKET_SLOWPATH_FLAG.
>      * Increment inverted this bit, fix it up.
>      * (inc _may_ have messed up tail counter too,
>      * will deal with it after kick.)
>     */
>     val ^= TICKET_SLOWPATH_FLAG;
> }
>
> if (unlikely(val & TICKET_SLOWPATH_FLAG)) {
>     ...kick the waiting task...
>
>    val -= TICKET_SLOWPATH_FLAG;
>    if (unlikely(!(val & HEAD_MASK))) {
>         /* overflow. we inadvertently incremented tail word, *and*
>          * TICKET_SLOWPATH_FLAG was set, increment overflowed
>          * that bit too and incremented tail counter.
>          * This means we (inadvertently) taking the lock again!
>          * Oh well. Take it, and unlock it again...
>          */
>         while (1) {
>             if (READ_ONCE(lock->tickets.head) != TICKET_TAIL(val))
>                 cpu_relax();
>         }
>         goto unlock_again;
> }
>
>
> Granted, this looks ugly.

complicated ;)

But "Take it, and unlock it again" simply can't work, this can deadlock.
Note that unlock() can be called after successful try_lock(). And other
problems with lock-ordering, like

	lock(X);
	lock(Y);
	
	unlock(X);

Oleg.

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 625660f..0829f86 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -49,6 +49,23 @@  static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
 	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
 }
 
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
+{
+	arch_spinlock_t old, new;
+	__ticket_t diff;
+
+	old.tickets = READ_ONCE(lock->tickets);
+	diff = (old.tickets.tail & ~TICKET_SLOWPATH_FLAG) - old.tickets.head;
+
+	/* try to clear slowpath flag when there are no contenders */
+	if ((old.tickets.tail & TICKET_SLOWPATH_FLAG) &&
+		(diff == TICKET_LOCK_INC)) {
+		new = old;
+		new.tickets.tail &= ~TICKET_SLOWPATH_FLAG;
+		cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
+	}
+}
+
 #else  /* !CONFIG_PARAVIRT_SPINLOCKS */
 static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
 							__ticket_t ticket)
@@ -59,6 +76,10 @@  static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
 {
 }
 
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
+{
+}
+
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -84,7 +105,7 @@  static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
 
 	inc = xadd(&lock->tickets, inc);
-	if (likely(inc.head == inc.tail))
+	if (likely(inc.head == (inc.tail & ~TICKET_SLOWPATH_FLAG)))
 		goto out;
 
 	inc.tail &= ~TICKET_SLOWPATH_FLAG;
@@ -98,7 +119,10 @@  static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 		} while (--count);
 		__ticket_lock_spinning(lock, inc.tail);
 	}
-out:	barrier();	/* make sure nothing creeps before the lock is taken */
+out:
+	__ticket_check_and_clear_slowpath(lock);
+
+	barrier();	/* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -115,47 +139,21 @@  static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
-					    arch_spinlock_t old)
-{
-	arch_spinlock_t new;
-
-	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
-
-	/* Perform the unlock on the "before" copy */
-	old.tickets.head += TICKET_LOCK_INC;
-
-	/* Clear the slowpath flag */
-	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
-
-	/*
-	 * If the lock is uncontended, clear the flag - use cmpxchg in
-	 * case it changes behind our back though.
-	 */
-	if (new.tickets.head != new.tickets.tail ||
-	    cmpxchg(&lock->head_tail, old.head_tail,
-					new.head_tail) != old.head_tail) {
-		/*
-		 * Lock still has someone queued for it, so wake up an
-		 * appropriate waiter.
-		 */
-		__ticket_unlock_kick(lock, old.tickets.head);
-	}
-}
-
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	if (TICKET_SLOWPATH_FLAG &&
-	    static_key_false(&paravirt_ticketlocks_enabled)) {
-		arch_spinlock_t prev;
+		static_key_false(&paravirt_ticketlocks_enabled)) {
+		__ticket_t prev_head;
 
-		prev = *lock;
+		prev_head = lock->tickets.head;
 		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
 
 		/* add_smp() is a full mb() */
 
-		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
-			__ticket_unlock_slowpath(lock, prev);
+		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG)) {
+			BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+			__ticket_unlock_kick(lock, prev_head);
+		}
 	} else
 		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
@@ -164,7 +162,7 @@  static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
-	return tmp.tail != tmp.head;
+	return (tmp.tail & ~TICKET_SLOWPATH_FLAG) != tmp.head;
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)