Message ID | 20230910082911.3378782-2-guoren@kernel.org (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | riscv: Add Native/Paravirt qspinlock support | expand |
On Sun, 2023-09-10 at 04:28 -0400, guoren@kernel.org wrote: > From: Guo Ren <guoren@linux.alibaba.com> > > The arch_spinlock_t of qspinlock has contained the atomic_t val, which > satisfies the ticket-lock requirement. Thus, unify the arch_spinlock_t > into qspinlock_types.h. This is the preparation for the next combo > spinlock. > > Signed-off-by: Guo Ren <guoren@kernel.org> > Signed-off-by: Guo Ren <guoren@linux.alibaba.com> > --- > include/asm-generic/spinlock.h | 14 +++++++------- > include/asm-generic/spinlock_types.h | 12 ++---------- > 2 files changed, 9 insertions(+), 17 deletions(-) > > diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h > index 90803a826ba0..4773334ee638 100644 > --- a/include/asm-generic/spinlock.h > +++ b/include/asm-generic/spinlock.h > @@ -32,7 +32,7 @@ > > static __always_inline void arch_spin_lock(arch_spinlock_t *lock) > { > - u32 val = atomic_fetch_add(1<<16, lock); > + u32 val = atomic_fetch_add(1<<16, &lock->val); > u16 ticket = val >> 16; > > if (ticket == (u16)val) > @@ -46,31 +46,31 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) > * have no outstanding writes due to the atomic_fetch_add() the extra > * orderings are free. 
> */ > - atomic_cond_read_acquire(lock, ticket == (u16)VAL); > + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); > smp_mb(); > } > > static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) > { > - u32 old = atomic_read(lock); > + u32 old = atomic_read(&lock->val); > > if ((old >> 16) != (old & 0xffff)) > return false; > > - return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ > + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ > } > > static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) > { > u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); > - u32 val = atomic_read(lock); > + u32 val = atomic_read(&lock->val); > > smp_store_release(ptr, (u16)val + 1); > } > > static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) > { > - u32 val = lock.counter; > + u32 val = lock.val.counter; > > return ((val >> 16) == (val & 0xffff)); > } This one seems to be different in torvalds/master, but I suppose it's because of the requirement patches I have not merged. > @@ -84,7 +84,7 @@ static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) > > static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) > { > - u32 val = atomic_read(lock); > + u32 val = atomic_read(&lock->val); > > return (s16)((val >> 16) - (val & 0xffff)) > 1; > } > diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h > index 8962bb730945..f534aa5de394 100644 > --- a/include/asm-generic/spinlock_types.h > +++ b/include/asm-generic/spinlock_types.h > @@ -3,15 +3,7 @@ > #ifndef __ASM_GENERIC_SPINLOCK_TYPES_H > #define __ASM_GENERIC_SPINLOCK_TYPES_H > > -#include <linux/types.h> > -typedef atomic_t arch_spinlock_t; > - > -/* > - * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the > - * include. 
> - */ > -#include <asm/qrwlock_types.h> > - > -#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) > +#include <asm-generic/qspinlock_types.h> > +#include <asm-generic/qrwlock_types.h> > > #endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */ FWIW, LGTM: Reviewed-by: Leonardo Bras <leobras@redhat.com> Just a suggestion: In this patch I could see a lot of usage changes to arch_spinlock_t, and only at the end I could see the actual change in the .h file. In cases like this, it looks nicer to see the .h file first. I recently found out about this git diff.orderFile option, which helps to achieve exactly this. I use the following git.orderfile, adapted from qemu: ############################################################################ # # order file for git, to produce patches which are easier to review # by diffing the important stuff like interface changes first. # # one-off usage: # git diff -O scripts/git.orderfile ... # # add to git config: # git config diff.orderFile scripts/git.orderfile # MAINTAINERS # Documentation Documentation/* *.rst *.rst.inc # build system Kbuild Makefile* *.mak # semantic patches *.cocci # headers *.h *.h.inc # code *.c *.c.inc
On Tue, Sep 12, 2023 at 3:05 AM Leonardo Brás <leobras@redhat.com> wrote: > > On Sun, 2023-09-10 at 04:28 -0400, guoren@kernel.org wrote: > > From: Guo Ren <guoren@linux.alibaba.com> > > > > The arch_spinlock_t of qspinlock has contained the atomic_t val, which > > satisfies the ticket-lock requirement. Thus, unify the arch_spinlock_t > > into qspinlock_types.h. This is the preparation for the next combo > > spinlock. > > > > Signed-off-by: Guo Ren <guoren@kernel.org> > > Signed-off-by: Guo Ren <guoren@linux.alibaba.com> > > --- > > include/asm-generic/spinlock.h | 14 +++++++------- > > include/asm-generic/spinlock_types.h | 12 ++---------- > > 2 files changed, 9 insertions(+), 17 deletions(-) > > > > diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h > > index 90803a826ba0..4773334ee638 100644 > > --- a/include/asm-generic/spinlock.h > > +++ b/include/asm-generic/spinlock.h > > @@ -32,7 +32,7 @@ > > > > static __always_inline void arch_spin_lock(arch_spinlock_t *lock) > > { > > - u32 val = atomic_fetch_add(1<<16, lock); > > + u32 val = atomic_fetch_add(1<<16, &lock->val); > > u16 ticket = val >> 16; > > > > if (ticket == (u16)val) > > @@ -46,31 +46,31 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) > > * have no outstanding writes due to the atomic_fetch_add() the extra > > * orderings are free. 
> > */ > > - atomic_cond_read_acquire(lock, ticket == (u16)VAL); > > + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); > > smp_mb(); > > } > > > > static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) > > { > > - u32 old = atomic_read(lock); > > + u32 old = atomic_read(&lock->val); > > > > if ((old >> 16) != (old & 0xffff)) > > return false; > > > > - return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ > > + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ > > } > > > > static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) > > { > > u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); > > - u32 val = atomic_read(lock); > > + u32 val = atomic_read(&lock->val); > > > > smp_store_release(ptr, (u16)val + 1); > > } > > > > static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) > > { > > - u32 val = lock.counter; > > + u32 val = lock.val.counter; > > > > return ((val >> 16) == (val & 0xffff)); > > } > > This one seems to be different in torvalds/master, but I suppose it's because of > the requirement patches I have not merged. > > > @@ -84,7 +84,7 @@ static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) > > > > static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) > > { > > - u32 val = atomic_read(lock); > > + u32 val = atomic_read(&lock->val); > > > > return (s16)((val >> 16) - (val & 0xffff)) > 1; > > } > > diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h > > index 8962bb730945..f534aa5de394 100644 > > --- a/include/asm-generic/spinlock_types.h > > +++ b/include/asm-generic/spinlock_types.h > > @@ -3,15 +3,7 @@ > > #ifndef __ASM_GENERIC_SPINLOCK_TYPES_H > > #define __ASM_GENERIC_SPINLOCK_TYPES_H > > > > -#include <linux/types.h> > > -typedef atomic_t arch_spinlock_t; > > - > > -/* > > - * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the > > - * include. 
> > - */ > > -#include <asm/qrwlock_types.h> > > - > > -#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) > > +#include <asm-generic/qspinlock_types.h> > > +#include <asm-generic/qrwlock_types.h> > > > > #endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */ > > FWIW, LGTM: > > Reviewed-by: Leonardo Bras <leobras@redhat.com> > > > Just a suggestion: In this patch I could see a lot of usage changes to > arch_spinlock_t, and only at the end I could see the actual change in the .h > file. include/asm-generic/spinlock.h | 14 +++++++------- include/asm-generic/spinlock_types.h | 12 ++---------- All are .h files. So, how to use git.orderfile? > > In cases like this, it looks nicer to see the .h file first. > > I recently found out about this git diff.orderFile option, which helps to > achieve exactly this. > > I use the following git.orderfile, adapted from qemu: > > ############################################################################ > # > # order file for git, to produce patches which are easier to review > # by diffing the important stuff like interface changes first. > # > # one-off usage: > # git diff -O scripts/git.orderfile ... > # > # add to git config: > # git config diff.orderFile scripts/git.orderfile > # > > MAINTAINERS > > # Documentation > Documentation/* > *.rst > *.rst.inc > > # build system > Kbuild > Makefile* > *.mak > > # semantic patches > *.cocci > > # headers > *.h > *.h.inc > > # code > *.c > *.c.inc > >
On Wed, Sep 13, 2023 at 09:55:31AM +0800, Guo Ren wrote: > On Tue, Sep 12, 2023 at 3:05 AM Leonardo Brás <leobras@redhat.com> wrote: > > > > On Sun, 2023-09-10 at 04:28 -0400, guoren@kernel.org wrote: > > > From: Guo Ren <guoren@linux.alibaba.com> > > > > > > The arch_spinlock_t of qspinlock has contained the atomic_t val, which > > > satisfies the ticket-lock requirement. Thus, unify the arch_spinlock_t > > > into qspinlock_types.h. This is the preparation for the next combo > > > spinlock. > > > > > > Signed-off-by: Guo Ren <guoren@kernel.org> > > > Signed-off-by: Guo Ren <guoren@linux.alibaba.com> > > > --- > > > include/asm-generic/spinlock.h | 14 +++++++------- > > > include/asm-generic/spinlock_types.h | 12 ++---------- > > > 2 files changed, 9 insertions(+), 17 deletions(-) > > > > > > diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h > > > index 90803a826ba0..4773334ee638 100644 > > > --- a/include/asm-generic/spinlock.h > > > +++ b/include/asm-generic/spinlock.h > > > @@ -32,7 +32,7 @@ > > > > > > static __always_inline void arch_spin_lock(arch_spinlock_t *lock) > > > { > > > - u32 val = atomic_fetch_add(1<<16, lock); > > > + u32 val = atomic_fetch_add(1<<16, &lock->val); > > > u16 ticket = val >> 16; > > > > > > if (ticket == (u16)val) > > > @@ -46,31 +46,31 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) > > > * have no outstanding writes due to the atomic_fetch_add() the extra > > > * orderings are free. 
> > > */ > > > - atomic_cond_read_acquire(lock, ticket == (u16)VAL); > > > + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); > > > smp_mb(); > > > } > > > > > > static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) > > > { > > > - u32 old = atomic_read(lock); > > > + u32 old = atomic_read(&lock->val); > > > > > > if ((old >> 16) != (old & 0xffff)) > > > return false; > > > > > > - return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ > > > + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ > > > } > > > > > > static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) > > > { > > > u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); > > > - u32 val = atomic_read(lock); > > > + u32 val = atomic_read(&lock->val); > > > > > > smp_store_release(ptr, (u16)val + 1); > > > } > > > > > > static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) > > > { > > > - u32 val = lock.counter; > > > + u32 val = lock.val.counter; > > > > > > return ((val >> 16) == (val & 0xffff)); > > > } > > > > This one seems to be different in torvalds/master, but I suppose it's because of > > the requirement patches I have not merged. 
> > > > > @@ -84,7 +84,7 @@ static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) > > > > > > static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) > > > { > > > - u32 val = atomic_read(lock); > > > + u32 val = atomic_read(&lock->val); > > > > > > return (s16)((val >> 16) - (val & 0xffff)) > 1; > > > } > > > diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h > > > index 8962bb730945..f534aa5de394 100644 > > > --- a/include/asm-generic/spinlock_types.h > > > +++ b/include/asm-generic/spinlock_types.h > > > @@ -3,15 +3,7 @@ > > > #ifndef __ASM_GENERIC_SPINLOCK_TYPES_H > > > #define __ASM_GENERIC_SPINLOCK_TYPES_H > > > > > > -#include <linux/types.h> > > > -typedef atomic_t arch_spinlock_t; > > > - > > > -/* > > > - * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the > > > - * include. > > > - */ > > > -#include <asm/qrwlock_types.h> > > > - > > > -#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) > > > +#include <asm-generic/qspinlock_types.h> > > > +#include <asm-generic/qrwlock_types.h> > > > > > > #endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */ > > > > FWIW, LGTM: > > > > Reviewed-by: Leonardo Bras <leobras@redhat.com> > > > > > > Just a suggestion: In this patch I could see a lot of usage changes to > > arch_spinlock_t, and only at the end I could see the actual change in the .h > > file. > include/asm-generic/spinlock.h | 14 +++++++------- > include/asm-generic/spinlock_types.h | 12 ++---------- > > All are .h files. So, how to use git.orderfile? Yeap, you are right. For some reason I got confused about seeing functions before type definition. But in any way, we can get the same result with: *types.h *.h *.c Meaning 'spinlock_types.h' will appear before 'spinlock.h'. 
After first suggesting this, I also sent a patch providing a default orderFile for the kernel, and I also added this to the latest version: https://lore.kernel.org/all/20230913075550.90934-2-leobras@redhat.com/ > > > > > In cases like this, it looks nicer to see the .h file first. > > > > I recently found out about this git diff.orderFile option, which helps to > > achieve exactly this. > > > > I use the following git.orderfile, adapted from qemu: > > > > ############################################################################ > > # > > # order file for git, to produce patches which are easier to review > > # by diffing the important stuff like interface changes first. > > # > > # one-off usage: > > # git diff -O scripts/git.orderfile ... > > # > > # add to git config: > > # git config diff.orderFile scripts/git.orderfile > > # > > > > MAINTAINERS > > > > # Documentation > > Documentation/* > > *.rst > > *.rst.inc > > > > # build system > > Kbuild > > Makefile* > > *.mak > > > > # semantic patches > > *.cocci > > > > # headers > > *.h > > *.h.inc > > > > # code > > *.c > > *.c.inc > > > > > > > -- > Best Regards > Guo Ren >
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index 90803a826ba0..4773334ee638 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -32,7 +32,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { - u32 val = atomic_fetch_add(1<<16, lock); + u32 val = atomic_fetch_add(1<<16, &lock->val); u16 ticket = val >> 16; if (ticket == (u16)val) @@ -46,31 +46,31 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) * have no outstanding writes due to the atomic_fetch_add() the extra * orderings are free. */ - atomic_cond_read_acquire(lock, ticket == (u16)VAL); + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); smp_mb(); } static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) { - u32 old = atomic_read(lock); + u32 old = atomic_read(&lock->val); if ((old >> 16) != (old & 0xffff)) return false; - return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ } static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); - u32 val = atomic_read(lock); + u32 val = atomic_read(&lock->val); smp_store_release(ptr, (u16)val + 1); } static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) { - u32 val = lock.counter; + u32 val = lock.val.counter; return ((val >> 16) == (val & 0xffff)); } @@ -84,7 +84,7 @@ static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) { - u32 val = atomic_read(lock); + u32 val = atomic_read(&lock->val); return (s16)((val >> 16) - (val & 0xffff)) > 1; } diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h index 8962bb730945..f534aa5de394 100644 --- a/include/asm-generic/spinlock_types.h +++ b/include/asm-generic/spinlock_types.h @@ -3,15 +3,7 @@
#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H #define __ASM_GENERIC_SPINLOCK_TYPES_H -#include <linux/types.h> -typedef atomic_t arch_spinlock_t; - -/* - * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the - * include. - */ -#include <asm/qrwlock_types.h> - -#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) +#include <asm-generic/qspinlock_types.h> +#include <asm-generic/qrwlock_types.h> #endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */