@@ -333,6 +333,15 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
+config RISCV_USE_QUEUED_SPINLOCKS
+ bool "Using queued spinlock instead of ticket-lock"
+ depends on SMP && MMU
+ select ARCH_USE_QUEUED_SPINLOCKS
+ default y
+ help
+ Make sure the LL/SC implementation of your micro-architecture provides
+ a strong forward-progress guarantee; otherwise, stay with the ticket spinlock.
+
config RISCV_ALTERNATIVE
bool
depends on !XIP_KERNEL
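
The forward-progress requirement in the help text above is about LR/SC retry
loops: the qspinlock atomics boil down to sequences like the one below, and if
a core's sc.w may keep failing under contention, such a loop can livelock. A
minimal stand-alone illustration (not part of the patch; the function name is
made up):

/*
 * Illustration only: a bare lr.w/sc.w retry loop.  Without a strong
 * forward-progress guarantee from the LL/SC implementation, the sc.w
 * below can fail indefinitely under contention and the loop never
 * terminates, which is why the Kconfig help asks for that guarantee
 * before enabling queued spinlocks.
 */
static inline unsigned int lr_sc_fetch_or(unsigned int *p, unsigned int v)
{
	unsigned int old, fail;

	__asm__ __volatile__ (
		"0:	lr.w	%0, %2\n"
		"	or	%1, %0, %3\n"
		"	sc.w	%1, %1, %2\n"
		"	bnez	%1, 0b\n"
		: "=&r" (old), "=&r" (fail), "+A" (*p)
		: "r" (v)
		: "memory");

	return old;	/* value observed before the OR */
}
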
@@ -2,7 +2,9 @@
generic-y += early_ioremap.h
generic-y += flat.h
generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
generic-y += parport.h
+generic-y += qspinlock.h
generic-y += spinlock.h
generic-y += spinlock_types.h
generic-y += qrwlock.h
@@ -17,6 +17,23 @@
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 2: { \
+ u32 old, temp; \
+ u32 shift = ((ulong)__ptr & 2) ? 16 : 0; \
+ u32 mask = 0xffff << shift; \
+ __ptr = (__typeof__(ptr))((ulong)__ptr & ~(ulong)2); \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " and %1, %0, %z3\n" \
+ " or %1, %1, %z4\n" \
+ " sc.w %1, %1, %2\n" \
+ " bnez %1, 0b\n" \
+ : "=&r" (old), "=&r" (temp), "+A" (*__ptr) \
+ : "rJ" (~mask), "rJ" (__new << shift) \
+ : "memory"); \
+ __ret = (__typeof__(*(ptr)))((old & mask) >> shift); \
+ break; \
+ } \
case 4: \
__asm__ __volatile__ ( \
" amoswap.w %0, %2, %1\n" \
@@ -295,6 +295,10 @@ void __init setup_arch(char **cmdline_p)
setup_smp();
#endif
+#if !defined(CONFIG_NUMA) && defined(CONFIG_QUEUED_SPINLOCKS)
+ static_branch_disable(&use_qspinlock_key);
+#endif
+
riscv_fill_hwcap();
apply_boot_alternatives();
}
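
The setup_arch() change above turns the qspinlock static key off on
!CONFIG_NUMA kernels so they keep using ticket locks at run time; the key
itself is defined elsewhere in the series. A rough sketch of how that
definition and the run-time selection could look (the combined-lock wiring and
the ticket_spin_lock() name are assumptions for illustration, not taken from
the patch; DEFINE_STATIC_KEY_TRUE() and static_branch_likely() are the stock
kernel APIs):

#include <linux/jump_label.h>

/* Assumed definition site for the key consulted by setup_arch() above. */
DEFINE_STATIC_KEY_TRUE(use_qspinlock_key);

/*
 * Sketch of a combined lock entry point: take the queued-spinlock path
 * while the key is on, fall back to the ticket path once it has been
 * disabled.  ticket_spin_lock() is a hypothetical name for the ticket
 * implementation; it is not defined in this hunk.
 */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	if (static_branch_likely(&use_qspinlock_key))
		queued_spin_lock(lock);
	else
		ticket_spin_lock(lock);
}
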