@@ -800,6 +800,24 @@ config PARAVIRT_SPINLOCKS
If you are unsure how to answer this question, answer Y.
+config NUMA_AWARE_SPINLOCKS
+ bool "Numa-aware spinlocks"
+ depends on NUMA
+ depends on QUEUED_SPINLOCKS
+ depends on 64BIT
+ # For now, we depend on PARAVIRT_SPINLOCKS to make the patching work.
+ depends on PARAVIRT_SPINLOCKS
+ default y
+ help
+ Introduce NUMA (Non-Uniform Memory Access) awareness into
+ the slow path of spinlocks.
+
+ In this variant of qspinlock, the kernel tries to keep the lock
+ on the same NUMA node, reducing the number of remote cache misses
+ while trading some short-term fairness for better performance.
+
+ Say N if you want absolute first-come, first-served fairness.
+
endmenu # "Kernel features"
menu "Boot options"
@@ -12,6 +12,12 @@ void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
void __pv_init_lock_hash(void);
void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
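+/* Compact NUMA-aware (CNA) variant of the qspinlock slow path. */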
+#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
+bool cna_configure_spin_lock_slowpath(void);
+void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#endif
+
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
static_call(pv_queued_spin_lock_slowpath)(lock, val);
@@ -193,8 +193,8 @@ void __init pv_qspinlock_init(void)
if (num_possible_cpus() == 1)
return;
- if (sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)
- return;
+ if (sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)
+ goto cna_qspinlock;
if (!sbi_probe_extension(SBI_EXT_PVLOCK))
return;
@@ -204,5 +204,15 @@ void __init pv_qspinlock_init(void)
static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
+ return;
+
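+ /* Not running as a KVM guest: use the CNA slow path if it is available. */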
+cna_qspinlock:
+#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
+ if (cna_configure_spin_lock_slowpath())
+ static_call_update(pv_queued_spin_lock_slowpath,
+ __cna_queued_spin_lock_slowpath);
+#endif
+ return; /* a label must be followed by a statement, even with CNA compiled out */
}
#endif