@@ -7,6 +7,5 @@ generic-y += parport.h
generic-y += spinlock_types.h
generic-y += qrwlock.h
generic-y += qrwlock_types.h
-generic-y += qspinlock.h
generic-y += user.h
generic-y += vmlinux.lds.h
@@ -26,4 +26,24 @@ int __init pv_time_init(void);
#endif // CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
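+/*
+ * pv_wait() suspends the current vCPU if *ptr still equals val;
+ * pv_kick() wakes the (possibly suspended) vCPU identified by @cpu.
+ */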
+void pv_wait(u8 *ptr, u8 val);
+void pv_kick(int cpu);
+
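+/* Dummy prototypes; they only provide a type for the static calls below. */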
+void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void dummy_queued_spin_unlock(struct qspinlock *lock);
+
+DECLARE_STATIC_CALL(pv_queued_spin_lock_slowpath, dummy_queued_spin_lock_slowpath);
+DECLARE_STATIC_CALL(pv_queued_spin_unlock, dummy_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void);
+
+static inline bool pv_is_native_spin_unlock(void)
+{
+	return false;
+}
+
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
#endif
new file mode 100644
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_QSPINLOCK_H
+#define _ASM_RISCV_QSPINLOCK_H
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/paravirt.h>
+
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
+void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void __pv_init_lock_hash(void);
+void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
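+/*
+ * Both entry points go through static calls so that early boot code can
+ * switch them from the native qspinlock paths to the paravirt ones.
+ */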
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	static_call(pv_queued_spin_lock_slowpath)(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	static_call(pv_queued_spin_unlock)(lock);
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_RISCV_QSPINLOCK_H */
new file mode 100644
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+#define _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+
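+/* Implemented by the generic paravirt qspinlock code in kernel/locking/. */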
+void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+#endif /* _ASM_RISCV_QSPINLOCK_PARAVIRT_H */
@@ -39,7 +39,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
#undef arch_spin_trylock
#undef arch_spin_unlock
-#include <asm-generic/qspinlock.h>
+#include <asm/qspinlock.h>
#include <asm/hwcap.h>
#undef arch_spin_is_locked
@@ -130,3 +130,53 @@ int __init pv_time_init(void)
return 0;
}
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/qspinlock_paravirt.h>
+
+/* Kicking the target vCPU is not implemented here; this is a no-op. */
+void pv_kick(int cpu)
+{
+}
+
+void pv_wait(u8 *ptr, u8 val)
+{
+	unsigned long flags;
+
+	/* Never try to park the vCPU from NMI context. */
+	if (in_nmi())
+		return;
+
+	local_irq_save(flags);
+	/* Bail out if the value has already changed (we may have been kicked). */
+	if (READ_ONCE(*ptr) != val)
+		goto out;
+
+	/* wait_for_interrupt(); */
+out:
+	local_irq_restore(flags);
+}
+
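+/* The native unlock is a plain release store of zero to the locked byte. */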
+static void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	smp_store_release(&lock->locked, 0);
+}
+
+DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
+DEFINE_STATIC_CALL(pv_queued_spin_unlock, native_queued_spin_unlock);
+EXPORT_SYMBOL(__SCK__pv_queued_spin_lock_slowpath);
+EXPORT_SYMBOL(__SCK__pv_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void)
+{
+	/* Paravirt spinlocks buy nothing on a single-CPU system. */
+	if (num_possible_cpus() == 1)
+		return;
+
+	/* Only enable when running as a KVM guest (SBI implemented by KVM). */
+	if (sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)
+		return;
+
+	pr_info("PV qspinlocks enabled\n");
+	__pv_init_lock_hash();
+
+	static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
+	static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
+}
+#endif
@@ -329,6 +329,9 @@ void __init setup_arch(char **cmdline_p)
void __init arch_cpu_finalize_init(void)
{
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+	pv_qspinlock_init();
+#endif
virt_spin_lock_init();
}