@@ -20,6 +20,7 @@ config RISCV
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
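
Selecting ARCH_HAS_CPU_FINALIZE_INIT opts the architecture into the generic arch_cpu_finalize_init() hook invoked from start_kernel(); roughly, the generic side looks like this (paraphrased from include/linux/cpu.h):

#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
void arch_cpu_finalize_init(void);
#else
static inline void arch_cpu_finalize_init(void) { }
#endif

The RISC-V implementation of the hook is added at the end of this patch.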
@@ -51,6 +51,13 @@ enum sbi_ext_base_fid {
SBI_EXT_BASE_GET_MIMPID,
};

+enum sbi_ext_base_impl_id {
+ SBI_EXT_BASE_IMPL_ID_BBL = 0,
+ SBI_EXT_BASE_IMPL_ID_OPENSBI,
+ SBI_EXT_BASE_IMPL_ID_XVISOR,
+ SBI_EXT_BASE_IMPL_ID_KVM,
+};
+
enum sbi_ext_time_fid {
SBI_EXT_TIME_SET_TIMER = 0,
};
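
These values mirror the SBI implementation IDs assigned by the SBI specification. As a hedged illustration only (neither the table nor its name is part of this patch), they could be mapped to human-readable names:

static const char * const sbi_impl_names[] = {
	[SBI_EXT_BASE_IMPL_ID_BBL]	= "Berkeley Boot Loader",
	[SBI_EXT_BASE_IMPL_ID_OPENSBI]	= "OpenSBI",
	[SBI_EXT_BASE_IMPL_ID_XVISOR]	= "Xvisor",
	[SBI_EXT_BASE_IMPL_ID_KVM]	= "KVM",
};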
@@ -286,6 +293,7 @@ int sbi_console_getchar(void);
long sbi_get_mvendorid(void);
long sbi_get_marchid(void);
long sbi_get_mimpid(void);
+long sbi_get_firmware_id(void);
void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_send_ipi(unsigned int cpu);
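
A minimal sketch of how the new declaration combines with the enum above to detect a KVM guest; the helper name riscv_is_kvm_guest() is illustrative, not part of this patch:

static bool __init riscv_is_kvm_guest(void)
{
	/* KVM's in-kernel SBI implementation reports its own ID. */
	return sbi_get_firmware_id() == SBI_EXT_BASE_IMPL_ID_KVM;
}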
@@ -4,6 +4,28 @@
#define __ASM_RISCV_SPINLOCK_H

#ifdef CONFIG_QUEUED_SPINLOCKS

+/*
+ * KVM guests fall back to a Test-and-Set spinlock, because fair locks
+ * have horrible lock 'holder' preemption issues. The virt_spin_lock_key
+ * short-circuits queued_spin_lock_slowpath(), allowing virt_spin_lock()
+ * to hijack it.
+ */
+DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+ if (!static_branch_likely(&virt_spin_lock_key))
+ return false;
+
+ /*
+ * Test-and-Set: spin until the lock word reads zero, then try to
+ * claim it with cmpxchg; retry if another CPU wins the race.
+ */
+ do {
+ while (atomic_read(&lock->val) != 0)
+ cpu_relax();
+ } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
+
+ return true;
+}
+
#define _Q_PENDING_LOOPS (1 << 9)
#endif
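
For context, the generic slowpath consults this hook before any queueing work, so a guest that returns true here never joins the MCS queue (paraphrased from kernel/locking/qspinlock.c):

void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (virt_spin_lock(lock))
		return;
	/* ... pending-bit and MCS queueing follow ... */
}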
@@ -21,6 +21,7 @@
#include <asm/hwcap.h>
#include <asm/patch.h>
#include <asm/processor.h>
+#include <asm/sbi.h>
#include <asm/vector.h>

#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
@@ -343,7 +344,8 @@ void __init riscv_fill_hwcap(void)
* ticket_spinlock, but not vice versa.
*/
if (!force_qspinlock &&
- !riscv_has_errata_thead_qspinlock()) {
+ !riscv_has_errata_thead_qspinlock() &&
+ sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM) {
set_bit(RISCV_ISA_EXT_XTICKETLOCK, isainfo->isa);
}
#endif
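
force_qspinlock is taken here to be a boot-time override defined elsewhere in this series. A hedged sketch of what such a hook might look like; the parameter name and setup function are assumptions, not taken from this patch:

static bool force_qspinlock;

static int __init force_qspinlock_setup(char *p)
{
	/* Assumption: lets users force qspinlock even where the
	 * ticket lock would otherwise be chosen. */
	force_qspinlock = true;
	return 0;
}
early_param("qspinlock", force_qspinlock_setup);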
@@ -555,7 +555,7 @@ static inline long sbi_get_spec_version(void)
return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
}

-static inline long sbi_get_firmware_id(void)
+long sbi_get_firmware_id(void)
{
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
}
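
For reference, __sbi_base_ecall() already lives in this file and wraps the SBI Base-extension ecall; roughly (paraphrased from arch/riscv/kernel/sbi.c):

static long __sbi_base_ecall(int fid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}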
@@ -26,6 +26,7 @@
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
+#include <asm/cpufeature.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
@@ -264,6 +265,19 @@ static void __init parse_dtb(void)
#endif
}

+#ifdef CONFIG_QUEUED_SPINLOCKS
+DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+static void __init virt_spin_lock_init(void)
+{
+ if (sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM ||
+ force_qspinlock)
+ static_branch_disable(&virt_spin_lock_key);
+}
+#else
+static void __init virt_spin_lock_init(void) {}
+#endif
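
The net policy once virt_spin_lock_init() has run, summarized as an illustrative comment (not part of the patch):

/*
 * KVM guest && !force_qspinlock: key stays enabled -> Test-and-Set lock.
 * Everything else: key disabled -> queued spinlock (or the ticket lock
 * selected earlier in riscv_fill_hwcap()).
 */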
+
extern void __init init_rt_signal_env(void);

void __init setup_arch(char **cmdline_p)
@@ -313,6 +327,11 @@ void __init setup_arch(char **cmdline_p)
riscv_noncoherent_supported();
}

+void __init arch_cpu_finalize_init(void)
+{
+ virt_spin_lock_init();
+}
+
static int __init topology_init(void)
{
int i, ret;