--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -19,10 +19,12 @@ static inline u64 paravirt_steal_clock(int cpu)
}
int __init pv_time_init(void);
+int __init pv_lock_init(void);
#else
#define pv_time_init() do {} while (0)
+#define pv_lock_init() do {} while (0)
#endif // CONFIG_PARAVIRT
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -10,7 +10,20 @@
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
+#define vcpu_is_preempted vcpu_is_preempted
+
+#ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
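+/*
+ * vcpu_is_preempted() is routed through a static call. It starts out
+ * bound to a native implementation that always returns false and is
+ * rebound to the paravirtualized implementation by pv_lock_init()
+ * once the hypervisor advertises PV-lock support.
+ * dummy_vcpu_is_preempted() is never defined; it exists only to give
+ * the static call its function type.
+ */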
+bool dummy_vcpu_is_preempted(int cpu);
+DECLARE_STATIC_CALL(pv_vcpu_is_preempted, dummy_vcpu_is_preempted);
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return static_call(pv_vcpu_is_preempted)(cpu);
+}
+
+#else
/*
* Changing this will break osq_lock() thanks to the call inside
* smp_cond_load_relaxed().
@@ -18,10 +31,11 @@
* See:
* https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
*/
-#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
return false;
}
+#endif /* CONFIG_PARAVIRT */
+
#endif /* __ASM_SPINLOCK_H */
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -20,8 +20,10 @@
#include <linux/types.h>
#include <linux/static_call.h>
+#include <asm/hypervisor.h>
#include <asm/paravirt.h>
#include <asm/pvclock-abi.h>
+#include <asm/pvlock-abi.h>
#include <asm/smp_plat.h>
struct static_key paravirt_steal_enabled;
@@ -38,7 +40,12 @@ struct pv_time_stolen_time_region {
struct pvclock_vcpu_stolen_time __rcu *kaddr;
};
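+/*
+ * Per-cpu mapping of the pvlock_vcpu_state shared with the hypervisor.
+ * kaddr stays NULL until the CPU-hotplug callback below maps the
+ * region advertised by the PV_LOCK hypercall.
+ */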
+struct pv_lock_state_region {
+ struct pvlock_vcpu_state __rcu *kaddr;
+};
+
static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
+static DEFINE_PER_CPU(struct pv_lock_state_region, lock_state_region);
static bool steal_acc = true;
static int __init parse_no_stealacc(char *arg)
@@ -178,3 +185,108 @@ int __init pv_time_init(void)
return 0;
}
+
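+/* Default: without PV-lock support a vCPU is never reported preempted. */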
+static bool native_vcpu_is_preempted(int cpu)
+{
+ return false;
+}
+
+DEFINE_STATIC_CALL(pv_vcpu_is_preempted, native_vcpu_is_preempted);
+
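+/*
+ * The hypervisor sets the preempted field of the shared pvlock_vcpu_state
+ * while this vCPU is scheduled out (assumed here to be a little-endian
+ * 64-bit flag, per the pvlock-abi.h layout). A non-zero value lets lock
+ * waiters avoid spinning on a holder that cannot currently run.
+ */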
+static bool para_vcpu_is_preempted(int cpu)
+{
+ struct pv_lock_state_region *reg;
+	u64 preempted;
+
+ reg = per_cpu_ptr(&lock_state_region, cpu);
+ if (!reg->kaddr) {
+ pr_warn_once("PV lock enabled but not configured for cpu %d\n",
+ cpu);
+ return false;
+ }
+
+	preempted = le64_to_cpu(READ_ONCE(reg->kaddr->preempted));
+
+	return !!preempted;
+}
+
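+/* Undo init_pvlock_vcpu_state() when the CPU goes offline. */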
+static int pvlock_vcpu_state_dying_cpu(unsigned int cpu)
+{
+ struct pv_lock_state_region *reg;
+
+ reg = this_cpu_ptr(&lock_state_region);
+ if (!reg->kaddr)
+ return 0;
+
+ memunmap(reg->kaddr);
+ memset(reg, 0, sizeof(*reg));
+
+ return 0;
+}
+
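+/*
+ * Runs on each CPU as it comes online: ask the hypervisor for the base
+ * address of this vCPU's pvlock_vcpu_state and map it write-back
+ * cacheable.
+ */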
+static int init_pvlock_vcpu_state(unsigned int cpu)
+{
+ struct pv_lock_state_region *reg;
+ struct arm_smccc_res res;
+
+ reg = this_cpu_ptr(&lock_state_region);
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PV_LOCK_FUNC_ID, &res);
+
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ pr_warn("Failed to init PV lock data structure\n");
+ return -EINVAL;
+ }
+
+ reg->kaddr = memremap(res.a0,
+ sizeof(struct pvlock_vcpu_state),
+ MEMREMAP_WB);
+
+ if (!reg->kaddr) {
+ pr_warn("Failed to map PV lock data structure\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int kvm_arm_init_pvlock(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ARM_KVM_PVLOCK_STARTING,
+ "hypervisor/arm/pvlock:starting",
+ init_pvlock_vcpu_state,
+ pvlock_vcpu_state_dying_cpu);
+ if (ret < 0) {
+ pr_warn("PV-lock init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
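+/* PV-lock support is discovered via the KVM vendor hypercall service bitmap. */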
+static bool has_kvm_pvlock(void)
+{
+ return kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PV_LOCK);
+}
+
+int __init pv_lock_init(void)
+{
+ int ret;
+
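+	/* Booted at EL2: we are a host, not a KVM guest, so PV-lock does not apply. */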
+ if (is_hyp_mode_available())
+ return 0;
+
+ if (!has_kvm_pvlock())
+ return 0;
+
+ ret = kvm_arm_init_pvlock();
+ if (ret)
+ return ret;
+
+ static_call_update(pv_vcpu_is_preempted, para_vcpu_is_preempted);
+ pr_info("using PV-lock preempted\n");
+
+ return 0;
+}
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -42,6 +42,7 @@
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
+#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -360,6 +361,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
smp_init_cpus();
smp_build_mpidr_hash();
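+	/*
+	 * Register the PV-lock CPU-hotplug callbacks; secondary CPUs map
+	 * their shared state as they are brought online later.
+	 */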
+ pv_lock_init();
+
/* Init percpu seeds for random tags after cpus are set up. */
kasan_init_sw_tags();
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -192,6 +192,7 @@ enum cpuhp_state {
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
+ CPUHP_AP_ARM_KVM_PVLOCK_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,