@@ -127,6 +127,9 @@ The pseudo-firmware bitmap registers are as follows:
Bit-1: KVM_REG_ARM_VENDOR_HYP_BIT_PTP:
The bit represents the Precision Time Protocol KVM service.
+ Bit-2: KVM_REG_ARM_VENDOR_HYP_BIT_PV_LOCK:
+ The bit represents the Paravirtualized lock KVM service.
+
Errors:
======= =============================================================
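
The new service bit is discoverable from inside the guest through the
vendor hypervisor services bitmap. A minimal guest-side probe, assuming
an ARM_SMCCC_KVM_FUNC_PV_LOCK bit number from the matching arm-smccc.h
change (that definition is not part of this excerpt):

#include <linux/arm-smccc.h>

/*
 * Guest-side sketch: kvm_arm_hyp_service_available() tests the
 * ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID bitmap cached at boot.
 * ARM_SMCCC_KVM_FUNC_PV_LOCK is an assumed name here.
 */
static bool has_kvm_pvlock(void)
{
	return kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PV_LOCK);
}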
@@ -501,6 +501,11 @@ struct kvm_vcpu_arch {
u64 last_steal;
gpa_t base;
} steal;
+
+ /* Guest PV lock state */
+ struct {
+ gpa_t base;
+ } pv_lock;
};
/*
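
The new pv_lock.base field caches the guest physical address of a shared
per-vCPU structure. pvlock.c below includes <asm/pvlock-abi.h>, which is
not shown in this excerpt; a sketch of that header consistent with the
accessors used here (a single little-endian word; the 64-byte padded
layout is an assumption):

#include <linux/types.h>

/* Assumed layout of struct pvlock_vcpu_state from asm/pvlock-abi.h. */
struct pvlock_vcpu_state {
	__le64 preempted;
	/* Structure must be 64 byte aligned, pad to that size. */
	u8 padding[56];
} __packed;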
@@ -924,6 +929,19 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
return (vcpu_arch->steal.base != GPA_INVALID);
}
+static inline void kvm_arm_pvlock_preempted_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+ vcpu_arch->pv_lock.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvlock_preempted_ready(struct kvm_vcpu_arch *vcpu_arch)
+{
+ return (vcpu_arch->pv_lock.base != GPA_INVALID);
+}
+
+gpa_t kvm_init_pvlock(struct kvm_vcpu *vcpu);
+void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted);
+
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
@@ -366,6 +366,7 @@ enum {
enum {
KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0,
KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1,
+ KVM_REG_ARM_VENDOR_HYP_BIT_PV_LOCK = 2,
#ifdef __KERNEL__
KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT,
#endif
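
Userspace controls exposure of the new service through the existing
KVM_REG_ARM_VENDOR_HYP_BMAP pseudo-firmware register. A VMM-side sketch
using KVM_SET_ONE_REG (error handling elided; vcpu_fd and the choice to
keep the default FUNC_FEAT and PTP bits set are assumptions):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Write the vendor hypervisor service bitmap for one vCPU. */
static int enable_pv_lock(int vcpu_fd)
{
	__u64 bmap = (1ULL << KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT) |
		     (1ULL << KVM_REG_ARM_VENDOR_HYP_BIT_PTP) |
		     (1ULL << KVM_REG_ARM_VENDOR_HYP_BIT_PV_LOCK);
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_VENDOR_HYP_BMAP,
		.addr = (__u64)&bmap,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}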
@@ -10,7 +10,7 @@ include $(srctree)/virt/kvm/Makefile.kvm
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM) += hyp/
-kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
+kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o pvlock.o \
inject_fault.o va_layout.o handle_exit.o \
guest.o debug.o reset.o sys_regs.o stacktrace.o \
vgic-sys-reg-v3.o fpsimd.o pkvm.o \
@@ -357,6 +357,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
kvm_arm_pvtime_vcpu_init(&vcpu->arch);
+ kvm_arm_pvlock_preempted_init(&vcpu->arch);
+
vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
err = kvm_vgic_vcpu_init(vcpu);
@@ -432,6 +434,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (vcpu_has_ptrauth(vcpu))
vcpu_ptrauth_disable(vcpu);
+
+ if (kvm_arm_is_pvlock_preempted_ready(&vcpu->arch))
+ kvm_update_pvlock_preempted(vcpu, 0);
+
kvm_arch_vcpu_load_debug_state_flags(vcpu);
if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
@@ -445,6 +451,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
if (has_vhe())
kvm_vcpu_put_sysregs_vhe(vcpu);
kvm_timer_vcpu_put(vcpu);
+ if (kvm_arm_is_pvlock_preempted_ready(&vcpu->arch))
+ kvm_update_pvlock_preempted(vcpu, 1);
kvm_vgic_put(vcpu);
kvm_vcpu_pmu_restore_host(vcpu);
kvm_arm_vmid_clear_active();
@@ -116,6 +116,9 @@ static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id)
case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
&smccc_feat->vendor_hyp_bmap);
+ case ARM_SMCCC_VENDOR_HYP_KVM_PV_LOCK_FUNC_ID:
+ return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PV_LOCK,
+ &smccc_feat->vendor_hyp_bmap);
default:
return kvm_hvc_call_default_allowed(func_id);
}
@@ -201,6 +204,11 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
if (gpa != GPA_INVALID)
val[0] = gpa;
break;
+ case ARM_SMCCC_VENDOR_HYP_KVM_PV_LOCK_FUNC_ID:
+ gpa = kvm_init_pvlock(vcpu);
+ if (gpa != GPA_INVALID)
+ val[0] = gpa;
+ break;
case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
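
From the guest, the same function ID returns the IPA of the calling
vCPU's structure, or GPA_INVALID (all ones) when no base has been
configured. A guest-side sketch, assuming the
ARM_SMCCC_VENDOR_HYP_KVM_PV_LOCK_FUNC_ID definition from the matching
arm-smccc.h change:

#include <linux/arm-smccc.h>

/* Guest-side sketch: fetch this vCPU's pvlock base from the host. */
static u64 pv_lock_get_base(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PV_LOCK_FUNC_ID, &res);

	/* SMCCC_RET_NOT_SUPPORTED (-1) matches the GPA_INVALID pattern. */
	return res.a0;
}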
new file mode 100644
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019 Huawei Technologies Co., Ltd
+ * Author: Zengruan Ye <yezengruan@huawei.com>
+ * Usama Arif <usama.arif@bytedance.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
+
+#include <asm/pvlock-abi.h>
+
+#include <kvm/arm_hypercalls.h>
+
+gpa_t kvm_init_pvlock(struct kvm_vcpu *vcpu)
+{
+ struct pvlock_vcpu_state init_values = {};
+ struct kvm *kvm = vcpu->kvm;
+ u64 base = vcpu->arch.pv_lock.base;
+ int idx;
+
+ if (base == GPA_INVALID)
+ return base;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return base;
+}
+
+void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted)
+{
+ int idx;
+ u64 offset;
+ struct kvm *kvm = vcpu->kvm;
+ u64 base = vcpu->arch.pv_lock.base;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ offset = offsetof(struct pvlock_vcpu_state, preempted);
+ kvm_put_guest(kvm, base + offset, cpu_to_le64(preempted));
+ srcu_read_unlock(&kvm->srcu, idx);
+}
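
kvm_arch_vcpu_put() above sets preempted to 1 and kvm_arch_vcpu_load()
clears it, so the guest observes a non-zero value exactly while its vCPU
is scheduled out. The consumer is the guest's vcpu_is_preempted() path;
a minimal reader sketch, assuming each CPU has mapped its structure and
stored the pointer in a per-cpu variable (that plumbing is not part of
this excerpt):

#include <linux/percpu-defs.h>
#include <asm/pvlock-abi.h>

static DEFINE_PER_CPU(struct pvlock_vcpu_state *, pvlock_state);

/* Guest-side sketch: was vCPU @cpu preempted by the host? */
static bool __pv_vcpu_is_preempted(int cpu)
{
	struct pvlock_vcpu_state *st = per_cpu(pvlock_state, cpu);

	return st && le64_to_cpu(READ_ONCE(st->preempted));
}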
@@ -365,6 +365,7 @@ enum {
enum {
KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0,
KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1,
+ KVM_REG_ARM_VENDOR_HYP_BIT_PV_LOCK = 2,
#ifdef __KERNEL__
KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT,
#endif