--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -239,6 +239,7 @@ enum vcpu_sysreg {
/* Statistical Profiling Extension Registers. */
PMSCR_EL1, /* Statistical Profiling Control Register */
+ PMSNEVFR_EL1, /* Sampling Inverted Event Filter Register */
PMSICR_EL1, /* Sampling Interval Counter Register */
PMSIRR_EL1, /* Sampling Interval Reload Register */
PMSFCR_EL1, /* Sampling Filter Control Register */
--- a/arch/arm64/include/asm/kvm_spe.h
+++ b/arch/arm64/include/asm/kvm_spe.h
@@ -28,6 +28,9 @@ int kvm_spe_vcpu_first_run_init(struct kvm_vcpu *vcpu);
void kvm_spe_write_sysreg(struct kvm_vcpu *vcpu, int reg, u64 val);
u64 kvm_spe_read_sysreg(struct kvm_vcpu *vcpu, int reg);
+void kvm_spe_vcpu_load(struct kvm_vcpu *vcpu);
+void kvm_spe_vcpu_put(struct kvm_vcpu *vcpu);
+
int kvm_spe_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_spe_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_spe_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
@@ -45,6 +48,9 @@ static inline int kvm_spe_vcpu_first_run_init(struct kvm_vcpu *vcpu) { return -E
static inline void kvm_spe_write_sysreg(struct kvm_vcpu *vcpu, int reg, u64 val) {}
static inline u64 kvm_spe_read_sysreg(struct kvm_vcpu *vcpu, int reg) { return 0; }
+static inline void kvm_spe_vcpu_load(struct kvm_vcpu *vcpu) {}
+static inline void kvm_spe_vcpu_put(struct kvm_vcpu *vcpu) {}
+
static inline int kvm_spe_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -921,6 +921,7 @@
#define ID_AA64DFR0_PMSVER_8_2 0x1
#define ID_AA64DFR0_PMSVER_8_3 0x2
+#define ID_AA64DFR0_PMSVER_8_7 0x3
#define ID_DFR0_PERFMON_SHIFT 24
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -466,6 +466,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (vcpu_has_ptrauth(vcpu))
vcpu_ptrauth_disable(vcpu);
kvm_arch_vcpu_load_debug_state_flags(vcpu);
+ kvm_spe_vcpu_load(vcpu);
if (!cpumask_empty(&vcpu->arch.supported_cpus) &&
!cpumask_test_cpu(smp_processor_id(), &vcpu->arch.supported_cpus))
@@ -474,6 +475,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ kvm_spe_vcpu_put(vcpu);
kvm_arch_vcpu_put_debug_state_flags(vcpu);
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
--- a/arch/arm64/kvm/spe.c
+++ b/arch/arm64/kvm/spe.c
@@ -67,6 +67,35 @@ u64 kvm_spe_read_sysreg(struct kvm_vcpu *vcpu, int reg)
return __vcpu_sys_reg(vcpu, reg);
}
+static unsigned int kvm_spe_get_pmsver(void)
+{
+ u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
+
+ return cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT);
+}
+
+void kvm_spe_vcpu_load(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_vcpu_has_spe(vcpu))
+ return;
+
+ if (kvm_spe_get_pmsver() < ID_AA64DFR0_PMSVER_8_7)
+ return;
+
+ write_sysreg_s(__vcpu_sys_reg(vcpu, PMSNEVFR_EL1), SYS_PMSNEVFR_EL1);
+}
+
+void kvm_spe_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_vcpu_has_spe(vcpu))
+ return;
+
+ if (kvm_spe_get_pmsver() < ID_AA64DFR0_PMSVER_8_7)
+ return;
+
+ __vcpu_sys_reg(vcpu, PMSNEVFR_EL1) = read_sysreg_s(SYS_PMSNEVFR_EL1);
+}
+
static bool kvm_vcpu_supports_spe(struct kvm_vcpu *vcpu)
{
if (!kvm_supports_spe())
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1562,6 +1562,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
{ SPE_SYS_REG(SYS_PMSCR_EL1), .reg = PMSCR_EL1 },
+ { SPE_SYS_REG(SYS_PMSNEVFR_EL1), .reg = PMSNEVFR_EL1 },
{ SPE_SYS_REG(SYS_PMSICR_EL1), .reg = PMSICR_EL1 },
{ SPE_SYS_REG(SYS_PMSIRR_EL1), .reg = PMSIRR_EL1 },
{ SPE_SYS_REG(SYS_PMSFCR_EL1), .reg = PMSFCR_EL1 },

FEAT_SPEv1p2 introduced a new register, PMSNEVFR_EL1, the inverted event
filter. The host SPE driver does not use the register, so it is enough to
context-switch it lazily: save it to the guest context on vcpu_put() and
restore it on vcpu_load(), rather than on every world switch. The host will
not touch the register while the vCPU is loaded, and the value programmed by
the guest does not affect the host.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/include/asm/kvm_host.h |  1 +
 arch/arm64/include/asm/kvm_spe.h  |  6 ++++++
 arch/arm64/include/asm/sysreg.h   |  1 +
 arch/arm64/kvm/arm.c              |  2 ++
 arch/arm64/kvm/spe.c              | 29 +++++++++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c         |  1 +
 6 files changed, 40 insertions(+)
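
Note (illustrative only, not part of the patch): for reviewers unfamiliar
with the new register, a guest might program the inverted event filter
roughly as below. The PMSFCR_EL1.FnE bit position (bit 3) and the event bit
used (7, "mispredicted") are my reading of the Arm ARM and should be
double-checked; spe_drop_mispredicted_samples() is a hypothetical name.

/*
 * Sketch of guest code enabling the inverted event filter. With
 * FEAT_SPEv1p2, when PMSFCR_EL1.FnE is set, a sample is discarded if any
 * event selected in PMSNEVFR_EL1 was recorded for the sampled operation.
 */
#include <linux/bits.h>
#include <asm/barrier.h>
#include <asm/sysreg.h>

#define PMSFCR_EL1_FnE		BIT(3)	/* filter on inverse event (assumed bit) */
#define PMSNEVFR_EL1_E_MISPRED	BIT(7)	/* event 7: branch mispredicted (assumed bit) */

static void spe_drop_mispredicted_samples(void)
{
	/* Select the events whose occurrence discards a sample. */
	write_sysreg_s(PMSNEVFR_EL1_E_MISPRED, SYS_PMSNEVFR_EL1);
	/* Enable filtering on the inverse event set. */
	write_sysreg_s(read_sysreg_s(SYS_PMSFCR_EL1) | PMSFCR_EL1_FnE,
		       SYS_PMSFCR_EL1);
	isb();
}

Since the host SPE driver does not use PMSNEVFR_EL1 (and so never enables
the inverted filter), whatever value the guest leaves in the register is
inert while the host runs, which is what makes the lazy save/restore at
vcpu_put()/vcpu_load() above sufficient.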