@@ -373,6 +373,54 @@ static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
return true;
}
+static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+ u32 clr, set;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ /*
+  * We can potentially avoid a sysreg write by only changing the bits
+  * that differ between the guest and the host, e.g. events enabled in
+  * both the guest and the host need no sysreg write at all.
+  */
+ clr = pmu->events_host & ~pmu->events_guest;
+ set = pmu->events_guest & ~pmu->events_host;
+
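+ /*
+  * Illustrative (hypothetical) values: events_host == 0x6 and
+  * events_guest == 0x3 give clr == 0x4 (host-only counters) and
+  * set == 0x1 (guest-only counters); bit 1, enabled on both sides,
+  * is never written.
+  */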
+ if (clr)
+     write_sysreg(clr, pmcntenclr_el0);
+
+ if (set)
+     write_sysreg(set, pmcntenset_el0);
+
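+ /* Report whether we changed anything, so the caller can skip the restore. */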
+ return (clr || set);
+}
+
+static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+ u32 clr, set;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ /*
+  * The mirror of __pmu_switch_to_guest(): again, only write the bits
+  * that differ, so events enabled in both the guest and the host are
+  * left untouched.
+  */
+ clr = pmu->events_guest & ~pmu->events_host;
+ set = pmu->events_host & ~pmu->events_guest;
+
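+ /* With the hypothetical values above, this clears 0x1 and re-enables 0x4. */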
+ if (clr)
+     write_sysreg(clr, pmcntenclr_el0);
+
+ if (set)
+     write_sysreg(set, pmcntenset_el0);
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -488,12 +536,15 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
+ bool pmu_switch_needed;
u64 exit_code;
host_ctxt = vcpu->arch.host_cpu_context;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
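+ /* Switch to the guest's PMU event set; note whether anything changed. */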
+ pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
+
sysreg_save_host_state_vhe(host_ctxt);
__activate_traps(vcpu);
@@ -524,6 +575,9 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__debug_switch_to_host(vcpu);
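+ /* Undo the PMU switch only if the entry path actually changed counters. */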
+ if (pmu_switch_needed)
+     __pmu_switch_to_host(host_ctxt);
+
return exit_code;
}
@@ -532,6 +586,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
+ bool pmu_switch_needed;
u64 exit_code;
vcpu = kern_hyp_va(vcpu);
@@ -540,6 +595,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
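+ /* As in the VHE path: move to the guest's PMU event set. */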
+ pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
+
__sysreg_save_state_nvhe(host_ctxt);
__activate_traps(vcpu);
@@ -586,6 +643,9 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
*/
__debug_switch_to_host(vcpu);
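+ /* Restore the host's PMU event set if it was changed on entry. */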
+ if (pmu_switch_needed)
+     __pmu_switch_to_host(host_ctxt);
+
return exit_code;
}