diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -360,8 +360,6 @@ struct kvm_vcpu_arch {
         struct {
                 /* {Break,watch}point registers */
                 struct kvm_guest_debug_arch regs;
-                /* Statistical profiling extension */
-                u64 pmscr_el1;
                 /* Self-hosted trace */
                 u64 trfcr_el1;
         } host_debug_state;
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -84,8 +84,10 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
 void __debug_switch_to_host(struct kvm_vcpu *vcpu);
 
 #ifdef __KVM_NVHE_HYPERVISOR__
-void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
-void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu,
+                                    struct kvm_cpu_context *host_ctxt);
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu,
+                                       struct kvm_cpu_context *host_ctxt);
 #endif
 
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -81,11 +81,12 @@ static void __debug_restore_trace(u64 trfcr_el1)
         write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
 }
 
-void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu,
+                                    struct kvm_cpu_context *host_ctxt)
 {
         /* Disable and flush SPE data generation */
         if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
-                __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
+                __debug_save_spe(__ctxt_sys_reg(host_ctxt, PMSCR_EL1));
         /* Disable and flush Self-Hosted Trace generation */
         if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
                 __debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
@@ -96,10 +97,11 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
         __debug_switch_to_guest_common(vcpu);
 }
 
-void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu,
+                                       struct kvm_cpu_context *host_ctxt)
 {
         if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
-                __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
+                __debug_restore_spe(ctxt_sys_reg(host_ctxt, PMSCR_EL1));
         if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
                 __debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -290,7 +290,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
          * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
          * before we load guest Stage1.
          */
-        __debug_save_host_buffers_nvhe(vcpu);
+        __debug_save_host_buffers_nvhe(vcpu, host_ctxt);
 
         __kvm_adjust_pc(vcpu);
@@ -342,7 +342,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
          * This must come after restoring the host sysregs, since a non-VHE
          * system may enable SPE here and make use of the TTBRs.
          */
-        __debug_restore_host_buffers_nvhe(vcpu);
+        __debug_restore_host_buffers_nvhe(vcpu, host_ctxt);
 
         if (pmu_switch_needed)
                 __pmu_switch_to_host(host_ctxt);
The SPE registers are now part of the KVM register context. Use the host
context to save the value of PMSCR_EL1 instead of a dedicated field in
host_debug_state.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/include/asm/kvm_host.h  |  2 --
 arch/arm64/include/asm/kvm_hyp.h   |  6 ++++--
 arch/arm64/kvm/hyp/nvhe/debug-sr.c | 10 ++++++----
 arch/arm64/kvm/hyp/nvhe/switch.c   |  4 ++--
 4 files changed, 12 insertions(+), 10 deletions(-)
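A note on the accessors used in the debug-sr.c hunks: __debug_save_spe() is
passed a pointer to fill in, while __debug_restore_spe() is passed the saved
value, which is why the save path uses __ctxt_sys_reg(host_ctxt, PMSCR_EL1)
(a pointer into the context's sys_regs[] array) and the restore path uses
ctxt_sys_reg(host_ctxt, PMSCR_EL1) (the dereferenced value). The standalone C
program below is only a sketch of that pattern so it can be compiled and run
outside the kernel; the struct, register index and helper bodies are
simplified stand-ins, not the real definitions (the real accessors live in
arch/arm64/include/asm/kvm_host.h).

/*
 * Standalone illustration (userspace, not kernel code) of the accessor
 * pattern used by the patch: the host's PMSCR_EL1 value lives in the
 * context's sys_regs[] array, the save helper wants a pointer to store
 * into, and the restore helper wants the saved value back.
 */
#include <stdint.h>
#include <stdio.h>

enum { PMSCR_EL1, NR_SYS_REGS };        /* simplified register index */

struct kvm_cpu_context {
        uint64_t sys_regs[NR_SYS_REGS]; /* simplified stand-in */
};

/* Pointer accessor: lets a helper write straight into the context. */
static inline uint64_t *__ctxt_sys_reg(struct kvm_cpu_context *ctxt, int r)
{
        return &ctxt->sys_regs[r];
}

/* Value accessor: dereferences the pointer accessor. */
#define ctxt_sys_reg(c, r)      (*__ctxt_sys_reg((c), (r)))

/* Stand-ins for __debug_save_spe()/__debug_restore_spe(). */
static void save_spe(uint64_t *pmscr_el1)
{
        *pmscr_el1 = 0x21;              /* pretend this is the live PMSCR_EL1 */
}

static void restore_spe(uint64_t pmscr_el1)
{
        printf("restoring PMSCR_EL1 = %#llx\n", (unsigned long long)pmscr_el1);
}

int main(void)
{
        struct kvm_cpu_context host_ctxt = { { 0 } };

        /* Save path: pass a pointer into the context, as the patch does. */
        save_spe(__ctxt_sys_reg(&host_ctxt, PMSCR_EL1));
        /* Restore path: read the saved value back out of the context. */
        restore_spe(ctxt_sys_reg(&host_ctxt, PMSCR_EL1));
        return 0;
}

Keeping PMSCR_EL1 in the host context this way means the host's saved sysregs
all live in one structure, which is what lets the kvm_host.h hunk drop the
dedicated host_debug_state.pmscr_el1 field.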