@@ -132,10 +132,14 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);

-void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
-void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
-void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt);
-void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt);
+void __sysreg_save_host_state_nvhe(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_host_state_nvhe(struct kvm_cpu_context *ctxt);
+void __sysreg_save_guest_state_nvhe(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_guest_state_nvhe(struct kvm_cpu_context *ctxt);
+void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
+void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
+void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
+void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -352,7 +352,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;

- __sysreg_save_host_state(host_ctxt);
+ sysreg_save_host_state_vhe(host_ctxt);

__activate_traps(vcpu);
__activate_vm(vcpu->kvm);
@@ -364,7 +364,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
*/
__sysreg32_restore_state(vcpu);
- __sysreg_restore_guest_state(guest_ctxt);
+ sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);

do {
@@ -374,13 +374,13 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));

- __sysreg_save_guest_state(guest_ctxt);
+ sysreg_save_guest_state_vhe(guest_ctxt);
__sysreg32_save_state(vcpu);
__vgic_save_state(vcpu);

__deactivate_traps(vcpu);

- __sysreg_restore_host_state(host_ctxt);
+ sysreg_restore_host_state_vhe(host_ctxt);

/*
* This must come after restoring the host sysregs, since a non-VHE
@@ -404,7 +404,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;

- __sysreg_save_host_state(host_ctxt);
+ __sysreg_save_host_state_nvhe(host_ctxt);

__activate_traps(vcpu);
__activate_vm(kern_hyp_va(vcpu->kvm));
@@ -417,7 +417,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
*/
__sysreg32_restore_state(vcpu);
- __sysreg_restore_guest_state(guest_ctxt);
+ __sysreg_restore_guest_state_nvhe(guest_ctxt);
__debug_switch_to_guest(vcpu);

do {
@@ -427,7 +427,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));

- __sysreg_save_guest_state(guest_ctxt);
+ __sysreg_save_guest_state_nvhe(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_disable_traps(vcpu);
__vgic_save_state(vcpu);
@@ -435,7 +435,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);

- __sysreg_restore_host_state(host_ctxt);
+ __sysreg_restore_host_state_nvhe(host_ctxt);

/*
* This must come after restoring the host sysregs, since a non-VHE
@@ -460,7 +460,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
__timer_disable_traps(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
- __sysreg_restore_host_state(__host_ctxt);
+ __sysreg_restore_host_state_nvhe(__host_ctxt);
}

/*
@@ -483,7 +483,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
vcpu = host_ctxt->__hyp_running_vcpu;

__deactivate_traps(vcpu);
- __sysreg_restore_host_state(host_ctxt);
+ sysreg_restore_host_state_vhe(host_ctxt);

panic(__hyp_panic_string,
spsr, elr,
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -75,15 +75,27 @@ static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
}

-void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt)
+void __hyp_text __sysreg_save_host_state_nvhe(struct kvm_cpu_context *ctxt)
+{
+ __sysreg_save_el1_state(ctxt);
+ __sysreg_save_common_state(ctxt);
+ __sysreg_save_user_state(ctxt);
+}
+
+void __hyp_text __sysreg_save_guest_state_nvhe(struct kvm_cpu_context *ctxt)
+{
+ __sysreg_save_el1_state(ctxt);
+ __sysreg_save_common_state(ctxt);
+ __sysreg_save_user_state(ctxt);
+}
+
+void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
- if (!has_vhe())
- __sysreg_save_el1_state(ctxt);
__sysreg_save_common_state(ctxt);
__sysreg_save_user_state(ctxt);
}

-void __hyp_text __sysreg_save_guest_state(struct kvm_cpu_context *ctxt)
+void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_save_el1_state(ctxt);
__sysreg_save_common_state(ctxt);
@@ -136,15 +148,27 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
}

-void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt)
+void __hyp_text __sysreg_restore_host_state_nvhe(struct kvm_cpu_context *ctxt)
+{
+ __sysreg_restore_el1_state(ctxt);
+ __sysreg_restore_common_state(ctxt);
+ __sysreg_restore_user_state(ctxt);
+}
+
+void __hyp_text __sysreg_restore_guest_state_nvhe(struct kvm_cpu_context *ctxt)
+{
+ __sysreg_restore_el1_state(ctxt);
+ __sysreg_restore_common_state(ctxt);
+ __sysreg_restore_user_state(ctxt);
+}
+
+void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
- if (!has_vhe())
- __sysreg_restore_el1_state(ctxt);
__sysreg_restore_common_state(ctxt);
__sysreg_restore_user_state(ctxt);
}

-void __hyp_text __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt)
+void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_el1_state(ctxt);
__sysreg_restore_common_state(ctxt);