--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -574,7 +574,7 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
+DECLARE_KVM_HYP_PER_CPU(kvm_host_data_t, kvm_host_data);
static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -69,7 +69,6 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
/* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(kvm_host_data);
KVM_NVHE_ALIAS(kvm_vgic_global_state);
/* Kernel constant needed to compute idmap addresses. */
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -46,7 +46,6 @@
__asm__(".arch_extension virt");
#endif
-DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
/* The VMID used in the VTTBR */
@@ -1303,7 +1302,7 @@ static void cpu_hyp_reset(void)
static void cpu_hyp_reinit(void)
{
- kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
+ kvm_init_host_cpu_context(&this_cpu_ptr_hyp(kvm_host_data)->host_ctxt);
cpu_hyp_reset();
@@ -1538,7 +1537,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
kvm_host_data_t *cpu_data;
- cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
+ cpu_data = per_cpu_ptr_hyp(kvm_host_data, cpu);
err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
if (err) {
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -30,6 +30,9 @@
/* Non-VHE copy of the kernel symbol. */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+/* Non-VHE instance of kvm_host_data. */
+DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -28,6 +28,9 @@
const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
+/* VHE instance of kvm_host_data. */
+DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -31,7 +31,7 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
*/
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
- struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+ struct kvm_host_data *ctx = this_cpu_ptr_hyp(kvm_host_data);
if (!kvm_pmu_switch_needed(attr))
return;
@@ -47,7 +47,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
*/
void kvm_clr_pmu_events(u32 clr)
{
- struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+ struct kvm_host_data *ctx = this_cpu_ptr_hyp(kvm_host_data);
ctx->pmu_events.events_host &= ~clr;
ctx->pmu_events.events_guest &= ~clr;
@@ -169,7 +169,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
if (!has_vhe())
return;
- host = this_cpu_ptr(&kvm_host_data);
+ host = this_cpu_ptr_hyp(kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
@@ -188,7 +188,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
if (!has_vhe())
return;
- host = this_cpu_ptr(&kvm_host_data);
+ host = this_cpu_ptr_hyp(kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
Host CPU context is stored in a global per-CPU variable `kvm_host_data`.
In preparation for introducing an independent per-CPU region for nVHE
hyp, create two separate instances of `kvm_host_data`, one for VHE and
one for nVHE.

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/include/asm/kvm_host.h | 2 +-
 arch/arm64/kernel/image-vars.h    | 1 -
 arch/arm64/kvm/arm.c              | 5 ++---
 arch/arm64/kvm/hyp/nvhe/switch.c  | 3 +++
 arch/arm64/kvm/hyp/vhe/switch.c   | 3 +++
 arch/arm64/kvm/pmu.c              | 8 ++++----
 6 files changed, 13 insertions(+), 9 deletions(-)
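A note on the helpers used above: DECLARE_KVM_HYP_PER_CPU,
this_cpu_ptr_hyp() and per_cpu_ptr_hyp() are introduced elsewhere in
this series, so their definitions are not part of this diff. As a
minimal sketch of the idea, assuming they build on the existing
kvm_nvhe_sym() name mangling (nVHE objects get their symbols prefixed
with __kvm_nvhe_) and on is_kernel_in_hyp_mode() to select the live
instance, they could look roughly like this:

#include <linux/percpu-defs.h>
#include <asm/virt.h>
#include <asm/kvm_asm.h>	/* kvm_nvhe_sym() */

/*
 * Sketch only: declare both instances of a hyp per-CPU variable --
 * the VHE copy under its plain name and the nVHE copy under its
 * __kvm_nvhe_-prefixed name.
 */
#define DECLARE_KVM_HYP_PER_CPU(type, sym)			\
	DECLARE_PER_CPU(type, sym);				\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

/*
 * Sketch only: resolve to whichever instance matches the mode KVM
 * runs in. On VHE the kernel itself runs at EL2, so the kernel-proper
 * copy is used; on nVHE the hyp copy is used.
 */
#define this_cpu_ptr_hyp(sym)					\
	(is_kernel_in_hyp_mode()				\
		? this_cpu_ptr(&sym)				\
		: this_cpu_ptr(&kvm_nvhe_sym(sym)))

#define per_cpu_ptr_hyp(sym, cpu)				\
	(is_kernel_in_hyp_mode()				\
		? per_cpu_ptr(&sym, cpu)			\
		: per_cpu_ptr(&kvm_nvhe_sym(sym), cpu))

With this shape, callers such as kvm_set_pmu_events() stay
mode-agnostic: the same this_cpu_ptr_hyp(kvm_host_data) expression
reaches either the VHE instance defined in hyp/vhe/switch.c or the
nVHE instance defined in hyp/nvhe/switch.c, whichever is live on the
running system.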