@@ -31,9 +31,13 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
  */
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	struct kvm_host_data *ctx;
 
-	if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
+	if (!kvm_arm_support_pmu_v3())
+		return;
+
+	ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	if (!ctx || !kvm_pmu_switch_needed(attr))
 		return;
 
 	if (!attr->exclude_host)
@@ -47,9 +51,13 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
  */
 void kvm_clr_pmu_events(u32 clr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	struct kvm_host_data *ctx;
+
+	if (!kvm_arm_support_pmu_v3())
+		return;
 
-	if (!kvm_arm_support_pmu_v3() || !ctx)
+	ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	if (!ctx)
 		return;
 
 	ctx->pmu_events.events_host &= ~clr;
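
For reference, a sketch of how the two helpers read with this change applied. Only the lines inside the hunks above are taken from the patch; the tail of each function lies outside the diff context and is filled in from the surrounding code in arch/arm64/kvm/pmu.c, so treat those lines as illustrative rather than part of this change. The point of the restructuring is that this_cpu_ptr_hyp_sym(kvm_host_data) is no longer evaluated before kvm_arm_support_pmu_v3() has confirmed a PMU is present.

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_host_data *ctx;

	/* Bail out before touching hyp per-CPU data if there is no PMUv3. */
	if (!kvm_arm_support_pmu_v3())
		return;

	ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
	if (!ctx || !kvm_pmu_switch_needed(attr))
		return;

	/* Lines below are outside the hunk context and unchanged by this patch. */
	if (!attr->exclude_host)
		ctx->pmu_events.events_host |= set;
	if (!attr->exclude_guest)
		ctx->pmu_events.events_guest |= set;
}

void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_host_data *ctx;

	/* Same early-out ordering as above. */
	if (!kvm_arm_support_pmu_v3())
		return;

	ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
	if (!ctx)
		return;

	ctx->pmu_events.events_host &= ~clr;
	/* Line below is outside the hunk context and unchanged by this patch. */
	ctx->pmu_events.events_guest &= ~clr;
}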