@@ -205,6 +205,11 @@ struct kvm_cpu_context {
struct kvm_vcpu *__hyp_running_vcpu;
};

+struct kvm_pmu_events {
+ u32 events_host;
+ u32 events_guest;
+};
+
struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
struct kvm_pmu_events pmu_events;
@@ -472,11 +477,33 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

+#define KVM_PMU_EVENTS_HOST 1
+#define KVM_PMU_EVENTS_GUEST 2
+
#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
return kvm_arch_vcpu_run_map_fp(vcpu);
}
+static inline void kvm_set_pmu_events(u32 set, int flags)
+{
+ struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+
+ if (flags & KVM_PMU_EVENTS_HOST)
+ ctx->pmu_events.events_host |= set;
+ if (flags & KVM_PMU_EVENTS_GUEST)
+ ctx->pmu_events.events_guest |= set;
+}
+static inline void kvm_clr_pmu_events(u32 clr)
+{
+ struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+
+ ctx->pmu_events.events_host &= ~clr;
+ ctx->pmu_events.events_guest &= ~clr;
+}
+#else
+static inline void kvm_set_pmu_events(u32 set, int flags) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

static inline void kvm_arm_vhe_guest_enter(void)
In order to efficiently enable/disable guest/host only perf counters at guest entry/exit, we add a struct kvm_pmu_events to kvm_host_data containing bitfields for guest and host events, as well as accessors for updating them.

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
---
 arch/arm64/include/asm/kvm_host.h | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
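As a usage sketch (not part of this patch), a caller in the arm64 PMU driver could translate a perf event's exclude_host/exclude_guest attributes into these flags when a counter is enabled or disabled. The armv8pmu_* function names and the use of event->hw.idx as the counter index below are illustrative assumptions:

/*
 * Illustrative sketch, not part of this patch: mark a counter as
 * host-only and/or guest-only based on the perf event attributes,
 * so the hyp switch code can toggle it at guest entry/exit.
 * The armv8pmu_* names are assumptions for illustration.
 */
static void armv8pmu_enable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 counter_bit = BIT(event->hw.idx);
	int flags = 0;

	if (!attr->exclude_host)
		flags |= KVM_PMU_EVENTS_HOST;
	if (!attr->exclude_guest)
		flags |= KVM_PMU_EVENTS_GUEST;

	/* Record which worlds this counter should count in */
	kvm_set_pmu_events(counter_bit, flags);
}

static void armv8pmu_disable_event_counter(struct perf_event *event)
{
	/* Drop the counter from both bitmasks once it is disabled */
	kvm_clr_pmu_events(BIT(event->hw.idx));
}

Keeping the masks in per-CPU kvm_host_data means the entry/exit path can presumably apply them without locking, since both the PMU driver and the world switch operate on the local CPU's state.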