[V1,1/4] KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch

Message ID 1414771534-29411-2-git-send-email-wei@redhat.com (mailing list archive)
State New, archived

Commit Message

Wei Huang Oct. 31, 2014, 4:05 p.m. UTC
This patch defines a new function pointer struct (kvm_pmu_ops)
to dispatch vPMU functions for both Intel and AMD. The purpose
of each function in the new struct is evident from its name. In
addition, the struct that maps event_sel bits to
PERF_TYPE_HARDWARE events is moved from Intel-specific code into
kvm_host.h, where it now serves as a common struct; struct
msr_data is likewise moved earlier in kvm_host.h so that
kvm_pmu_ops can reference it.
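
As a rough sketch of the intended dispatch pattern (not part of
this patch; every amd_pmu_* name below is hypothetical), a vendor
backend would populate the ops table like so:

	/* Hypothetical AMD backend filling in kvm_pmu_ops; the
	 * amd_pmu_* functions are assumed here, not defined by
	 * this patch. */
	static struct kvm_pmu_ops amd_pmu_ops = {
		.pmu_cpuid_update = amd_pmu_cpuid_update,
		.pmu_check_pmc    = amd_pmu_check_pmc,
		.pmu_read_pmc     = amd_pmu_read_pmc,
		.pmu_set_msr      = amd_pmu_set_msr,
		.pmu_get_msr      = amd_pmu_get_msr,
		.is_pmu_msr       = amd_is_pmu_msr,
		.pmu_deliver_pmi  = amd_pmu_deliver_pmi,
		.pmu_handle_event = amd_pmu_handle_event,
		.pmu_reset        = amd_pmu_reset,
		.pmu_init         = amd_pmu_init,
		.pmu_destroy      = amd_pmu_destroy,
	};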

Signed-off-by: Wei Huang <wei@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 35 +++++++++++++++++++++++++++++------
 arch/x86/kvm/pmu.c              | 21 ++++++++-------------
 2 files changed, 37 insertions(+), 19 deletions(-)

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ed0c30..e085cf8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -299,6 +299,12 @@  struct kvm_mmu {
 	u64 pdptrs[4]; /* pae */
 };
 
+struct msr_data {
+	bool host_initiated;
+	u32 index;
+	u64 data;
+};
+
 enum pmc_type {
 	KVM_PMC_GP = 0,
 	KVM_PMC_FIXED,
@@ -313,6 +319,29 @@  struct kvm_pmc {
 	struct kvm_vcpu *vcpu;
 };
 
+/* mapping from event selection to PERF_TYPE_HARDWARE events */
+struct kvm_event_hw_type_mapping {
+	u8 eventsel;
+	u8 unit_mask;
+	unsigned event_type;
+	bool inexact;
+};
+
+struct kvm_pmu_ops {
+	void (*pmu_cpuid_update)(struct kvm_vcpu *vcpu);
+	int (*pmu_check_pmc)(struct kvm_vcpu *vcpu, unsigned pmc);
+	int (*pmu_read_pmc)(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+	int (*pmu_set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+	int (*pmu_get_msr)(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+	bool (*is_pmu_msr)(struct kvm_vcpu *vcpu, u32 msr);
+	void (*pmu_deliver_pmi)(struct kvm_vcpu *vcpu);
+	void (*pmu_handle_event)(struct kvm_vcpu *vcpu);
+	void (*pmu_reset)(struct kvm_vcpu *vcpu);
+	void (*pmu_init)(struct kvm_vcpu *vcpu);
+	void (*pmu_destroy)(struct kvm_vcpu *vcpu);
+};
+
+/* used by both Intel and AMD; some fields are only applicable to Intel */
 struct kvm_pmu {
 	unsigned nr_arch_gp_counters;
 	unsigned nr_arch_fixed_counters;
@@ -653,12 +682,6 @@  struct kvm_vcpu_stat {
 
 struct x86_instruction_info;
 
-struct msr_data {
-	bool host_initiated;
-	u32 index;
-	u64 data;
-};
-
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 8e6b7d8..b8c7db7 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -20,12 +20,7 @@ 
 #include "cpuid.h"
 #include "lapic.h"
 
-static struct kvm_arch_event_perf_mapping {
-	u8 eventsel;
-	u8 unit_mask;
-	unsigned event_type;
-	bool inexact;
-} arch_events[] = {
+struct kvm_event_hw_type_mapping intel_arch_events[] = {
 	/* Index must match CPUID 0x0A.EBX bit vector */
 	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
 	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
@@ -37,7 +32,7 @@  static struct kvm_arch_event_perf_mapping {
 	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
 };
 
-/* mapping between fixed pmc index and arch_events array */
+/* mapping between fixed pmc index and intel_arch_events array */
 int fixed_pmc_events[] = {1, 0, 7};
 
 static bool pmc_is_gp(struct kvm_pmc *pmc)
@@ -202,16 +197,16 @@  static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
-		if (arch_events[i].eventsel == event_select
-				&& arch_events[i].unit_mask == unit_mask
+	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
+		if (intel_arch_events[i].eventsel == event_select
+				&& intel_arch_events[i].unit_mask == unit_mask
 				&& (pmu->available_event_types & (1 << i)))
 			break;
 
-	if (i == ARRAY_SIZE(arch_events))
+	if (i == ARRAY_SIZE(intel_arch_events))
 		return PERF_COUNT_HW_MAX;
 
-	return arch_events[i].event_type;
+	return intel_arch_events[i].event_type;
 }
 
 static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
@@ -265,7 +260,7 @@  static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
 		return;
 
 	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			arch_events[fixed_pmc_events[idx]].event_type,
+			intel_arch_events[fixed_pmc_events[idx]].event_type,
 			!(en & 0x2), /* exclude user */
 			!(en & 0x1), /* exclude kernel */
 			pmi, false, false);
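
For context on how the series is expected to use this table (an
assumption about later patches, not something this patch
implements): once a vendor backend registers its kvm_pmu_ops,
common x86 code would call through the pointer instead of invoking
Intel routines directly. A minimal sketch, assuming a global
'pmu_ops' pointer that later patches would install:

	/* Hypothetical dispatch site; 'pmu_ops' is an assumed global
	 * set to &intel_pmu_ops or &amd_pmu_ops at vendor init time. */
	static struct kvm_pmu_ops *pmu_ops;

	int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	{
		return pmu_ops->pmu_set_msr(vcpu, msr_info);
	}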