@@ -848,6 +848,8 @@ struct kvm_x86_ops {
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);
+
+ struct kvm_pmu_ops *(*get_pmu_ops)(void);
};
struct kvm_arch_async_pf {
@@ -858,6 +860,7 @@ struct kvm_arch_async_pf {
};
extern struct kvm_x86_ops *kvm_x86_ops;
+extern struct kvm_pmu_ops *kvm_pmu_ops;
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
s64 adjustment)
@@ -12,10 +12,10 @@ kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
- i8254.o ioapic.o irq_comm.o cpuid.o pmu_intel.o
+ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o
kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
-kvm-intel-y += vmx.o
-kvm-amd-y += svm.o
+kvm-intel-y += vmx.o pmu_intel.o
+kvm-amd-y += svm.o pmu_amd.o
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
@@ -22,6 +22,7 @@
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
+#include "pmu.h"
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
@@ -104,7 +105,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
((best->eax & 0xff00) >> 8) != 0)
return -EINVAL;
- kvm_pmu_cpuid_update(vcpu);
+ kvm_pmu_refresh(vcpu);
return 0;
}
new file mode 100644
@@ -0,0 +1,325 @@
+/*
+ * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ *
+ * Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ * Gleb Natapov <gleb@redhat.com>
+ * Wei Huang <wei@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include <asm/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+#include "pmu.h"
+
+/* Design Note:
+ * - Each perf counter is represented by "struct kvm_pmc";
+ * - There are two types of perf counters: general purpose (gp) and fixed.
+ * gp-counters are stored in gp_counters[] and fixed-counters are stored
+ * in fixed_counters[] respectively. Both of them are part of "struct
+ * kvm_pmu";
+ * - pmu.c understands the difference between gp-counters and fixed-counters.
+ * However AMD doesn't support fixed-counters;
+ * - There are three types of index to access perf counters (PMC):
+ * 1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
+ * has MSR_K7_PERFCTRn.
+ *   2. MSR Index (named msr_idx): This is the index normally used by the
+ *      RDPMC instruction. For instance, AMD's RDPMC uses 0000_0003h in
+ *      ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar
+ * mechanism, except that it also supports fixed counters.
+ * 3. Global Index (named glb_idx): glb_idx is an index specific to PMU
+ * code. Each PMC has an index stored in kvm_pmc.idx field. The
+ * mapping relationship is as the following:
+ * * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
+ * [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
+ * * AMD: [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
+ */
+
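+/* Example of the mapping above (illustrative only): on Intel, gp counter 0
+ * is reached via MSR_IA32_PERFCTR0 (msr), via ECX = 0 in RDPMC (msr_idx),
+ * and via glb_idx 0; fixed counter 1 is reached via MSR_CORE_PERF_FIXED_CTR1,
+ * via RDPMC with ECX bit 30 set and index 1, and via glb_idx
+ * INTEL_PMC_IDX_FIXED + 1.
+ */
+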
+struct kvm_pmu_ops *kvm_pmu_ops;
+
+static void pmi_trigger_fn(struct irq_work *irq_work)
+{
+ struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
+ struct kvm_vcpu *vcpu = PMU_TO_VCPU(pmu);
+
+ kvm_pmu_deliver_pmi(vcpu);
+}
+
+static void kvm_perf_overflow(struct perf_event *perf_event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+ struct kvm_pmu *pmu = PMC_TO_PMU(pmc);
+
+ if (!test_and_set_bit(pmc->idx,
+ (unsigned long *)&pmu->reprogram_pmi)) {
+ __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+ kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+ }
+}
+
+static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+ struct kvm_pmu *pmu = PMC_TO_PMU(pmc);
+
+ if (!test_and_set_bit(pmc->idx,
+ (unsigned long *)&pmu->reprogram_pmi)) {
+ __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+ kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+
+ /*
+ * Inject PMI. If vcpu was in a guest mode during NMI, PMI
+ * can be injected on a guest mode re-entry. Otherwise we can't
+ * be sure that vcpu wasn't executing hlt instruction at the
+ * time of vmexit and is not going to re-enter guest mode until
+ * woken up. So we should wake it, but this is impossible from
+ * NMI context. Do it from irq work instead.
+ */
+ if (!kvm_is_in_guest())
+ irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+ else
+ kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
+ }
+}
+
+static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
+ unsigned config, bool exclude_user,
+ bool exclude_kernel, bool intr,
+ bool in_tx, bool in_tx_cp)
+{
+ struct perf_event *event;
+ struct perf_event_attr attr = {
+ .type = type,
+ .size = sizeof(attr),
+ .pinned = true,
+ .exclude_idle = true,
+ .exclude_host = 1,
+ .exclude_user = exclude_user,
+ .exclude_kernel = exclude_kernel,
+ .config = config,
+ };
+
+ if (in_tx)
+ attr.config |= HSW_IN_TX;
+ if (in_tx_cp)
+ attr.config |= HSW_IN_TX_CHECKPOINTED;
+
+ attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
+ event = perf_event_create_kernel_counter(&attr, -1, current,
+ intr ? kvm_perf_overflow_intr:
+ kvm_perf_overflow, pmc);
+ if (IS_ERR(event)) {
+ printk_once("kvm_pmu: event creation failed %ld\n",
+ PTR_ERR(event));
+ return;
+ }
+
+ pmc->perf_event = event;
+ clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
+}
+
+void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+{
+ unsigned config, type = PERF_TYPE_RAW;
+ u8 event_select, unit_mask;
+
+ if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
+ printk_once("kvm pmu: pin control bit is ignored\n");
+
+ pmc->eventsel = eventsel;
+
+ pmc_stop_counter(pmc);
+
+ if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
+ return;
+
+ event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+ unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+
+ if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
+ ARCH_PERFMON_EVENTSEL_INV |
+ ARCH_PERFMON_EVENTSEL_CMASK |
+ HSW_IN_TX |
+ HSW_IN_TX_CHECKPOINTED))) {
+ config = kvm_pmu_ops->find_arch_event(&pmc->vcpu->arch.pmu,
+ event_select,
+ unit_mask);
+ if (config != PERF_COUNT_HW_MAX)
+ type = PERF_TYPE_HARDWARE;
+ }
+
+ if (type == PERF_TYPE_RAW)
+ config = eventsel & X86_RAW_EVENT_MASK;
+
+ pmc_reprogram_counter(pmc, type, config,
+ !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+ !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+ eventsel & ARCH_PERFMON_EVENTSEL_INT,
+ (eventsel & HSW_IN_TX),
+ (eventsel & HSW_IN_TX_CHECKPOINTED));
+}
+EXPORT_SYMBOL_GPL(reprogram_gp_counter);
+
+void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx)
+{
+ unsigned en_field = ctrl & 0x3;
+ bool pmi = ctrl & 0x8;
+
+ pmc_stop_counter(pmc);
+
+ if (!en_field || !pmc_is_enabled(pmc))
+ return;
+
+ pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+ kvm_pmu_ops->find_fixed_event(fixed_idx),
+ !(en_field & 0x2), /* exclude user */
+ !(en_field & 0x1), /* exclude kernel */
+ pmi, false, false);
+}
+EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
+
+void reprogram_counter(struct kvm_pmu *pmu, int glb_idx)
+{
+ struct kvm_pmc *pmc = kvm_pmu_ops->glb_idx_to_pmc(pmu, glb_idx);
+
+ if (!pmc)
+ return;
+
+ if (pmc_is_gp(pmc))
+ reprogram_gp_counter(pmc, pmc->eventsel);
+ else {
+ int fixed_idx = glb_idx - INTEL_PMC_IDX_FIXED;
+ u8 ctrl = fixed_ctr_ctrl_field(pmu->fixed_ctr_ctrl, fixed_idx);
+
+ reprogram_fixed_counter(pmc, ctrl, fixed_idx);
+ }
+}
+EXPORT_SYMBOL_GPL(reprogram_counter);
+
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+ u64 bitmask;
+ int bit;
+
+ bitmask = pmu->reprogram_pmi;
+
+ for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+ struct kvm_pmc *pmc = kvm_pmu_ops->glb_idx_to_pmc(pmu, bit);
+
+ if (unlikely(!pmc || !pmc->perf_event)) {
+ clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+ continue;
+ }
+
+ reprogram_counter(pmu, bit);
+ }
+}
+
+/* Returns 0 if msr_idx is a valid index to access the PMU; non-zero otherwise. */
+inline int kvm_pmu_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
+{
+ return kvm_pmu_ops->check_msr_idx(vcpu, msr_idx);
+}
+
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned msr_idx, u64 *data)
+{
+ bool fast_mode = msr_idx & (1u << 31);
+ struct kvm_pmc *pmc;
+ u64 ctr_val;
+
+ pmc = kvm_pmu_ops->msr_idx_to_pmc(vcpu, msr_idx);
+ if (!pmc)
+ return 1;
+
+ ctr_val = pmc_read_counter(pmc);
+ if (fast_mode)
+ ctr_val = (u32)ctr_val;
+
+ *data = ctr_val;
+ return 0;
+}
+
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.apic)
+ kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
+bool kvm_pmu_is_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+ return kvm_pmu_ops->is_pmu_msr(vcpu, msr);
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+{
+ return kvm_pmu_ops->get_msr(vcpu, msr, data);
+}
+
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ return kvm_pmu_ops->set_msr(vcpu, msr_info);
+}
+
+/* Refresh PMU settings. This function is generally called when the
+ * underlying settings change (such as the guest's PMU CPUID), which
+ * should rarely happen.
+ */
+void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
+{
+ kvm_pmu_ops->refresh(vcpu);
+}
+
+void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+
+ irq_work_sync(&pmu->irq_work);
+ kvm_pmu_ops->reset(vcpu);
+}
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+
+ memset(pmu, 0, sizeof(*pmu));
+ kvm_pmu_ops->init(vcpu);
+ init_irq_work(&pmu->irq_work, pmi_trigger_fn);
+ kvm_pmu_refresh(vcpu);
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_pmu_reset(vcpu);
+}
+
+/* Called before using any PMU functions above */
+int kvm_pmu_arch_init(struct kvm_x86_ops *x86_ops)
+{
+ if (x86_ops && (kvm_pmu_ops = x86_ops->get_pmu_ops()) != NULL)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+void kvm_pmu_arch_exit(void)
+{
+ kvm_pmu_ops = NULL;
+}
new file mode 100644
@@ -0,0 +1,98 @@
+#ifndef __KVM_X86_PMU_H
+#define __KVM_X86_PMU_H
+
+#define VCPU_TO_PMU(vcpu) (&(vcpu)->arch.pmu)
+#define PMU_TO_VCPU(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
+#define PMC_TO_PMU(pmc) (&(pmc)->vcpu->arch.pmu)
+
+/* retrieve the control fields of IA32_FIXED_CTR_CTRL */
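+/* e.g. fixed_ctr_ctrl_field(ctrl, 1) extracts bits 7:4 (fixed counter 1) */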
+#define fixed_ctr_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
+
+static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu = PMC_TO_PMU(pmc);
+
+ return pmu->counter_bitmask[pmc->type];
+}
+
+static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
+{
+ u64 counter, enabled, running;
+
+ counter = pmc->counter;
+ if (pmc->perf_event)
+ counter += perf_event_read_value(pmc->perf_event,
+ &enabled, &running);
+ /* FIXME: Scaling needed? */
+ return counter & pmc_bitmask(pmc);
+}
+
+static inline void pmc_stop_counter(struct kvm_pmc *pmc)
+{
+ if (pmc->perf_event) {
+ pmc->counter = pmc_read_counter(pmc);
+ perf_event_release_kernel(pmc->perf_event);
+ pmc->perf_event = NULL;
+ }
+}
+
+static inline bool pmc_is_gp(struct kvm_pmc *pmc)
+{
+ return pmc->type == KVM_PMC_GP;
+}
+
+static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
+{
+ return pmc->type == KVM_PMC_FIXED;
+}
+
+static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
+{
+ return kvm_pmu_ops->pmc_enabled(pmc);
+}
+
+/* Returns the general-purpose PMC with the specified MSR. Note that it can
+ * be used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
+ * parameter to tell them apart.
+ */
+static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
+ u32 base)
+{
+ if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
+ return &pmu->gp_counters[msr - base];
+
+ return NULL;
+}
+
+/* Returns fixed PMC with the specified MSR */
+static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
+{
+ int base = MSR_CORE_PERF_FIXED_CTR0;
+
+ if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
+ return &pmu->fixed_counters[msr - base];
+
+ return NULL;
+}
+
+void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
+void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
+void reprogram_counter(struct kvm_pmu *pmu, int glb_idx);
+
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
+int kvm_pmu_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx);
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
+bool kvm_pmu_is_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+int kvm_pmu_arch_init(struct kvm_x86_ops *x86_ops);
+void kvm_pmu_arch_exit(void);
+
+extern struct kvm_pmu_ops intel_pmu_ops;
+extern struct kvm_pmu_ops amd_pmu_ops;
+#endif /* __KVM_X86_PMU_H */
new file mode 100644
@@ -0,0 +1,97 @@
+/*
+ * KVM PMU support for AMD
+ *
+ * Copyright 2015, Red Hat, Inc. and/or its affiliates.
+ *
+ * Author:
+ * Wei Huang <wei@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Implementation is based on pmu_intel.c file
+ */
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+#include "pmu.h"
+
+static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
+ u8 event_select,
+ u8 unit_mask)
+{
+ return PERF_COUNT_HW_MAX;
+}
+
+static unsigned amd_find_fixed_event(int fixed_idx)
+{
+ return PERF_COUNT_HW_MAX;
+}
+
+static bool amd_pmc_enabled(struct kvm_pmc *pmc)
+{
+ return false;
+}
+
+static struct kvm_pmc *amd_glb_idx_to_pmc(struct kvm_pmu *pmu, int glb_idx)
+{
+ return NULL;
+}
+
+/* Returns 0 if msr_idx's corresponding MSR exists; otherwise returns 1. */
+static int amd_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
+{
+ return 1;
+}
+
+static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
+ unsigned msr_idx)
+{
+ return NULL;
+}
+
+static bool amd_is_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+ return false;
+}
+
+static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+{
+ return 1;
+}
+
+static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ return 1;
+}
+
+static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
+{
+}
+
+static void amd_pmu_init(struct kvm_vcpu *vcpu)
+{
+}
+
+static void amd_pmu_reset(struct kvm_vcpu *vcpu)
+{
+}
+
+struct kvm_pmu_ops amd_pmu_ops = {
+ .find_arch_event = amd_find_arch_event,
+ .find_fixed_event = amd_find_fixed_event,
+ .pmc_enabled = amd_pmc_enabled,
+ .glb_idx_to_pmc = amd_glb_idx_to_pmc,
+ .check_msr_idx = amd_check_msr_idx,
+ .msr_idx_to_pmc = amd_msr_idx_to_pmc,
+ .is_pmu_msr = amd_is_pmu_msr,
+ .get_msr = amd_pmu_get_msr,
+ .set_msr = amd_pmu_set_msr,
+ .refresh = amd_pmu_refresh,
+ .init = amd_pmu_init,
+ .reset = amd_pmu_reset,
+};
+EXPORT_SYMBOL_GPL(amd_pmu_ops);
@@ -1,5 +1,5 @@
/*
- * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ * KVM PMU support for Intel CPUs
*
* Copyright 2011 Red Hat, Inc. and/or its affiliates.
*
@@ -11,7 +11,6 @@
* the COPYING file in the top-level directory.
*
*/
-
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
@@ -19,6 +18,7 @@
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
+#include "pmu.h"
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
/* Index must match CPUID 0x0A.EBX bit vector */
@@ -33,167 +33,44 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
};
/* mapping between fixed pmc index and intel_arch_events array */
-int fixed_pmc_events[] = {1, 0, 7};
-
-static bool pmc_is_gp(struct kvm_pmc *pmc)
-{
- return pmc->type == KVM_PMC_GP;
-}
-
-static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
-{
- struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
-
- return pmu->counter_bitmask[pmc->type];
-}
-
-static inline bool pmc_enabled(struct kvm_pmc *pmc)
-{
- struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
- return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
-}
-
-static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
- u32 base)
-{
- if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
- return &pmu->gp_counters[msr - base];
- return NULL;
-}
+static int fixed_pmc_events[] = {1, 0, 7};
-static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
-{
- int base = MSR_CORE_PERF_FIXED_CTR0;
- if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
- return &pmu->fixed_counters[msr - base];
- return NULL;
-}
-
-static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
-{
- return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
-}
-
-static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
- if (idx < INTEL_PMC_IDX_FIXED)
- return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
- else
- return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
-}
+ int i;
-void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
-{
- if (vcpu->arch.apic)
- kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
-}
+ for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+ u8 new_ctrl = fixed_ctr_ctrl_field(data, i);
+ u8 old_ctrl = fixed_ctr_ctrl_field(pmu->fixed_ctr_ctrl, i);
+ struct kvm_pmc *pmc;
-static void trigger_pmi(struct irq_work *irq_work)
-{
- struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
- irq_work);
- struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
- arch.pmu);
+ pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
- kvm_deliver_pmi(vcpu);
-}
+ if (old_ctrl == new_ctrl)
+ continue;
-static void kvm_perf_overflow(struct perf_event *perf_event,
- struct perf_sample_data *data,
- struct pt_regs *regs)
-{
- struct kvm_pmc *pmc = perf_event->overflow_handler_context;
- struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
- if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
- __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
- kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+ reprogram_fixed_counter(pmc, new_ctrl, i);
}
-}
-static void kvm_perf_overflow_intr(struct perf_event *perf_event,
- struct perf_sample_data *data, struct pt_regs *regs)
-{
- struct kvm_pmc *pmc = perf_event->overflow_handler_context;
- struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
- if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
- __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
- kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
- /*
- * Inject PMI. If vcpu was in a guest mode during NMI PMI
- * can be ejected on a guest mode re-entry. Otherwise we can't
- * be sure that vcpu wasn't executing hlt instruction at the
- * time of vmexit and is not going to re-enter guest mode until,
- * woken up. So we should wake it, but this is impossible from
- * NMI context. Do it from irq work instead.
- */
- if (!kvm_is_in_guest())
- irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
- else
- kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
- }
+ pmu->fixed_ctr_ctrl = data;
}
-static u64 read_pmc(struct kvm_pmc *pmc)
+/* Called when the global control register (global_ctrl) has been updated. */
+static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
- u64 counter, enabled, running;
-
- counter = pmc->counter;
-
- if (pmc->perf_event)
- counter += perf_event_read_value(pmc->perf_event,
- &enabled, &running);
-
- /* FIXME: Scaling needed? */
-
- return counter & pmc_bitmask(pmc);
-}
+ int bit;
+ u64 diff = pmu->global_ctrl ^ data;
-static void stop_counter(struct kvm_pmc *pmc)
-{
- if (pmc->perf_event) {
- pmc->counter = read_pmc(pmc);
- perf_event_release_kernel(pmc->perf_event);
- pmc->perf_event = NULL;
- }
-}
+ pmu->global_ctrl = data;
-static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
- unsigned config, bool exclude_user, bool exclude_kernel,
- bool intr, bool in_tx, bool in_tx_cp)
-{
- struct perf_event *event;
- struct perf_event_attr attr = {
- .type = type,
- .size = sizeof(attr),
- .pinned = true,
- .exclude_idle = true,
- .exclude_host = 1,
- .exclude_user = exclude_user,
- .exclude_kernel = exclude_kernel,
- .config = config,
- };
- if (in_tx)
- attr.config |= HSW_IN_TX;
- if (in_tx_cp)
- attr.config |= HSW_IN_TX_CHECKPOINTED;
-
- attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
-
- event = perf_event_create_kernel_counter(&attr, -1, current,
- intr ? kvm_perf_overflow_intr :
- kvm_perf_overflow, pmc);
- if (IS_ERR(event)) {
- printk_once("kvm: pmu event creation failed %ld\n",
- PTR_ERR(event));
- return;
+ for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
+ reprogram_counter(pmu, bit);
}
-
- pmc->perf_event = event;
- clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
}
-static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
- u8 unit_mask)
+static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
+ u8 event_select,
+ u8 unit_mask)
{
int i;
@@ -209,115 +86,66 @@ static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
return intel_arch_events[i].event_type;
}
-static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+static unsigned intel_find_fixed_event(int fixed_idx)
{
- unsigned config, type = PERF_TYPE_RAW;
- u8 event_select, unit_mask;
-
- if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
- printk_once("kvm pmu: pin control bit is ignored\n");
- pmc->eventsel = eventsel;
-
- stop_counter(pmc);
-
- if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
- return;
-
- event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
- unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-
- if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
- ARCH_PERFMON_EVENTSEL_INV |
- ARCH_PERFMON_EVENTSEL_CMASK |
- HSW_IN_TX |
- HSW_IN_TX_CHECKPOINTED))) {
- config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
- unit_mask);
- if (config != PERF_COUNT_HW_MAX)
- type = PERF_TYPE_HARDWARE;
- }
-
- if (type == PERF_TYPE_RAW)
- config = eventsel & X86_RAW_EVENT_MASK;
+ if (fixed_idx >= ARRAY_SIZE(fixed_pmc_events))
+ return PERF_COUNT_HW_MAX;
- reprogram_counter(pmc, type, config,
- !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
- !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
- eventsel & ARCH_PERFMON_EVENTSEL_INT,
- (eventsel & HSW_IN_TX),
- (eventsel & HSW_IN_TX_CHECKPOINTED));
+ return intel_arch_events[fixed_pmc_events[fixed_idx]].event_type;
}
-static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+/* Check if a PMC is enabled by comparing it against the global_ctrl bits. */
+static bool intel_pmc_enabled(struct kvm_pmc *pmc)
{
- unsigned en = en_pmi & 0x3;
- bool pmi = en_pmi & 0x8;
+ struct kvm_pmu *pmu = PMC_TO_PMU(pmc);
- stop_counter(pmc);
-
- if (!en || !pmc_enabled(pmc))
- return;
-
- reprogram_counter(pmc, PERF_TYPE_HARDWARE,
- intel_arch_events[fixed_pmc_events[idx]].event_type,
- !(en & 0x2), /* exclude user */
- !(en & 0x1), /* exclude kernel */
- pmi, false, false);
-}
-
-static inline u8 fixed_en_pmi(u64 ctrl, int idx)
-{
- return (ctrl >> (idx * 4)) & 0xf;
+ return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
-static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+static struct kvm_pmc *intel_glb_idx_to_pmc(struct kvm_pmu *pmu, int glb_idx)
{
- int i;
-
- for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
- u8 en_pmi = fixed_en_pmi(data, i);
- struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
-
- if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
- continue;
-
- reprogram_fixed_counter(pmc, en_pmi, i);
+ if (glb_idx < INTEL_PMC_IDX_FIXED)
+ return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + glb_idx,
+ MSR_P6_EVNTSEL0);
+ else {
+ u32 fidx = glb_idx - INTEL_PMC_IDX_FIXED;
+ return get_fixed_pmc(pmu, fidx + MSR_CORE_PERF_FIXED_CTR0);
}
-
- pmu->fixed_ctr_ctrl = data;
}
-static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+/* Returns 0 if msr_idx's corresponding MSR exists; otherwise returns 1. */
+static int intel_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
{
- struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+ bool fixed = msr_idx & (1u << 30);
- if (!pmc)
- return;
+ msr_idx &= ~(3u << 30);
- if (pmc_is_gp(pmc))
- reprogram_gp_counter(pmc, pmc->eventsel);
- else {
- int fidx = idx - INTEL_PMC_IDX_FIXED;
- reprogram_fixed_counter(pmc,
- fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
- }
+ return (!fixed && msr_idx >= pmu->nr_arch_gp_counters) ||
+ (fixed && msr_idx >= pmu->nr_arch_fixed_counters);
}
-static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
+ unsigned msr_idx)
{
- int bit;
- u64 diff = pmu->global_ctrl ^ data;
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+ bool fixed = msr_idx & (1u << 30);
+ struct kvm_pmc *counters;
- pmu->global_ctrl = data;
+ msr_idx &= ~(3u << 30);
+ if (!fixed && msr_idx >= pmu->nr_arch_gp_counters)
+ return NULL;
+ if (fixed && msr_idx >= pmu->nr_arch_fixed_counters)
+ return NULL;
+ counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
- for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
- reprogram_idx(pmu, bit);
+ return &counters[msr_idx];
}
-bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+static bool intel_is_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
int ret;
switch (msr) {
@@ -328,20 +156,21 @@ bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
ret = pmu->version > 1;
break;
default:
- ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
- || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
- || get_fixed_pmc(pmu, msr);
+ ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
+ get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
+ get_fixed_pmc(pmu, msr);
break;
}
+
return ret;
}
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
struct kvm_pmc *pmc;
- switch (index) {
+ switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
*data = pmu->fixed_ctr_ctrl;
return 0;
@@ -355,26 +184,27 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
*data = pmu->global_ovf_ctrl;
return 0;
default:
- if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
- (pmc = get_fixed_pmc(pmu, index))) {
- *data = read_pmc(pmc);
+ if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
+ (pmc = get_fixed_pmc(pmu, msr))) {
+ *data = pmc_read_counter(pmc);
return 0;
- } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+ } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
*data = pmc->eventsel;
return 0;
}
}
+
return 1;
}
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
struct kvm_pmc *pmc;
- u32 index = msr_info->index;
+ u32 msr = msr_info->index;
u64 data = msr_info->data;
- switch (index) {
+ switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
if (pmu->fixed_ctr_ctrl == data)
return 0;
@@ -406,13 +236,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
default:
- if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
- (pmc = get_fixed_pmc(pmu, index))) {
+ if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
+ (pmc = get_fixed_pmc(pmu, msr))) {
if (!msr_info->host_initiated)
data = (s64)(s32)data;
- pmc->counter += data - read_pmc(pmc);
+ pmc->counter += data - pmc_read_counter(pmc);
return 0;
- } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+ } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
return 0;
if (!(data & pmu->reserved_bits)) {
@@ -421,43 +251,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
}
}
- return 1;
-}
-
-int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
-{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- bool fixed = pmc & (1u << 30);
- pmc &= ~(3u << 30);
- return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
- (fixed && pmc >= pmu->nr_arch_fixed_counters);
-}
-
-int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
-{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- bool fast_mode = pmc & (1u << 31);
- bool fixed = pmc & (1u << 30);
- struct kvm_pmc *counters;
- u64 ctr;
-
- pmc &= ~(3u << 30);
- if (!fixed && pmc >= pmu->nr_arch_gp_counters)
- return 1;
- if (fixed && pmc >= pmu->nr_arch_fixed_counters)
- return 1;
- counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
- ctr = read_pmc(&counters[pmc]);
- if (fast_mode)
- ctr = (u32)ctr;
- *data = ctr;
- return 0;
+ return 1;
}
-void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
struct kvm_cpuid_entry2 *entry;
union cpuid10_eax eax;
union cpuid10_edx edx;
@@ -506,66 +306,55 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}
-void kvm_pmu_init(struct kvm_vcpu *vcpu)
+static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
int i;
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
- memset(pmu, 0, sizeof(*pmu));
for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
}
+
for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
}
- init_irq_work(&pmu->irq_work, trigger_pmi);
- kvm_pmu_cpuid_update(vcpu);
}
-void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
int i;
- irq_work_sync(&pmu->irq_work);
for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
struct kvm_pmc *pmc = &pmu->gp_counters[i];
- stop_counter(pmc);
+
+ pmc_stop_counter(pmc);
pmc->counter = pmc->eventsel = 0;
}
for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
- stop_counter(&pmu->fixed_counters[i]);
+ pmc_stop_counter(&pmu->fixed_counters[i]);
pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
pmu->global_ovf_ctrl = 0;
}
-void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
-{
- kvm_pmu_reset(vcpu);
-}
-
-void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
-{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- u64 bitmask;
- int bit;
-
- bitmask = pmu->reprogram_pmi;
-
- for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
- struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
-
- if (unlikely(!pmc || !pmc->perf_event)) {
- clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
- continue;
- }
-
- reprogram_idx(pmu, bit);
- }
-}
+struct kvm_pmu_ops intel_pmu_ops = {
+ .find_arch_event = intel_find_arch_event,
+ .find_fixed_event = intel_find_fixed_event,
+ .pmc_enabled = intel_pmc_enabled,
+ .glb_idx_to_pmc = intel_glb_idx_to_pmc,
+ .check_msr_idx = intel_check_msr_idx,
+ .msr_idx_to_pmc = intel_msr_idx_to_pmc,
+ .is_pmu_msr = intel_is_pmu_msr,
+ .get_msr = intel_pmu_get_msr,
+ .set_msr = intel_pmu_set_msr,
+ .refresh = intel_pmu_refresh,
+ .init = intel_pmu_init,
+ .reset = intel_pmu_reset,
+};
+EXPORT_SYMBOL_GPL(intel_pmu_ops);
@@ -39,6 +39,7 @@
#include <asm/virtext.h>
#include "trace.h"
+#include "pmu.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
@@ -4330,6 +4331,11 @@ static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}
+static struct kvm_pmu_ops *svm_get_pmu_ops(void)
+{
+ return &amd_pmu_ops;
+}
+
static struct kvm_x86_ops svm_x86_ops = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -4432,6 +4438,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.handle_external_intr = svm_handle_external_intr,
.sched_in = svm_sched_in,
+
+ .get_pmu_ops = svm_get_pmu_ops,
};
static int __init svm_init(void)
@@ -48,6 +48,7 @@
#include <asm/apic.h>
#include "trace.h"
+#include "pmu.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
@@ -10127,6 +10128,11 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}
+static struct kvm_pmu_ops *vmx_get_pmu_ops(void)
+{
+ return &intel_pmu_ops;
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -10240,6 +10246,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
.flush_log_dirty = vmx_flush_log_dirty,
.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+
+ .get_pmu_ops = vmx_get_pmu_ops,
};
static int __init vmx_init(void)
@@ -28,6 +28,7 @@
#include "x86.h"
#include "cpuid.h"
#include "assigned-dev.h"
+#include "pmu.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
@@ -899,7 +900,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu)
u64 data;
int err;
- err = kvm_pmu_read_pmc(vcpu, ecx, &data);
+ err = kvm_pmu_rdpmc(vcpu, ecx, &data);
if (err)
return err;
kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
@@ -2279,7 +2280,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
pr = true;
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
- if (kvm_pmu_msr(vcpu, msr))
+ if (kvm_pmu_is_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
if (pr || data != 0)
@@ -2325,7 +2326,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
- if (kvm_pmu_msr(vcpu, msr))
+ if (kvm_pmu_is_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
@@ -2519,7 +2520,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_P6_PERFCTR1:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
- if (kvm_pmu_msr(vcpu, msr))
+ if (kvm_pmu_is_msr(vcpu, msr))
return kvm_pmu_get_msr(vcpu, msr, pdata);
data = 0;
break;
@@ -2642,7 +2643,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
data = vcpu->arch.osvw.status;
break;
default:
- if (kvm_pmu_msr(vcpu, msr))
+ if (kvm_pmu_is_msr(vcpu, msr))
return kvm_pmu_get_msr(vcpu, msr, pdata);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
@@ -4918,13 +4919,13 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
u32 pmc)
{
- return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
+ return kvm_pmu_check_msr_idx(emul_to_vcpu(ctxt), pmc);
}
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
u32 pmc, u64 *pdata)
{
- return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
+ return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
}
static void emulator_halt(struct x86_emulate_ctxt *ctxt)
@@ -5782,6 +5783,9 @@ int kvm_arch_init(void *opaque)
kvm_timer_init();
+ /* initialize the PMU operations pointer */
+ kvm_pmu_arch_init(ops);
+
perf_register_guest_info_callbacks(&kvm_guest_cbs);
if (cpu_has_xsave)
@@ -5812,6 +5816,7 @@ void kvm_arch_exit(void)
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
#endif
kvm_x86_ops = NULL;
+ kvm_pmu_arch_exit();
kvm_mmu_module_exit();
free_percpu(shared_msrs);
}
@@ -6218,9 +6223,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_NMI, vcpu))
process_nmi(vcpu);
if (kvm_check_request(KVM_REQ_PMU, vcpu))
- kvm_handle_pmu_event(vcpu);
+ kvm_pmu_handle_event(vcpu);
if (kvm_check_request(KVM_REQ_PMI, vcpu))
- kvm_deliver_pmi(vcpu);
+ kvm_pmu_deliver_pmi(vcpu);
if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
vcpu_scan_ioapic(vcpu);
if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
This patch converts the existing Intel-specific vPMU code into a common
vPMU interface with the following steps:

- A large portion of the Intel vPMU code is moved to pmu.c. The rest is
  re-arranged and hooked up with the newly-defined intel_pmu_ops.
- A corresponding pmu_amd.c file is created with empty stub functions for
  AMD SVM.
- The PMU function pointer, kvm_pmu_ops, is initialized by calling
  kvm_x86_ops->get_pmu_ops().
- To reduce code size, the Intel and AMD modules are now built from their
  corresponding arch and PMU files; as a result of this arrangement,
  several functions are exposed as non-static to allow calling from PMU
  code.

Signed-off-by: Wei Huang <wei@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |   3 +
 arch/x86/kvm/Makefile           |   6 +-
 arch/x86/kvm/cpuid.c            |   3 +-
 arch/x86/kvm/pmu.c              | 325 ++++++++++++++++++++++++++++++
 arch/x86/kvm/pmu.h              |  98 ++++++++++
 arch/x86/kvm/pmu_amd.c          |  97 +++++++++
 arch/x86/kvm/pmu_intel.c        | 423 ++++++++++------------------------------
 arch/x86/kvm/svm.c              |   8 +
 arch/x86/kvm/vmx.c              |   8 +
 arch/x86/kvm/x86.c              |  23 ++-
 10 files changed, 664 insertions(+), 330 deletions(-)
 create mode 100644 arch/x86/kvm/pmu.c
 create mode 100644 arch/x86/kvm/pmu.h
 create mode 100644 arch/x86/kvm/pmu_amd.c
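
For quick reference, the dispatch pattern introduced by this patch can be
summarized in the sketch below. The names are taken from the hunks above and
the bodies are condensed (normal KVM kernel context assumed), so treat it as
an illustration rather than a verbatim excerpt:

/* Vendor module (svm.c shown; vmx.c returns &intel_pmu_ops instead). */
static struct kvm_pmu_ops *svm_get_pmu_ops(void)
{
	return &amd_pmu_ops;
}

/* Common code latches the vendor ops once, from kvm_arch_init(). */
int kvm_pmu_arch_init(struct kvm_x86_ops *x86_ops)
{
	if (x86_ops && (kvm_pmu_ops = x86_ops->get_pmu_ops()) != NULL)
		return 0;
	return -EINVAL;
}

/* Every generic PMU helper then simply forwards to the vendor ops. */
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	return kvm_pmu_ops->get_msr(vcpu, msr, data);
}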