From patchwork Fri Oct 31 16:05:31 2014
X-Patchwork-Submitter: Wei Huang
X-Patchwork-Id: 5205581
From: Wei Huang
To: kvm@vger.kernel.org
Cc: pbonzini@redhat.com, gleb@kernel.org, rkrcmar@redhat.com, wei@redhat.com
Subject: [PATCH V1 1/4] KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
Date: Fri, 31 Oct 2014 12:05:31 -0400
Message-Id: <1414771534-29411-2-git-send-email-wei@redhat.com>
In-Reply-To: <1414771534-29411-1-git-send-email-wei@redhat.com>
References: <1414771534-29411-1-git-send-email-wei@redhat.com>

This patch defines a new function pointer struct (kvm_pmu_ops) to support
vPMU on both Intel and AMD. The functions in this new struct are
self-explanatory from their names. Additionally, the struct that maps
event_sel bits to PERF_TYPE_HARDWARE events is moved from Intel-specific
code into kvm_host.h, since it is now shared.
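For illustration only (not part of this patch): a vendor back-end is
expected to fill in this ops table, and common code then dispatches
through it instead of calling Intel-specific helpers directly. The
intel_* handler names and the pmu_ops pointer in the sketch below are
hypothetical placeholders for code introduced later in this series:

	/*
	 * Hypothetical sketch (not from this patch): a vendor back-end
	 * fills in kvm_pmu_ops with its own handlers.
	 */
	static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
	{
		/* vendor-specific PMI injection would go here */
	}

	static struct kvm_pmu_ops intel_pmu_ops = {
		.pmu_deliver_pmi = intel_pmu_deliver_pmi,
		/* ... remaining handlers filled in the same way ... */
	};

	/* selected once, e.g. at init, to &intel_pmu_ops or &amd_pmu_ops */
	static struct kvm_pmu_ops *pmu_ops = &intel_pmu_ops;

	/* Common code then dispatches without any vendor #ifdefs: */
	void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
	{
		pmu_ops->pmu_deliver_pmi(vcpu);
	}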
Signed-off-by: Wei Huang <wei@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 35 +++++++++++++++++++++++++++++------
 arch/x86/kvm/pmu.c              | 21 ++++++++-------------
 2 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ed0c30..e085cf8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -299,6 +299,12 @@ struct kvm_mmu {
 	u64 pdptrs[4]; /* pae */
 };
 
+struct msr_data {
+	bool host_initiated;
+	u32 index;
+	u64 data;
+};
+
 enum pmc_type {
 	KVM_PMC_GP = 0,
 	KVM_PMC_FIXED,
@@ -313,6 +319,29 @@ struct kvm_pmc {
 	struct kvm_vcpu *vcpu;
 };
 
+/* mapping from event selection to PERF_TYPE_HARDWARE events */
+struct kvm_event_hw_type_mapping {
+	u8 eventsel;
+	u8 unit_mask;
+	unsigned event_type;
+	bool inexact;
+};
+
+struct kvm_pmu_ops {
+	void (*pmu_cpuid_update)(struct kvm_vcpu *vcpu);
+	int (*pmu_check_pmc)(struct kvm_vcpu *vcpu, unsigned pmc);
+	int (*pmu_read_pmc)(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+	int (*pmu_set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+	int (*pmu_get_msr)(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+	bool (*is_pmu_msr)(struct kvm_vcpu *vcpu, u32 msr);
+	void (*pmu_deliver_pmi)(struct kvm_vcpu *vcpu);
+	void (*pmu_handle_event)(struct kvm_vcpu *vcpu);
+	void (*pmu_reset)(struct kvm_vcpu *vcpu);
+	void (*pmu_init)(struct kvm_vcpu *vcpu);
+	void (*pmu_destroy)(struct kvm_vcpu *vcpu);
+};
+
+/* used by both Intel and AMD; but some fields are only applicable to Intel. */
 struct kvm_pmu {
 	unsigned nr_arch_gp_counters;
 	unsigned nr_arch_fixed_counters;
@@ -653,12 +682,6 @@ struct kvm_vcpu_stat {
 
 struct x86_instruction_info;
 
-struct msr_data {
-	bool host_initiated;
-	u32 index;
-	u64 data;
-};
-
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void); /* __init */
 	int (*disabled_by_bios)(void); /* __init */
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 8e6b7d8..b8c7db7 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -20,12 +20,7 @@
 #include "cpuid.h"
 #include "lapic.h"
 
-static struct kvm_arch_event_perf_mapping {
-	u8 eventsel;
-	u8 unit_mask;
-	unsigned event_type;
-	bool inexact;
-} arch_events[] = {
+struct kvm_event_hw_type_mapping intel_arch_events[] = {
 	/* Index must match CPUID 0x0A.EBX bit vector */
 	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
 	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
@@ -37,7 +32,7 @@ static struct kvm_arch_event_perf_mapping {
 	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
 };
 
-/* mapping between fixed pmc index and arch_events array */
+/* mapping between fixed pmc index and intel_arch_events array */
 int fixed_pmc_events[] = {1, 0, 7};
 
 static bool pmc_is_gp(struct kvm_pmc *pmc)
@@ -202,16 +197,16 @@ static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
-		if (arch_events[i].eventsel == event_select
-		    && arch_events[i].unit_mask == unit_mask
+	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
+		if (intel_arch_events[i].eventsel == event_select
+		    && intel_arch_events[i].unit_mask == unit_mask
 		    && (pmu->available_event_types & (1 << i)))
 			break;
 
-	if (i == ARRAY_SIZE(arch_events))
+	if (i == ARRAY_SIZE(intel_arch_events))
 		return PERF_COUNT_HW_MAX;
 
-	return arch_events[i].event_type;
+	return intel_arch_events[i].event_type;
 }
 
 static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
@@ -265,7 +260,7 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
 		return;
 
 	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			arch_events[fixed_pmc_events[idx]].event_type,
+			intel_arch_events[fixed_pmc_events[idx]].event_type,
			!(en & 0x2), /* exclude user */
			!(en & 0x1), /* exclude kernel */
			pmi, false, false);
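
A note on the renamed table (behavior is unchanged): find_arch_event()
walks intel_arch_events[] for a matching eventsel/unit_mask pair whose
index bit is set in pmu->available_event_types (derived from CPUID
0x0A.EBX), and fixed_pmc_events[] maps fixed counters 0/1/2 to table
entries 1/0/7 (instructions, CPU cycles, reference cycles). The
stand-alone userspace program below, an illustration only with
simplified types and made-up names, reproduces that lookup:

	#include <stdio.h>

	/* simplified stand-in for struct kvm_event_hw_type_mapping */
	struct mapping { unsigned char eventsel, unit_mask; unsigned event_type; };

	static const struct mapping events[] = {
		{ 0x3c, 0x00, 0 },	/* PERF_COUNT_HW_CPU_CYCLES */
		{ 0xc0, 0x00, 1 },	/* PERF_COUNT_HW_INSTRUCTIONS */
	};

	/* Same shape as find_arch_event(): match selector + unit mask,
	 * gated by the availability bitmap from CPUID 0x0A.EBX. */
	static unsigned find_event(unsigned avail, unsigned char sel,
				   unsigned char mask)
	{
		unsigned i;

		for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
			if (events[i].eventsel == sel &&
			    events[i].unit_mask == mask &&
			    (avail & (1u << i)))
				return events[i].event_type;
		return ~0u; /* stands in for PERF_COUNT_HW_MAX */
	}

	int main(void)
	{
		/* 0x3c/0x00 resolves to CPU cycles when bit 0 is available */
		printf("%u\n", find_event(0x3, 0x3c, 0x00));
		return 0;
	}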