From patchwork Mon Sep 28 05:38:02 2015 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Haozhong Zhang X-Patchwork-Id: 7275321 Return-Path: X-Original-To: patchwork-kvm@patchwork.kernel.org Delivered-To: patchwork-parsemail@patchwork1.web.kernel.org Received: from mail.kernel.org (mail.kernel.org [198.145.29.136]) by patchwork1.web.kernel.org (Postfix) with ESMTP id A14F49F36A for ; Mon, 28 Sep 2015 05:41:25 +0000 (UTC) Received: from mail.kernel.org (localhost [127.0.0.1]) by mail.kernel.org (Postfix) with ESMTP id 8E85F206E1 for ; Mon, 28 Sep 2015 05:41:24 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 6C727206DF for ; Mon, 28 Sep 2015 05:41:23 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1756212AbbI1FjH (ORCPT ); Mon, 28 Sep 2015 01:39:07 -0400 Received: from mga01.intel.com ([192.55.52.88]:28763 "EHLO mga01.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1756090AbbI1FjE (ORCPT ); Mon, 28 Sep 2015 01:39:04 -0400 Received: from orsmga002.jf.intel.com ([10.7.209.21]) by fmsmga101.fm.intel.com with ESMTP; 27 Sep 2015 22:38:45 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.17,601,1437462000"; d="scan'208";a="814454112" Received: from hzzhang-optiplex-9020.sh.intel.com (HELO localhost) ([10.239.12.62]) by orsmga002.jf.intel.com with ESMTP; 27 Sep 2015 22:38:42 -0700 From: Haozhong Zhang To: kvm@vger.kernel.org Cc: Gleb Natapov , Paolo Bonzini , Thomas Gleixner , Ingo Molnar , "H. 
Peter Anvin" , x86@kernel.org, Joerg Roedel , Wanpeng Li , Xiao Guangrong , =?UTF-8?q?Mihai=20Don=C8=9Bu?= , Andy Lutomirski , Kai Huang , linux-kernel@vger.kernel.org, Haozhong Zhang Subject: [PATCH 03/12] KVM: x86: Add a common TSC scaling function Date: Mon, 28 Sep 2015 13:38:02 +0800 Message-Id: <1443418691-24050-4-git-send-email-haozhong.zhang@intel.com> X-Mailer: git-send-email 2.4.8 In-Reply-To: <1443418691-24050-1-git-send-email-haozhong.zhang@intel.com> References: <1443418691-24050-1-git-send-email-haozhong.zhang@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org X-Spam-Status: No, score=-6.9 required=5.0 tests=BAYES_00, RCVD_IN_DNSWL_HI, T_RP_MATCHES_RCVD, UNPARSEABLE_RELAY autolearn=unavailable version=3.3.1 X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail.kernel.org X-Virus-Scanned: ClamAV using ClamSMTP VMX and SVM calculate the TSC scaling ratio in a similar logic, so this patch generalizes it to a common TSC scaling function. 
Signed-off-by: Haozhong Zhang --- arch/x86/kvm/svm.c | 48 +++------------------------------ arch/x86/kvm/x86.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/kvm_host.h | 4 ++- 3 files changed, 77 insertions(+), 45 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a3186e2..1a333bd 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -209,7 +209,6 @@ static int nested_svm_intercept(struct vcpu_svm *svm); static int nested_svm_vmexit(struct vcpu_svm *svm); static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); -static u64 __scale_tsc(u64 ratio, u64 tsc); enum { VMCB_INTERCEPTS, /* Intercept vectors, TSC offset, @@ -947,21 +946,7 @@ static __init int svm_hardware_setup(void) kvm_enable_efer_bits(EFER_FFXSR); if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { - u64 max; - kvm_has_tsc_control = true; - - /* - * Make sure the user can only configure tsc_khz values that - * fit into a signed integer. - * A min value is not calculated needed because it will always - * be 1 on all machines and a value of 0 is used to disable - * tsc-scaling for the vcpu. 
- */ - max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX)); - - kvm_max_guest_tsc_khz = max; - kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX; kvm_tsc_scaling_ratio_frac_bits = 32; kvm_tsc_scaling_ratio_rsvd = TSC_RATIO_RSVD; @@ -1030,31 +1015,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) seg->base = 0; } -static u64 __scale_tsc(u64 ratio, u64 tsc) -{ - u64 mult, frac, _tsc; - - mult = ratio >> 32; - frac = ratio & ((1ULL << 32) - 1); - - _tsc = tsc; - _tsc *= mult; - _tsc += (tsc >> 32) * frac; - _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32; - - return _tsc; -} - -static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc) -{ - u64 _tsc = tsc; - - if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT) - _tsc = __scale_tsc(vcpu->arch.tsc_scaling_ratio, tsc); - - return _tsc; -} - static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) { u64 ratio; @@ -1123,7 +1083,7 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho if (host) { if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT) WARN_ON(adjustment < 0); - adjustment = svm_scale_tsc(vcpu, (u64)adjustment); + adjustment = kvm_scale_tsc(vcpu, (u64)adjustment); } svm->vmcb->control.tsc_offset += adjustment; @@ -1141,7 +1101,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) { u64 tsc; - tsc = svm_scale_tsc(vcpu, rdtsc()); + tsc = kvm_scale_tsc(vcpu, rdtsc()); return target_tsc - tsc; } @@ -3166,7 +3126,7 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) { struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu)); return vmcb->control.tsc_offset + - svm_scale_tsc(vcpu, host_tsc); + kvm_scale_tsc(vcpu, host_tsc); } static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -3176,7 +3136,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr_info->index) { case MSR_IA32_TSC: { msr_info->data = svm->vmcb->control.tsc_offset + - svm_scale_tsc(vcpu, 
rdtsc()); + kvm_scale_tsc(vcpu, rdtsc()); break; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4a521b4..920c302 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1321,6 +1321,64 @@ static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; } +/* + * Multiply tsc by a fixed point number represented by ratio. + * + * The most significant 64-N bits (mult) of ratio represent the + * integral part of the fixed point number; the remaining N bits + * (frac) represent the fractional part, ie. ratio represents a fixed + * point number (mult + frac * 2^(-N)). + * + * N.B: we always assume not all 64 bits of ratio are used for the + * fractional part and the ratio has at least 1 bit for the fractional + * part, i.e. 0 < N < 64. + * + * N equals to kvm_tsc_scaling_ratio_frac_bits. + */ +static u64 __scale_tsc(u64 ratio, u64 tsc) +{ + u64 mult, frac, mask, _tsc; + int width, nr; + + BUG_ON(kvm_tsc_scaling_ratio_frac_bits >= 64 || + kvm_tsc_scaling_ratio_frac_bits == 0); + + mult = ratio >> kvm_tsc_scaling_ratio_frac_bits; + mask = (1ULL << kvm_tsc_scaling_ratio_frac_bits) - 1; + frac = ratio & mask; + + width = 64 - kvm_tsc_scaling_ratio_frac_bits; + mask = (1ULL << width) - 1; + nr = kvm_tsc_scaling_ratio_frac_bits; + + _tsc = tsc; + _tsc *= mult; + _tsc += (tsc >> kvm_tsc_scaling_ratio_frac_bits) * frac; + + while (nr >= width) { + _tsc += (((tsc >> (nr - width)) & mask) * frac) >> (64 - nr); + nr -= width; + } + + if (nr > 0) + _tsc += ((tsc & ((1ULL << nr) - 1)) * frac) >> + kvm_tsc_scaling_ratio_frac_bits; + + return _tsc; +} + +u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc) +{ + u64 _tsc = tsc; + u64 ratio = vcpu->arch.tsc_scaling_ratio; + + if (ratio != kvm_default_tsc_scaling_ratio) + _tsc = __scale_tsc(ratio, tsc); + + return _tsc; +} +EXPORT_SYMBOL_GPL(kvm_scale_tsc); + void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct kvm *kvm = vcpu->kvm; @@ 
-7290,6 +7348,18 @@ int kvm_arch_hardware_setup(void) if (r != 0) return r; + /* + * Make sure the user can only configure tsc_khz values that + * fit into a signed integer. + * A min value is not needed because it will always + * be 1 on all machines. + */ + if (kvm_has_tsc_control) { + u64 max = min(0x7fffffffULL, + __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz)); + kvm_max_guest_tsc_khz = max; + } + kvm_init_msr_list(); return 0; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1bef9e2..3c43e3e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1144,5 +1144,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { } #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ -#endif +u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc); + +#endif