From patchwork Sat May 30 10:59:23 2015
X-Patchwork-Submitter: Xiao Guangrong
X-Patchwork-Id: 6512851
From: Xiao Guangrong
To: pbonzini@redhat.com
Cc: gleb@kernel.org, mtosatti@redhat.com, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org, Xiao Guangrong
Subject: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type
Date: Sat, 30 May 2015 18:59:23 +0800
Message-Id: <1432983566-15773-13-git-send-email-guangrong.xiao@linux.intel.com>
X-Mailer: git-send-email 2.1.0
In-Reply-To: <1432983566-15773-1-git-send-email-guangrong.xiao@linux.intel.com>
References: <1432983566-15773-1-git-send-email-guangrong.xiao@linux.intel.com>
X-Mailing-List: kvm@vger.kernel.org

It walks all MTRRs and gets the memory cache type setting for the
specified range; it also checks whether the range is fully covered
by MTRRs.

Signed-off-by: Xiao Guangrong
---
 arch/x86/kvm/mtrr.c | 183 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 183 insertions(+)

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e59d138..35f86303 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
 	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
 }
 
+struct mtrr_looker {
+	/* input fields. */
+	struct kvm_mtrr *mtrr_state;
+	u64 start;
+	u64 end;
+
+	/* output fields. */
+	int mem_type;
+	/* true if [start, end) is not fully covered by MTRRs. */
+	bool partial_map;
+
+	/* private fields. */
+	union {
+		/* used for fixed MTRRs. */
+		struct {
+			int index;
+			int seg;
+		};
+
+		/* used for var MTRRs. */
+		struct {
+			struct kvm_mtrr_range *range;
+			/* max address that has been covered by var MTRRs. */
+			u64 start_max;
+		};
+	};
+
+	bool fixed;
+};
+
+static void mtrr_lookup_init(struct mtrr_looker *looker,
+			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
+{
+	looker->mtrr_state = mtrr_state;
+	looker->start = start;
+	looker->end = end;
+}
+
+static u64 fixed_mtrr_range_end_addr(int seg, int index)
+{
+	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
+
+	return mtrr_seg->start + mtrr_seg->range_size * index;
+}
+
+static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
+{
+	int seg, index;
+
+	if (!looker->mtrr_state->fixed_mtrr_enabled)
+		return false;
+
+	seg = fixed_mtrr_addr_to_seg(looker->start);
+	if (seg < 0)
+		return false;
+
+	looker->fixed = true;
+	index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
+	looker->index = index;
+	looker->seg = seg;
+	looker->mem_type = looker->mtrr_state->fixed_ranges[index];
+	looker->start = fixed_mtrr_range_end_addr(seg, index);
+	return true;
+}
+
+static bool match_var_range(struct mtrr_looker *looker,
+			    struct kvm_mtrr_range *range)
+{
+	u64 start, end;
+
+	var_mtrr_range(range, &start, &end);
+	if (!(start >= looker->end || end <= looker->start)) {
+		looker->range = range;
+		looker->mem_type = range->base & 0xff;
+
+		/*
+		 * this function is called while walking the sorted
+		 * kvm_mtrr.head list, so @range is the range with the
+		 * lowest base address that overlaps
+		 * [looker->start_max, looker->end).
+		 */
+		looker->partial_map |= looker->start_max < start;
+
+		/* update the max address that has been covered. */
+		looker->start_max = max(looker->start_max, end);
+		return true;
+	}
+
+	return false;
+}
+
+static void mtrr_lookup_var_start(struct mtrr_looker *looker)
+{
+	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+	struct kvm_mtrr_range *range;
+
+	looker->fixed = false;
+	looker->partial_map = false;
+	looker->start_max = looker->start;
+	looker->mem_type = -1;
+
+	list_for_each_entry(range, &mtrr_state->head, node)
+		if (match_var_range(looker, range))
+			return;
+
+	looker->partial_map = true;
+}
+
+static void mtrr_lookup_fixed_next(struct mtrr_looker *looker)
+{
+	struct fixed_mtrr_segment *eseg = &fixed_seg_table[looker->seg];
+	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+	u64 end;
+
+	if (looker->start >= looker->end) {
+		looker->mem_type = -1;
+		looker->partial_map = false;
+		return;
+	}
+
+	WARN_ON(!looker->fixed);
+
+	looker->index++;
+	end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+
+	/* switch to the next segment. */
+	if (end >= eseg->end) {
+		looker->seg++;
+		looker->index = 0;
+
+		/* all fixed MTRRs have been looked up; switch to var MTRRs. */
+		if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
+			return mtrr_lookup_var_start(looker);
+
+		end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+	}
+
+	looker->mem_type = mtrr_state->fixed_ranges[looker->index];
+	looker->start = end;
+}
+
+static void mtrr_lookup_var_next(struct mtrr_looker *looker)
+{
+	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+
+	WARN_ON(looker->fixed);
+
+	looker->mem_type = -1;
+
+	list_for_each_entry_continue(looker->range, &mtrr_state->head, node)
+		if (match_var_range(looker, looker->range))
+			return;
+
+	looker->partial_map |= looker->start_max < looker->end;
+}
+
+static void mtrr_lookup_start(struct mtrr_looker *looker)
+{
+	looker->mem_type = -1;
+
+	if (!looker->mtrr_state->mtrr_enabled) {
+		looker->partial_map = true;
+		return;
+	}
+
+	if (!mtrr_lookup_fixed_start(looker))
+		mtrr_lookup_var_start(looker);
+}
+
+static void mtrr_lookup_next(struct mtrr_looker *looker)
+{
+	WARN_ON(looker->mem_type == -1);
+
+	if (looker->fixed)
+		mtrr_lookup_fixed_next(looker);
+	else
+		mtrr_lookup_var_next(looker);
+}
+
+#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
+	for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_), \
+	     mtrr_lookup_start(_looker_); (_looker_)->mem_type != -1; \
+	     mtrr_lookup_next(_looker_))
+
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
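
For reference, below is a minimal caller sketch (not part of this patch)
showing how the iterator is intended to be consumed: walk every memory
type that overlaps a gfn range and fall back to the default type when the
looker reports partial coverage. The function name example_range_mem_type(),
the helper mtrr_default_memtype(), and the "mixed types => UC" policy are
illustrative assumptions only.

/*
 * Illustrative only -- not part of this patch.  mtrr_default_memtype()
 * is a placeholder for however this series exposes the MTRR default
 * memory type.
 */
static u8 example_range_mem_type(struct kvm_vcpu *vcpu, u64 start, u64 end)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_looker looker;
	int type = -1;

	mtrr_for_each_mem_type(&looker, mtrr_state, start, end) {
		/* first matched chunk: just record its type. */
		if (type == -1) {
			type = looker.mem_type;
			continue;
		}

		/* different MTRR types within the range: degrade to UC. */
		if (type != looker.mem_type)
			return MTRR_TYPE_UNCACHABLE;
	}

	/* some part of [start, end) hit no MTRR: use the default type. */
	if (looker.partial_map || type == -1)
		return mtrr_default_memtype(mtrr_state);

	return (u8)type;
}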