From patchwork Thu Apr 30 10:24:36 2015
X-Patchwork-Submitter: Xiao Guangrong <guangrong.xiao@linux.intel.com>
X-Patchwork-Id: 6304941
From: guangrong.xiao@linux.intel.com
To: pbonzini@redhat.com
Cc: gleb@kernel.org, mtosatti@redhat.com, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org,
	Xiao Guangrong <guangrong.xiao@linux.intel.com>
Subject: [PATCH 5/9] KVM: MMU: introduce for_each_slot_rmap
Date: Thu, 30 Apr 2015 18:24:36 +0800
Message-Id: <1430389490-24602-6-git-send-email-guangrong.xiao@linux.intel.com>
X-Mailer: git-send-email 1.9.3
In-Reply-To: <1430389490-24602-1-git-send-email-guangrong.xiao@linux.intel.com>
References: <1430389490-24602-1-git-send-email-guangrong.xiao@linux.intel.com>
X-Mailing-List: kvm@vger.kernel.org

From: Xiao Guangrong <guangrong.xiao@linux.intel.com>

Introduce for_each_slot_rmap to unify the rmap-walking code currently
duplicated between kvm_handle_hva_range and slot_handle_level; it will
also be used by a later patch.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
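[ Note for reviewers: the walk order the new iterator implements can be
  modelled by the standalone sketch below. It is illustration only, not
  part of the patch; MOCK_PT_PAGE_TABLE_LEVEL, MOCK_NR_PAGE_SIZES and
  MOCK_HPAGE_GFN_SHIFT() are simplified stand-ins for the kernel's
  PT_PAGE_TABLE_LEVEL, KVM_NR_PAGE_SIZES and KVM_HPAGE_GFN_SHIFT(), and
  the gfn range is made up. ]

/*
 * Standalone model of the walk order (sketch only, not kernel code).
 */
#include <stdio.h>

typedef unsigned long gfn_t;

#define MOCK_PT_PAGE_TABLE_LEVEL	1
#define MOCK_NR_PAGE_SIZES		3
#define MOCK_HPAGE_GFN_SHIFT(level)	\
	(((level) - MOCK_PT_PAGE_TABLE_LEVEL) * 9)

int main(void)
{
	gfn_t start_gfn = 0, end_gfn = 1024;
	int level;

	/*
	 * Like for_each_slot_rmap: visit every rmap entry covering
	 * [start_gfn, end_gfn] at one level, then move to the next
	 * level; the per-level stride is what slot_rmap_walk_next()
	 * applies through KVM_HPAGE_GFN_SHIFT().
	 */
	for (level = MOCK_PT_PAGE_TABLE_LEVEL;
	     level < MOCK_PT_PAGE_TABLE_LEVEL + MOCK_NR_PAGE_SIZES;
	     ++level) {
		gfn_t gfn, stride = 1UL << MOCK_HPAGE_GFN_SHIFT(level);
		unsigned long entries = 0;

		for (gfn = start_gfn; gfn <= end_gfn; gfn += stride)
			entries++;

		printf("level %d: %lu rmap entries\n", level, entries);
	}

	return 0;
}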
 arch/x86/kvm/mmu.c | 144 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 99 insertions(+), 45 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4d98c6c..fea1e83 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1417,6 +1417,63 @@ restart:
 	return 0;
 }
 
+struct slot_rmap_walker {
+	/* input fields. */
+	struct kvm_memory_slot *slot;
+	gfn_t start_gfn;
+	gfn_t end_gfn;
+	int start_level;
+	int end_level;
+
+	/* output fields. */
+	gfn_t gfn;
+	unsigned long *rmap;
+	int level;
+
+	/* private field. */
+	unsigned long *end_rmap;
+};
+
+static void rmap_walk_init_level(struct slot_rmap_walker *walker, int level)
+{
+	struct kvm_memory_slot *slot = walker->slot;
+
+	walker->level = level;
+	walker->gfn = walker->start_gfn;
+	walker->rmap = __gfn_to_rmap(walker->gfn, walker->level, slot);
+	walker->end_rmap = __gfn_to_rmap(walker->end_gfn, walker->level, slot);
+}
+
+static void slot_rmap_walk_init(struct slot_rmap_walker *walker)
+{
+	rmap_walk_init_level(walker, walker->start_level);
+}
+
+static bool slot_rmap_walk_okay(struct slot_rmap_walker *walker)
+{
+	return !!walker->rmap;
+}
+
+static void slot_rmap_walk_next(struct slot_rmap_walker *walker)
+{
+	if (++walker->rmap <= walker->end_rmap) {
+		walker->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(walker->level));
+		return;
+	}
+
+	if (++walker->level > walker->end_level) {
+		walker->rmap = NULL;
+		return;
+	}
+
+	rmap_walk_init_level(walker, walker->level);
+}
+
+#define for_each_slot_rmap(_walker_)				\
+	for (slot_rmap_walk_init(_walker_);			\
+	     slot_rmap_walk_okay(_walker_);			\
+	     slot_rmap_walk_next(_walker_))
+
 static int kvm_handle_hva_range(struct kvm *kvm,
 				unsigned long start,
 				unsigned long end,
@@ -1428,10 +1485,10 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 					       int level,
 					       unsigned long data))
 {
-	int j;
-	int ret = 0;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	struct slot_rmap_walker walker;
+	int ret = 0;
 
 	slots = kvm_memslots(kvm);
 
@@ -1451,26 +1508,18 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 		gfn_start = hva_to_gfn_memslot(hva_start, memslot);
 		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
-		for (j = PT_PAGE_TABLE_LEVEL;
-		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
-			unsigned long idx, idx_end;
-			unsigned long *rmapp;
-			gfn_t gfn = gfn_start;
-
-			/*
-			 * {idx(page_j) | page_j intersects with
-			 *  [hva_start, hva_end)} = {idx, idx+1, ..., idx_end}.
-			 */
-			idx = gfn_to_index(gfn_start, memslot->base_gfn, j);
-			idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j);
-
-			rmapp = __gfn_to_rmap(gfn_start, j, memslot);
-
-			for (; idx <= idx_end;
-			       ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j)))
-				ret |= handler(kvm, rmapp++, memslot,
-					       gfn, j, data);
-		}
+		walker = (struct slot_rmap_walker) {
+			.slot = memslot,
+			.start_gfn = gfn_start,
+			.end_gfn = gfn_end - 1,
+			.start_level = PT_PAGE_TABLE_LEVEL,
+			.end_level = PT_PAGE_TABLE_LEVEL +
+				     KVM_NR_PAGE_SIZES - 1,
+		};
+
+		for_each_slot_rmap(&walker)
+			ret |= handler(kvm, walker.rmap, memslot, walker.gfn,
+				       walker.level, data);
 	}
 
 	return ret;
@@ -4404,34 +4453,29 @@ typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap);
 
 /* The caller should hold mmu-lock before calling this function. */
 static bool
-slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-		  slot_level_handler fn, int min_level, int max_level,
-		  bool lock_flush_tlb)
+slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			slot_level_handler fn, int min_level, int max_level,
+			gfn_t start_gfn, gfn_t last_gfn, bool lock_flush_tlb)
 {
-	unsigned long last_gfn;
 	bool flush = false;
-	int level;
-
-	last_gfn = memslot->base_gfn + memslot->npages - 1;
-
-	for (level = min_level; level <= max_level; ++level) {
-		unsigned long *rmapp;
-		unsigned long last_index, index;
-
-		rmapp = memslot->arch.rmap[level - PT_PAGE_TABLE_LEVEL];
-		last_index = gfn_to_index(last_gfn, memslot->base_gfn, level);
+	struct slot_rmap_walker walker = {
+		.slot = memslot,
+		.start_gfn = start_gfn,
+		.end_gfn = last_gfn,
+		.start_level = min_level,
+		.end_level = max_level,
+	};
 
-		for (index = 0; index <= last_index; ++index, ++rmapp) {
-			if (*rmapp)
-				flush |= fn(kvm, rmapp);
+	for_each_slot_rmap(&walker) {
+		if (*walker.rmap)
+			flush |= fn(kvm, walker.rmap);
 
-			if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-				if (flush && lock_flush_tlb) {
-					kvm_flush_remote_tlbs(kvm);
-					flush = false;
-				}
-				cond_resched_lock(&kvm->mmu_lock);
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+			if (flush && lock_flush_tlb) {
+				kvm_flush_remote_tlbs(kvm);
+				flush = false;
 			}
+			cond_resched_lock(&kvm->mmu_lock);
 		}
 	}
 
@@ -4439,6 +4483,16 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 }
 
 static bool
+slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+		  slot_level_handler fn, int min_level, int max_level,
+		  bool lock_flush_tlb)
+{
+	return slot_handle_level_range(kvm, memslot, fn, min_level,
+			max_level, memslot->base_gfn,
+			memslot->base_gfn + memslot->npages - 1, lock_flush_tlb);
+}
+
+static bool
 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		      slot_level_handler fn, bool lock_flush_tlb)
 {
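[ Note for reviewers: slot_handle_level_range exposes the gfn range so
  that the later patch mentioned in the changelog can operate on a
  sub-range of a slot rather than the whole slot. A hypothetical caller
  could look like the sketch below; zap_slot_gfn_range and my_zap_rmapp
  are made-up names, the latter standing for any handler of
  slot_level_handler type. ]

/*
 * Hypothetical caller (illustration only, not part of this series).
 * my_zap_rmapp stands for any handler of slot_level_handler type.
 */
static bool my_zap_rmapp(struct kvm *kvm, unsigned long *rmap);

static bool zap_slot_gfn_range(struct kvm *kvm,
			       struct kvm_memory_slot *memslot,
			       gfn_t start, gfn_t last)
{
	/* Walk only [start, last] instead of the whole slot. */
	return slot_handle_level_range(kvm, memslot, my_zap_rmapp,
				       PT_PAGE_TABLE_LEVEL,
				       PT_PAGE_TABLE_LEVEL +
				       KVM_NR_PAGE_SIZES - 1,
				       start, last, true);
}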