From patchwork Mon Apr 26 09:58:54 2010
X-Patchwork-Submitter: Takuya Yoshikawa
X-Patchwork-Id: 95025
Date: Mon, 26 Apr 2010 18:58:54 +0900
From: Takuya Yoshikawa
To: avi@redhat.com, mtosatti@redhat.com
Cc: kvm@vger.kernel.org
Subject: [PATCH 1/1] KVM: x86: avoid unnecessary bitmap allocation when memslot is clean
Message-Id: <20100426185854.5d606a04.yoshikawa.takuya@oss.ntt.co.jp>
In-Reply-To: <20100426185657.7f68c3ad.yoshikawa.takuya@oss.ntt.co.jp>
References: <20100426185657.7f68c3ad.yoshikawa.takuya@oss.ntt.co.jp>
X-Mailer: Sylpheed 2.6.0 (GTK+ 2.16.1; i486-pc-linux-gnu)
X-Mailing-List: kvm@vger.kernel.org

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6b2ce1d..0086d64 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2744,7 +2744,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	unsigned long n;
 	unsigned long is_dirty = 0;
-	unsigned long *dirty_bitmap = NULL;
 
 	mutex_lock(&kvm->slots_lock);
 
@@ -2759,27 +2758,29 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
 	n = kvm_dirty_bitmap_bytes(memslot);
 
-	r = -ENOMEM;
-	dirty_bitmap = vmalloc(n);
-	if (!dirty_bitmap)
-		goto out;
-	memset(dirty_bitmap, 0, n);
-
 	for (i = 0; !is_dirty && i < n/sizeof(long); i++)
 		is_dirty = memslot->dirty_bitmap[i];
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
 		struct kvm_memslots *slots, *old_slots;
+		unsigned long *dirty_bitmap;
 
 		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		spin_unlock(&kvm->mmu_lock);
 
-		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-		if (!slots)
-			goto out_free;
-
+		r = -ENOMEM;
+		dirty_bitmap = vmalloc(n);
+		if (!dirty_bitmap)
+			goto out;
+		memset(dirty_bitmap, 0, n);
+
+		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+		if (!slots) {
+			vfree(dirty_bitmap);
+			goto out;
+		}
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
 
@@ -2788,13 +2789,20 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		synchronize_srcu_expedited(&kvm->srcu);
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
+
+		r = -EFAULT;
+		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
+			vfree(dirty_bitmap);
+			goto out;
+		}
+		vfree(dirty_bitmap);
+	} else {
+		r = -EFAULT;
+		if (clear_user(log->dirty_bitmap, n))
+			goto out;
 	}
 
 	r = 0;
-	if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
-		r = -EFAULT;
-out_free:
-	vfree(dirty_bitmap);
 out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
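
The patch carries no changelog beyond the subject line, so for readers following the control-flow change, below is a minimal user-space sketch of the pattern it introduces. It is not the kernel code: the function name get_dirty_log_demo and the fixed-size bitmap are invented for illustration, calloc() stands in for vmalloc(), memcpy() for copy_to_user(), and memset() for clear_user(). The point it demonstrates is that the scratch bitmap is allocated only on the dirty path; a clean slot costs nothing more than zeroing the caller's buffer.

/*
 * Minimal user-space sketch (hypothetical names, not kernel code) of the
 * reworked kvm_vm_ioctl_get_dirty_log() flow: allocate a replacement
 * bitmap only when at least one bit is dirty; otherwise just hand the
 * caller a zeroed buffer, which is what clear_user() does in the patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int get_dirty_log_demo(unsigned long *slot_bitmap, size_t n_bytes,
                              unsigned char *user_buf)
{
	unsigned long is_dirty = 0;
	size_t i;

	for (i = 0; !is_dirty && i < n_bytes / sizeof(long); i++)
		is_dirty = slot_bitmap[i];

	if (is_dirty) {
		/* Dirty path: only now allocate the fresh bitmap. */
		unsigned long *fresh = calloc(1, n_bytes);

		if (!fresh)
			return -1;                      /* -ENOMEM in the kernel */
		memcpy(user_buf, slot_bitmap, n_bytes); /* copy_to_user() stand-in */
		memcpy(slot_bitmap, fresh, n_bytes);    /* install the clean bitmap */
		free(fresh);
	} else {
		/* Clean path: no allocation, just zero the caller's buffer
		 * (the clear_user() branch added by the patch). */
		memset(user_buf, 0, n_bytes);
	}
	return 0;
}

int main(void)
{
	unsigned long slot_bitmap[4] = { 0, 0x10, 0, 0 }; /* one dirty bit set */
	unsigned char user_buf[sizeof(slot_bitmap)];
	size_t i, nonzero = 0;

	if (get_dirty_log_demo(slot_bitmap, sizeof(slot_bitmap), user_buf))
		return 1;
	for (i = 0; i < sizeof(user_buf); i++)
		nonzero += (user_buf[i] != 0);
	printf("non-zero bytes copied out: %zu\n", nonzero);
	return 0;
}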