From patchwork Sun Aug 30 08:38:13 2015
From: "Michael S. Tsirkin" <mst@redhat.com>
Date: Sun, 30 Aug 2015 11:38:13 +0300
To: linux-kernel@vger.kernel.org
Cc: Thomas Gleixner, Ingo Molnar, "H. Peter Anvin", x86@kernel.org,
 Gleb Natapov, Paolo Bonzini, kvm@vger.kernel.org, Rusty Russell
Subject: [PATCH 2/2] kvm/x86: use __test_bit
Message-ID: <1440776707-22016-2-git-send-email-mst@redhat.com>
In-Reply-To: <1440776707-22016-1-git-send-email-mst@redhat.com>
References: <1440776707-22016-1-git-send-email-mst@redhat.com>

Let the compiler do slightly better optimization by using the
non-volatile __test_bit in all cases where the values are set using
the non-volatile __set_bit and __clear_bit.  I left test_bit in place
where the mask is set using the atomic set_bit/clear_bit, for
symmetry.

This shaves about 100 bytes off the kernel size:

before:
   text    data   bss     dec    hex  filename
 134868    2997  8372  146237  23b3d  arch/x86/kvm/kvm-intel.ko
 343129   47640   441  391210  5f82a  arch/x86/kvm/kvm.ko

after:
   text    data   bss     dec    hex  filename
 134836    2997  8372  146205  23b1d  arch/x86/kvm/kvm-intel.ko
 343017   47640   441  391098  5f7ba  arch/x86/kvm/kvm.ko
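For reference, the difference being relied on, as a simplified sketch
(not the kernel's actual definitions; __test_bit is assumed to be the
non-volatile variant introduced by patch 1/2, and the sketch_* names
are illustrative only):

/*
 * Both accessors compute the same value; the only difference is the
 * volatile qualifier on the bitmap pointer.  With the volatile form
 * the compiler must reload the word from memory on every call; with
 * the plain form it may keep the word in a register and fold repeated
 * tests together, which is where the size win above comes from.
 */
#define SKETCH_BITS_PER_LONG	(8 * sizeof(long))
#define SKETCH_BIT_WORD(nr)	((nr) / SKETCH_BITS_PER_LONG)

static inline int sketch_test_bit(long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[SKETCH_BIT_WORD(nr)] >> (nr & (SKETCH_BITS_PER_LONG - 1)));
}

static inline int sketch___test_bit(long nr, const unsigned long *addr)
{
	return 1UL & (addr[SKETCH_BIT_WORD(nr)] >> (nr & (SKETCH_BITS_PER_LONG - 1)));
}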
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 arch/x86/kvm/ioapic.h         |  2 +-
 arch/x86/kvm/kvm_cache_regs.h |  6 +++---
 arch/x86/kvm/ioapic.c         |  2 +-
 arch/x86/kvm/pmu_intel.c      |  2 +-
 arch/x86/kvm/vmx.c            | 18 +++++++++---------
 arch/x86/kvm/x86.c            |  2 +-
 6 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index ca0b0b4..3b58d41 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -102,7 +102,7 @@ static inline bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 	smp_rmb();
-	return test_bit(vector, ioapic->handled_vectors);
+	return __test_bit(vector, ioapic->handled_vectors);
 }
 
 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index e1e89ee..21ef6d6 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -9,7 +9,7 @@
 static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
 					      enum kvm_reg reg)
 {
-	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
+	if (!__test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
 		kvm_x86_ops->cache_reg(vcpu, reg);
 
 	return vcpu->arch.regs[reg];
@@ -38,7 +38,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 {
 	might_sleep();  /* on svm */
 
-	if (!test_bit(VCPU_EXREG_PDPTR,
+	if (!__test_bit(VCPU_EXREG_PDPTR,
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
 
@@ -68,7 +68,7 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
-	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+	if (!__test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
 		kvm_x86_ops->decache_cr3(vcpu);
 	return vcpu->arch.cr3;
 }
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 856f791..bf2afa5 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -117,7 +117,7 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 		return;
 
 	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
-	old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+	old_val = __test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
 
 	if (new_val == old_val)
 		return;
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index ab38af4..fb20a0f 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -98,7 +98,7 @@ static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
-	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
+	return __test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
 }
 
 static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c117703..ed44026 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2025,7 +2025,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
 	unsigned long rflags, save_rflags;
 
-	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
+	if (!__test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
 		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
 		rflags = vmcs_readl(GUEST_RFLAGS);
 		if (to_vmx(vcpu)->rmode.vm86_active) {
@@ -3478,7 +3478,7 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
-	if (!test_bit(VCPU_EXREG_PDPTR,
+	if (!__test_bit(VCPU_EXREG_PDPTR,
 		      (unsigned long *)&vcpu->arch.regs_dirty))
 		return;
@@ -3513,7 +3513,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
-	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+	if (!__test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
 		vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
@@ -4275,24 +4275,24 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
 	 */
 	if (msr <= 0x1fff) {
 		if (type & MSR_TYPE_R &&
-		   !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
+		   !__test_bit(msr, msr_bitmap_l1 + 0x000 / f))
 			/* read-low */
 			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);
 
 		if (type & MSR_TYPE_W &&
-		   !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
+		   !__test_bit(msr, msr_bitmap_l1 + 0x800 / f))
 			/* write-low */
 			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);
 	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
 		msr &= 0x1fff;
 		if (type & MSR_TYPE_R &&
-		   !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
+		   !__test_bit(msr, msr_bitmap_l1 + 0x400 / f))
 			/* read-high */
 			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);
 
 		if (type & MSR_TYPE_W &&
-		   !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
+		   !__test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
 			/* write-high */
 			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
 
@@ -8316,9 +8316,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmx->nested.sync_shadow_vmcs = false;
 	}
 
-	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+	if (__test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
-	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+	if (__test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
 	cr4 = cr4_read_shadow();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ef2560..c8015fa 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -555,7 +555,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 	if (is_long_mode(vcpu) || !is_pae(vcpu))
 		return false;
 
-	if (!test_bit(VCPU_EXREG_PDPTR,
+	if (!__test_bit(VCPU_EXREG_PDPTR,
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		return true;
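For completeness, the pairing convention the series follows, as a
hypothetical example (the flag names, variables, and locking comments
are illustrative only, not taken from the KVM code; __test_bit is
assumed from patch 1/2):

#include <linux/bitops.h>	/* set_bit(), __set_bit(), test_bit();
				 * __test_bit() as added by patch 1/2 */

#define SOME_FLAG	0
#define OTHER_FLAG	1

void do_something(void);
void do_something_else(void);

static unsigned long local_flags;	/* written only under a lock */
static unsigned long shared_flags;	/* set/cleared by other CPUs too */

static void pairing_example(void)
{
	__set_bit(SOME_FLAG, &local_flags);	  /* non-atomic writer ...   */
	if (__test_bit(SOME_FLAG, &local_flags))  /* ... non-volatile reader */
		do_something();

	set_bit(OTHER_FLAG, &shared_flags);	  /* atomic writer ...             */
	if (test_bit(OTHER_FLAG, &shared_flags))  /* ... keeps volatile test_bit() */
		do_something_else();
}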