From patchwork Thu Mar 10 10:13:40 2016
X-Patchwork-Submitter: Jan Beulich
X-Patchwork-Id: 8555011
Message-Id: <56E156E402000078000DB2BE@prv-mh.provo.novell.com>
Date: Thu, 10 Mar 2016 03:13:40 -0700
From: "Jan Beulich"
To: "xen-devel"
Cc: Tim Deegan
References: <56E1555002000078000DB293@prv-mh.provo.novell.com>
In-Reply-To: <56E1555002000078000DB293@prv-mh.provo.novell.com>
Subject: [Xen-devel] [PATCH 2/2] x86/shadow: avoid extra local array variable
List-Id: Xen developer discussion

mfns[2] was there just because struct sh_emulate_ctxt's two MFN values
can't be handed to vmap() directly. Making the structure fields an array
avoids the extra copying.
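For illustration only, here is a minimal standalone sketch (not Xen code) of
the point made above: with two scalar fields the MFNs first have to be copied
into a local array before they can be passed to a vmap()-style function that
takes a pointer to an array of mfn_t, whereas an array member can be handed
over as-is. The mfn_t, vmap_stub(), ctxt_old and ctxt_new definitions below
are simplified stand-ins, not the real Xen declarations.

    #include <stdio.h>

    /* Simplified stand-in for Xen's mfn_t (illustration only). */
    typedef struct { unsigned long mfn; } mfn_t;

    /* Stand-in for vmap(): only the "array of MFNs" parameter shape matters. */
    static void *vmap_stub(const mfn_t *mfns, unsigned int nr)
    {
        for ( unsigned int i = 0; i < nr; i++ )
            printf("would map mfn %lu\n", mfns[i].mfn);
        return NULL; /* no real mapping in this sketch */
    }

    struct ctxt_old { mfn_t mfn1, mfn2; }; /* two scalar fields (old layout)   */
    struct ctxt_new { mfn_t mfn[2]; };     /* same data as an array (new layout) */

    int main(void)
    {
        struct ctxt_old o = { .mfn1 = { 1 }, .mfn2 = { 2 } };
        struct ctxt_new n = { .mfn = { { 1 }, { 2 } } };

        /* Old layout: the fields must be copied into a temporary array first. */
        mfn_t mfns[2] = { o.mfn1, o.mfn2 };
        vmap_stub(mfns, 2);

        /* New layout: the structure member is already a suitable array. */
        vmap_stub(n.mfn, 2);

        return 0;
    }

The patch below does exactly this substitution inside struct sh_emulate_ctxt
and its users in sh_emulate_map_dest() / sh_emulate_unmap_dest().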
Signed-off-by: Jan Beulich
Reviewed-by: Andrew Cooper

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1746,11 +1746,11 @@ void *sh_emulate_map_dest(struct vcpu *v
     struct domain *d = v->domain;
     void *map;
 
-    sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
-    if ( !mfn_valid(sh_ctxt->mfn1) )
-        return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
+    sh_ctxt->mfn[0] = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
+    if ( !mfn_valid(sh_ctxt->mfn[0]) )
+        return ((mfn_x(sh_ctxt->mfn[0]) == BAD_GVA_TO_GFN) ?
                 MAPPING_EXCEPTION :
-                (mfn_x(sh_ctxt->mfn1) == READONLY_GFN) ?
+                (mfn_x(sh_ctxt->mfn[0]) == READONLY_GFN) ?
                 MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
 
 #ifndef NDEBUG
@@ -1767,39 +1767,36 @@ void *sh_emulate_map_dest(struct vcpu *v
 
     /* Unaligned writes mean probably this isn't a pagetable. */
     if ( vaddr & (bytes - 1) )
-        sh_remove_shadows(d, sh_ctxt->mfn1, 0, 0 /* Slow, can fail. */ );
+        sh_remove_shadows(d, sh_ctxt->mfn[0], 0, 0 /* Slow, can fail. */ );
 
     if ( likely(((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK)) )
     {
         /* Whole write fits on a single page. */
-        sh_ctxt->mfn2 = _mfn(INVALID_MFN);
-        map = map_domain_page(sh_ctxt->mfn1) + (vaddr & ~PAGE_MASK);
+        sh_ctxt->mfn[1] = _mfn(INVALID_MFN);
+        map = map_domain_page(sh_ctxt->mfn[0]) + (vaddr & ~PAGE_MASK);
     }
-    else
+    else if ( !is_hvm_domain(d) )
     {
-        mfn_t mfns[2];
-
         /*
          * Cross-page emulated writes are only supported for HVM guests;
          * PV guests ought to know better.
          */
-        if ( !is_hvm_domain(d) )
-            return MAPPING_UNHANDLEABLE;
-
+        return MAPPING_UNHANDLEABLE;
+    }
+    else
+    {
         /* This write crosses a page boundary. Translate the second page. */
-        sh_ctxt->mfn2 = emulate_gva_to_mfn(v, vaddr + bytes, sh_ctxt);
-        if ( !mfn_valid(sh_ctxt->mfn2) )
-            return ((mfn_x(sh_ctxt->mfn2) == BAD_GVA_TO_GFN) ?
+        sh_ctxt->mfn[1] = emulate_gva_to_mfn(v, vaddr + bytes, sh_ctxt);
+        if ( !mfn_valid(sh_ctxt->mfn[1]) )
+            return ((mfn_x(sh_ctxt->mfn[1]) == BAD_GVA_TO_GFN) ?
                     MAPPING_EXCEPTION :
-                    (mfn_x(sh_ctxt->mfn2) == READONLY_GFN) ?
+                    (mfn_x(sh_ctxt->mfn[1]) == READONLY_GFN) ?
                     MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
 
         /* Cross-page writes mean probably not a pagetable. */
-        sh_remove_shadows(d, sh_ctxt->mfn2, 0, 0 /* Slow, can fail. */ );
+        sh_remove_shadows(d, sh_ctxt->mfn[1], 0, 0 /* Slow, can fail. */ );
 
-        mfns[0] = sh_ctxt->mfn1;
-        mfns[1] = sh_ctxt->mfn2;
-        map = vmap(mfns, 2);
+        map = vmap(sh_ctxt->mfn, 2);
         if ( !map )
             return MAPPING_UNHANDLEABLE;
         map += (vaddr & ~PAGE_MASK);
@@ -1831,7 +1828,7 @@ void sh_emulate_unmap_dest(struct vcpu *
      *  - it was aligned to the PTE boundaries; and
      *  - _PAGE_PRESENT was clear before and after the write.
      */
-    shflags = mfn_to_page(sh_ctxt->mfn1)->shadow_flags;
+    shflags = mfn_to_page(sh_ctxt->mfn[0])->shadow_flags;
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
     if ( sh_ctxt->low_bit_was_clear
          && !(*(u8 *)addr & _PAGE_PRESENT)
@@ -1852,12 +1849,12 @@ void sh_emulate_unmap_dest(struct vcpu *
                  && bytes <= 4)) )
     {
         /* Writes with this alignment constraint can't possibly cross pages. */
-        ASSERT(!mfn_valid(sh_ctxt->mfn2));
+        ASSERT(!mfn_valid(sh_ctxt->mfn[1]));
     }
     else
 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY */
     {
-        if ( unlikely(mfn_valid(sh_ctxt->mfn2)) )
+        if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )
         {
             /* Validate as two writes, one to each page. */
             b1 = PAGE_SIZE - (((unsigned long)addr) & ~PAGE_MASK);
@@ -1865,16 +1862,16 @@ void sh_emulate_unmap_dest(struct vcpu *
             ASSERT(b2 < bytes);
         }
         if ( likely(b1 > 0) )
-            sh_validate_guest_pt_write(v, sh_ctxt->mfn1, addr, b1);
+            sh_validate_guest_pt_write(v, sh_ctxt->mfn[0], addr, b1);
         if ( unlikely(b2 > 0) )
-            sh_validate_guest_pt_write(v, sh_ctxt->mfn2, addr + b1, b2);
+            sh_validate_guest_pt_write(v, sh_ctxt->mfn[1], addr + b1, b2);
     }
 
-    paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn1));
+    paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[0]));
 
-    if ( unlikely(mfn_valid(sh_ctxt->mfn2)) )
+    if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )
     {
-        paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn2));
+        paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[1]));
         vunmap((void *)((unsigned long)addr & PAGE_MASK));
     }
     else
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4615,13 +4615,13 @@ static void emulate_unmap_dest(struct vc
                                u32 bytes,
                                struct sh_emulate_ctxt *sh_ctxt)
 {
-    ASSERT(mfn_valid(sh_ctxt->mfn1));
+    ASSERT(mfn_valid(sh_ctxt->mfn[0]));
 
     /* If we are writing lots of PTE-aligned zeros, might want to unshadow */
     if ( likely(bytes >= 4) && (*(u32 *)addr == 0) )
     {
         if ( ((unsigned long) addr & ((sizeof (guest_intpte_t)) - 1)) == 0 )
-            check_for_early_unshadow(v, sh_ctxt->mfn1);
+            check_for_early_unshadow(v, sh_ctxt->mfn[0]);
         /* Don't reset the heuristic if we're writing zeros at non-aligned
          * addresses, otherwise it doesn't catch REP MOVSD on PAE guests */
     }
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -727,7 +727,7 @@ struct sh_emulate_ctxt {
     struct segment_register seg_reg[6];
 
     /* MFNs being written to in write/cmpxchg callbacks */
-    mfn_t mfn1, mfn2;
+    mfn_t mfn[2];
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
     /* Special case for avoiding having to verify writes: remember