From patchwork Sun Mar 17 19:35:57 2019
From: Igor Druzhinin <igor.druzhinin@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Igor Druzhinin <igor.druzhinin@citrix.com>, wei.liu2@citrix.com,
    andrew.cooper3@citrix.com, paul.durrant@citrix.com, jbeulich@suse.com,
    roger.pau@citrix.com
Date: Sun, 17 Mar 2019 19:35:57 +0000
Message-ID: <1552851358-27178-1-git-send-email-igor.druzhinin@citrix.com>
Subject: [Xen-devel] [PATCH v4 1/2] x86/hvm: split all linear reads and writes at page boundary

Ruling out page straddling at the linear level makes it easier to
distinguish chunks that require proper handling as MMIO accesses,
rather than prematurely completing them as page-straddling memory
transactions. This doesn't change the general behavior.
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com>
---
 xen/arch/x86/hvm/emulate.c | 70 +++++++++++++++++++++++++---------------------
 1 file changed, 38 insertions(+), 32 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 754baf6..c236e7d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1089,12 +1089,25 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     pagefault_info_t pfinfo;
-    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+    unsigned int offset = addr & ~PAGE_MASK;
+    int rc;
 
-    switch ( rc )
+    if ( offset + bytes > PAGE_SIZE )
     {
-        unsigned int offset, part1;
+        unsigned int part1 = PAGE_SIZE - offset;
+
+        /* Split the access at the page boundary. */
+        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
+                             pfec, hvmemul_ctxt);
+        return rc;
+    }
+
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+    switch ( rc )
+    {
 
     case HVMTRANS_okay:
         return X86EMUL_OKAY;
 
@@ -1106,19 +1119,9 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
         if ( pfec & PFEC_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
 
-        offset = addr & ~PAGE_MASK;
-        if ( offset + bytes <= PAGE_SIZE )
-            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
-                                            hvmemul_ctxt,
-                                            known_gla(addr, bytes, pfec));
-
-        /* Split the access at the page boundary. */
-        part1 = PAGE_SIZE - offset;
-        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
-        if ( rc == X86EMUL_OKAY )
-            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
-                             pfec, hvmemul_ctxt);
-        return rc;
+        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
+                                        hvmemul_ctxt,
+                                        known_gla(addr, bytes, pfec));
 
     case HVMTRANS_gfn_paged_out:
     case HVMTRANS_gfn_shared:
@@ -1132,12 +1135,25 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
                         uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     pagefault_info_t pfinfo;
-    int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+    unsigned int offset = addr & ~PAGE_MASK;
+    int rc;
 
-    switch ( rc )
+    if ( offset + bytes > PAGE_SIZE )
     {
-        unsigned int offset, part1;
+        unsigned int part1 = PAGE_SIZE - offset;
+
+        /* Split the access at the page boundary. */
+        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
+                              pfec, hvmemul_ctxt);
+        return rc;
+    }
+
+    rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+    switch ( rc )
+    {
 
     case HVMTRANS_okay:
         return X86EMUL_OKAY;
 
@@ -1146,19 +1162,9 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
         return X86EMUL_EXCEPTION;
 
     case HVMTRANS_bad_gfn_to_mfn:
-        offset = addr & ~PAGE_MASK;
-        if ( offset + bytes <= PAGE_SIZE )
-            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
-                                             hvmemul_ctxt,
-                                             known_gla(addr, bytes, pfec));
-
-        /* Split the access at the page boundary. */
-        part1 = PAGE_SIZE - offset;
-        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
-        if ( rc == X86EMUL_OKAY )
-            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
-                              pfec, hvmemul_ctxt);
-        return rc;
+        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
+                                         hvmemul_ctxt,
+                                         known_gla(addr, bytes, pfec));
 
     case HVMTRANS_gfn_paged_out:
     case HVMTRANS_gfn_shared:
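
For readers less familiar with the emulator, here is a minimal standalone
sketch (not Xen code) of the split-at-page-boundary recursion that the patch
hoists to the top of linear_read()/linear_write(). PAGE_SIZE/PAGE_MASK mirror
the usual x86 definitions; demo_copy() and the flat guest_mem buffer are
hypothetical stand-ins for hvm_copy_from_guest_linear() and the guest address
space, and all demo_* names are illustrative only.

/*
 * Standalone illustration (NOT Xen code) of splitting a linear access
 * at a page boundary before handing each chunk to a per-page handler.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)          /* 4096 */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

#define OKAY          0
#define UNHANDLEABLE  1

static uint8_t guest_mem[4 * PAGE_SIZE];        /* fake guest address space */

/* Stand-in for hvm_copy_from_guest_linear(): only sees single-page chunks. */
static int demo_copy(void *p_data, unsigned long addr, unsigned int bytes)
{
    if ( addr + bytes > sizeof(guest_mem) )
        return UNHANDLEABLE;
    memcpy(p_data, &guest_mem[addr], bytes);
    return OKAY;
}

/* Split any access that crosses a page boundary, then handle each chunk. */
static int demo_linear_read(unsigned long addr, unsigned int bytes,
                            void *p_data)
{
    unsigned int offset = addr & ~PAGE_MASK;    /* offset within the page */
    int rc;

    if ( offset + bytes > PAGE_SIZE )
    {
        unsigned int part1 = PAGE_SIZE - offset;

        /* First chunk: up to the end of the current page. */
        rc = demo_linear_read(addr, part1, p_data);
        if ( rc == OKAY )
            /* Second chunk: the remainder, starting on the next page. */
            rc = demo_linear_read(addr + part1, bytes - part1,
                                  (uint8_t *)p_data + part1);
        return rc;
    }

    /* From here on the access is known not to straddle a page. */
    return demo_copy(p_data, addr, bytes);
}

int main(void)
{
    uint8_t buf[16];
    unsigned long addr = PAGE_SIZE - 6;         /* 6 bytes in page 0, 10 in page 1 */

    memset(guest_mem, 0xab, sizeof(guest_mem));
    if ( demo_linear_read(addr, sizeof(buf), buf) == OKAY )
        printf("read %zu bytes across a page boundary\n", sizeof(buf));
    return 0;
}

Because the split happens before any copy is attempted, the later fallback
path (the HVMTRANS_bad_gfn_to_mfn case in the real code, i.e. the MMIO
handling) only ever sees chunks confined to a single page, which is what the
commit message above is getting at.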