@@ -1146,7 +1146,7 @@ static int linear_read(unsigned long add
pagefault_info_t pfinfo;
    struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
unsigned int offset = addr & ~PAGE_MASK;
- int rc = HVMTRANS_bad_gfn_to_mfn;
+ int rc;
if ( offset + bytes > PAGE_SIZE )
{
@@ -1154,12 +1154,16 @@ static int linear_read(unsigned long add
/* Split the access at the page boundary. */
rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
- if ( rc == X86EMUL_OKAY )
- rc = linear_read(addr + part1, bytes - part1, p_data + part1,
- pfec, hvmemul_ctxt);
- return rc;
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ addr += part1;
+ bytes -= part1;
+ p_data += part1;
}
+ rc = HVMTRANS_bad_gfn_to_mfn;
+
/*
* If there is an MMIO cache entry for the access then we must be re-issuing
* an access that was previously handled as MMIO. Thus it is imperative that
@@ -1201,7 +1205,7 @@ static int linear_write(unsigned long ad
pagefault_info_t pfinfo;
    struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
unsigned int offset = addr & ~PAGE_MASK;
- int rc = HVMTRANS_bad_gfn_to_mfn;
+ int rc;
if ( offset + bytes > PAGE_SIZE )
{
@@ -1209,12 +1213,16 @@ static int linear_write(unsigned long ad
/* Split the access at the page boundary. */
rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
- if ( rc == X86EMUL_OKAY )
- rc = linear_write(addr + part1, bytes - part1, p_data + part1,
- pfec, hvmemul_ctxt);
- return rc;
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ addr += part1;
+ bytes -= part1;
+ p_data += part1;
}
+ rc = HVMTRANS_bad_gfn_to_mfn;
+
/*
* If there is an MMIO cache entry for the access then we must be re-issuing
* an access that was previously handled as MMIO. Thus it is imperative that
Let's make explicit what the compiler may or may not do on our behalf: the 2nd of the recursive invocations in each of the two functions can simply fall through rather than re-invoking the function. This also saves us from adding yet another parameter (or more) to the functions, just for the recursive invocations.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
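
For illustration only (not part of the patch): a minimal, self-contained C sketch of the same pattern. The names read_one_page(), split_read() and the backing[] array are hypothetical stand-ins, and plain 0/-1 return values stand in for X86EMUL_OKAY / HVMTRANS_bad_gfn_to_mfn. As in the patched functions, the part up to the page boundary is still handled by the one remaining recursive call, while the trailing part falls through by adjusting addr, bytes and p_data in place.

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(unsigned long)(PAGE_SIZE - 1))

/* Toy backing store standing in for guest memory. */
static uint8_t backing[4 * PAGE_SIZE];

/* Hypothetical helper: service an access known to fit within one page. */
static int read_one_page(unsigned long addr, unsigned int bytes, uint8_t *p_data)
{
    if ( addr + bytes > sizeof(backing) )
        return -1;                     /* stand-in for a translation failure */
    memcpy(p_data, &backing[addr], bytes);
    return 0;                          /* stand-in for X86EMUL_OKAY */
}

/*
 * Same shape as the patched linear_read(): recurse once for the leading
 * part, then fall through for the remainder instead of recursing again.
 */
static int split_read(unsigned long addr, unsigned int bytes, uint8_t *p_data)
{
    unsigned int offset = addr & ~PAGE_MASK;

    if ( offset + bytes > PAGE_SIZE )
    {
        unsigned int part1 = PAGE_SIZE - offset;

        /* Split the access at the page boundary. */
        int rc = split_read(addr, part1, p_data);

        if ( rc != 0 )
            return rc;

        addr += part1;
        bytes -= part1;
        p_data += part1;
    }

    return read_one_page(addr, bytes, p_data);
}

int main(void)
{
    uint8_t buf[8];

    memset(backing, 0x5a, sizeof(backing));
    /* A read straddling the first page boundary exercises the split path. */
    return split_read(PAGE_SIZE - 4, sizeof(buf), buf);
}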