@@ -1645,10 +1645,8 @@ static int cf_check hvmemul_cmpxchg(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct vcpu *curr = current;
unsigned long addr;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
int rc;
void *mapping = NULL;
@@ -1672,10 +1670,7 @@ static int cf_check hvmemul_cmpxchg(
if ( !mapping )
{
/* Fix this in case the guest is really relying on r-m-w atomicity. */
- return hvmemul_linear_mmio_write(addr, bytes, p_new, pfec,
- hvmemul_ctxt, addr,
- hvio->mmio_access.write_access &&
- hvio->mmio_gla == (addr & PAGE_MASK));
+ return linear_write(addr, bytes, p_new, pfec, hvmemul_ctxt);
}
switch ( bytes )
Using hvmemul_linear_mmio_write() directly (as the fallback when mapping the memory operand isn't possible) won't work properly when the access crosses a RAM/MMIO boundary. Use linear_write() instead, which splits the access at such boundaries as necessary.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
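
For readers unfamiliar with why the split matters: the sketch below illustrates, in a hedged and simplified form, what "splitting at boundaries" means for a linear write. It is not Xen's actual linear_write() implementation; the helper names (chunk_is_mmio, write_ram_chunk, write_mmio_chunk), the stub bodies, and the fixed CHUNK_SIZE are hypothetical stand-ins chosen only to make the example self-contained.

/*
 * Illustrative sketch only -- not Xen's linear_write().  It shows the
 * general idea: walk the linear range in chunk-sized pieces so that a
 * write crossing a RAM/MMIO boundary can be dispatched to the correct
 * backend for each piece, instead of forcing the whole access down the
 * MMIO path (which is what the old fallback effectively did).
 */
#include <stddef.h>
#include <stdint.h>

#define CHUNK_SIZE 4096u  /* stand-in for PAGE_SIZE */

/* Hypothetical stubs standing in for real RAM/MMIO handling. */
static int chunk_is_mmio(uint64_t addr)
{
    return (addr & (1ull << 30)) != 0;  /* arbitrary placeholder predicate */
}

static int write_ram_chunk(uint64_t addr, const uint8_t *src, size_t len)
{
    (void)addr; (void)src; (void)len;
    return 0;  /* pretend the RAM copy succeeded */
}

static int write_mmio_chunk(uint64_t addr, const uint8_t *src, size_t len)
{
    (void)addr; (void)src; (void)len;
    return 0;  /* pretend the MMIO emulation succeeded */
}

/* Split [addr, addr + bytes) at chunk boundaries and dispatch each piece. */
static int split_linear_write(uint64_t addr, const void *src, size_t bytes)
{
    const uint8_t *p = src;

    while ( bytes )
    {
        size_t in_chunk = CHUNK_SIZE - (addr & (CHUNK_SIZE - 1));
        size_t len = bytes < in_chunk ? bytes : in_chunk;
        int rc = chunk_is_mmio(addr) ? write_mmio_chunk(addr, p, len)
                                     : write_ram_chunk(addr, p, len);

        if ( rc )
            return rc;

        addr  += len;
        p     += len;
        bytes -= len;
    }

    return 0;
}

A single-call fallback such as the removed hvmemul_linear_mmio_write() has no equivalent of this loop, so a CMPXCHG operand straddling a RAM/MMIO boundary would be handled entirely as MMIO; routing the fallback through linear_write() lets each portion be handled by the appropriate path.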