@@ -1522,7 +1522,7 @@ int sh_remove_all_mappings(struct domain
&& (page->count_info & PGC_count_mask) <= 3
&& ((page->u.inuse.type_info & PGT_count_mask)
== (is_special_page(page) ||
- (is_hvm_domain(d) && is_ioreq_server_page(d, page))))) )
+ is_ioreq_server_page(d, page)))) )
printk(XENLOG_G_ERR "can't find all mappings of mfn %"PRI_mfn
" (gfn %"PRI_gfn"): c=%lx t=%lx s=%d i=%d\n",
mfn_x(gmfn), gfn_x(gfn),
@@ -204,10 +204,6 @@ hvm_emulate_write(enum x86_segment seg,
if ( rc || !bytes )
return rc;
- /* Unaligned writes are only acceptable on HVM */
- if ( (addr & (bytes - 1)) && !is_hvm_vcpu(v) )
- return X86EMUL_UNHANDLEABLE;
-
ptr = sh_emulate_map_dest(v, addr, bytes, sh_ctxt);
if ( IS_ERR(ptr) )
return ~PTR_ERR(ptr);
@@ -258,10 +254,6 @@ hvm_emulate_cmpxchg(enum x86_segment seg
if ( rc )
return rc;
- /* Unaligned writes are only acceptable on HVM */
- if ( (addr & (bytes - 1)) && !is_hvm_vcpu(v) )
- return X86EMUL_UNHANDLEABLE;
-
ptr = sh_emulate_map_dest(v, addr, bytes, sh_ctxt);
if ( IS_ERR(ptr) )
return ~PTR_ERR(ptr);
@@ -457,8 +449,7 @@ static void *sh_emulate_map_dest(struct
#ifndef NDEBUG
/* We don't emulate user-mode writes to page tables. */
- if ( is_hvm_domain(d) ? hvm_get_cpl(v) == 3
- : !guest_kernel_mode(v, guest_cpu_user_regs()) )
+ if ( hvm_get_cpl(v) == 3 )
{
gdprintk(XENLOG_DEBUG, "User-mode write to pagetable reached "
"emulate_map_dest(). This should never happen!\n");
@@ -487,15 +478,6 @@ static void *sh_emulate_map_dest(struct
sh_ctxt->mfn[1] = INVALID_MFN;
map = map_domain_page(sh_ctxt->mfn[0]) + (vaddr & ~PAGE_MASK);
}
- else if ( !is_hvm_domain(d) )
- {
- /*
- * Cross-page emulated writes are only supported for HVM guests;
- * PV guests ought to know better.
- */
- put_page(mfn_to_page(sh_ctxt->mfn[0]));
- return MAPPING_UNHANDLEABLE;
- }
else
{
/* This write crosses a page boundary. Translate the second page. */
@@ -3438,7 +3438,7 @@ int sh_rm_write_access_from_sl1p(struct
ASSERT(mfn_valid(smfn));
/* Remember if we've been told that this process is being torn down */
- if ( curr->domain == d && is_hvm_domain(d) )
+ if ( curr->domain == d )
curr->arch.paging.shadow.pagetable_dying
= mfn_to_page(gmfn)->pagetable_dying;
@@ -580,7 +580,6 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn
if ( (pg->shadow_flags &
((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)) ||
sh_page_has_multiple_shadows(pg) ||
- !is_hvm_vcpu(v) ||
!v->domain->arch.paging.shadow.oos_active )
return 0;
Emulation-related functions are involved in HVM handling only, and in some cases they even invoke such checks after having already done things which are valid for HVM domains only. OOS active also implies HVM. In sh_remove_all_mappings() one of the two checks is redundant with an earlier paging_mode_external() one (the other, however, needs to stay). Signed-off-by: Jan Beulich <jbeulich@suse.com> --- v2: Re-base over changes/additions earlier in the series.