@@ -546,17 +546,13 @@ void hvm_destroy_cacheattr_region_list(
int hvm_get_mem_pinned_cacheattr(
struct domain *d,
uint64_t guest_fn,
- unsigned int order,
- uint32_t *type)
+ unsigned int order)
{
struct hvm_mem_pinned_cacheattr_range *range;
uint64_t mask = ~(uint64_t)0 << order;
- int rc = 0;
+ int rc = -ENXIO;
- *type = ~0;
-
- if ( !is_hvm_domain(d) )
- return 0;
+ ASSERT(has_hvm_container_domain(d));
rcu_read_lock(&pinned_cacheattr_rcu_lock);
list_for_each_entry_rcu ( range,
@@ -566,14 +562,13 @@ int hvm_get_mem_pinned_cacheattr(
if ( ((guest_fn & mask) >= range->start) &&
((guest_fn | ~mask) <= range->end) )
{
- *type = range->type;
- rc = 1;
+ rc = range->type;
break;
}
if ( ((guest_fn & mask) <= range->end) &&
(range->start <= (guest_fn | ~mask)) )
{
- rc = -1;
+ rc = -EADDRNOTAVAIL;
break;
}
}
@@ -762,7 +757,6 @@ int epte_get_entry_emt(struct domain *d,
unsigned int order, uint8_t *ipat, bool_t direct_mmio)
{
int gmtrr_mtype, hmtrr_mtype;
- uint32_t type;
struct vcpu *v = current;
*ipat = 0;
@@ -798,14 +792,15 @@ int epte_get_entry_emt(struct domain *d,
return MTRR_TYPE_WRBACK;
}
- switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
+ gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
+ if ( gmtrr_mtype >= 0 )
{
- case 1:
*ipat = 1;
- return type != PAT_TYPE_UC_MINUS ? type : PAT_TYPE_UNCACHABLE;
- case -1:
- return -1;
+ return gmtrr_mtype != PAT_TYPE_UC_MINUS ? gmtrr_mtype
+ : MTRR_TYPE_UNCACHABLE;
}
+ if ( gmtrr_mtype == -EADDRNOTAVAIL )
+ return -1;
gmtrr_mtype = is_hvm_domain(d) && v ?
get_mtrr_type(&v->arch.hvm_vcpu.mtrr,
@@ -607,7 +607,7 @@ _sh_propagate(struct vcpu *v,
if ( (level == 1) && is_hvm_domain(d) &&
!is_xen_heap_mfn(mfn_x(target_mfn)) )
{
- unsigned int type;
+ int type;
ASSERT(!(sflags & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)));
@@ -619,7 +619,8 @@ _sh_propagate(struct vcpu *v,
* gMTRR and gPAT.
*/
if ( !mmio_mfn &&
- hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), 0, &type) )
+ (type = hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn),
+ 0)) >= 0 )
sflags |= pat_type_2_pte_flags(type);
else if ( d->arch.hvm_domain.is_in_uc_mode )
sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
@@ -15,8 +15,7 @@ void hvm_destroy_cacheattr_region_list(
int hvm_get_mem_pinned_cacheattr(
struct domain *d,
uint64_t guest_fn,
- unsigned int order,
- uint32_t *type);
+ unsigned int order);
/* Set pinned caching type for a domain. */
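
The hunks above change hvm_get_mem_pinned_cacheattr() from filling a uint32_t out-parameter and returning 0/1/-1 to returning the result directly: a non-negative PAT type when the whole 2^order block of GFNs lies inside one pinned range, -EADDRNOTAVAIL when the block only partially overlaps a pinned range, and -ENXIO when no pinned range is involved. Below is a minimal, self-contained sketch of that convention which compiles outside the Xen tree; the struct, the helper name get_pinned_cacheattr(), and the range bounds are made up for illustration, and the RCU-protected range list is reduced to a single static range.

/*
 * Standalone sketch (not Xen code) of the return convention introduced
 * above: a non-negative PAT type when the whole 2^order block is covered
 * by a pinned range, -EADDRNOTAVAIL on partial overlap, -ENXIO otherwise.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PAT_TYPE_UNCACHABLE 0   /* UC is 0 in the PAT type encoding */

struct pinned_range {
    uint64_t start, end;        /* inclusive GFN bounds */
    int type;
};

/* A single pinned UC region covering GFNs 0x108-0x1f7 (illustrative values). */
static const struct pinned_range pinned = { 0x108, 0x1f7, PAT_TYPE_UNCACHABLE };

static int get_pinned_cacheattr(uint64_t gfn, unsigned int order)
{
    uint64_t mask = ~(uint64_t)0 << order;

    /* Whole 2^order block inside the pinned region: return its type. */
    if ( (gfn & mask) >= pinned.start && (gfn | ~mask) <= pinned.end )
        return pinned.type;
    /* Partial overlap: the caller has to split the block. */
    if ( (gfn & mask) <= pinned.end && pinned.start <= (gfn | ~mask) )
        return -EADDRNOTAVAIL;
    /* No pinned region involved at all. */
    return -ENXIO;
}

int main(void)
{
    printf("%d\n", get_pinned_cacheattr(0x110, 4)); /* 0: pinned type */
    printf("%d\n", get_pinned_cacheattr(0x100, 4)); /* -EADDRNOTAVAIL */
    printf("%d\n", get_pinned_cacheattr(0x300, 4)); /* -ENXIO         */
    return 0;
}

Returning the type (or an errno-style value) directly drops the out-parameter, so callers such as epte_get_entry_emt() and _sh_propagate() can test the result with a plain ">= 0" check instead of switching on the old 0/1/-1 values.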