@@ -545,7 +545,7 @@ void hvm_destroy_cacheattr_region_list(
int hvm_get_mem_pinned_cacheattr(
struct domain *d,
- uint64_t guest_fn,
+ gfn_t gfn,
unsigned int order)
{
struct hvm_mem_pinned_cacheattr_range *range;
@@ -559,14 +559,14 @@ int hvm_get_mem_pinned_cacheattr(
&d->arch.hvm_domain.pinned_cacheattr_ranges,
list )
{
- if ( ((guest_fn & mask) >= range->start) &&
- ((guest_fn | ~mask) <= range->end) )
+ if ( ((gfn_x(gfn) & mask) >= range->start) &&
+ ((gfn_x(gfn) | ~mask) <= range->end) )
{
rc = range->type;
break;
}
- if ( ((guest_fn & mask) <= range->end) &&
- (range->start <= (guest_fn | ~mask)) )
+ if ( ((gfn_x(gfn) & mask) <= range->end) &&
+ ((gfn_x(gfn) | ~mask) >= range->start) )
{
rc = -EADDRNOTAVAIL;
break;
@@ -808,7 +808,7 @@ int epte_get_entry_emt(struct domain *d,
return MTRR_TYPE_WRBACK;
}
- gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
+ gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, _gfn(gfn), order);
if ( gmtrr_mtype >= 0 )
{
*ipat = 1;
@@ -619,8 +619,7 @@ _sh_propagate(struct vcpu *v,
* gMTRR and gPAT.
*/
if ( !mmio_mfn &&
- (type = hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn),
- 0)) >= 0 )
+ (type = hvm_get_mem_pinned_cacheattr(d, target_gfn, 0)) >= 0 )
sflags |= pat_type_2_pte_flags(type);
else if ( d->arch.hvm_domain.is_in_uc_mode )
sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
@@ -1,20 +1,22 @@
#ifndef __HVM_CACHEATTR_H__
#define __HVM_CACHEATTR_H__
+#include <xen/mm.h>
+
void hvm_init_cacheattr_region_list(
struct domain *d);
void hvm_destroy_cacheattr_region_list(
struct domain *d);
/*
- * To see guest_fn is in the pinned range or not,
+ * Check whether gfn is in the pinned range:
- * if yes, return 1, and set type to value in this range
- * if no, return 0, setting type to ~0
- * if ambiguous, return -1, setting type to ~0 (possible only for order > 0)
+ * if yes, return the range's type (a non-negative value)
+ * if the range is only partially covered, return -EADDRNOTAVAIL
+ *   (possible only for order > 0); otherwise return a negative errno value
*/
int hvm_get_mem_pinned_cacheattr(
struct domain *d,
- uint64_t guest_fn,
+ gfn_t gfn,
unsigned int order);