@@ -307,12 +307,12 @@ int hvm_set_guest_pat(struct vcpu *v, uint64_t guest_pat)
for ( i = 0, tmp = guest_pat; i < 8; i++, tmp >>= 8 )
switch ( tmp & 0xff )
{
- case PAT_TYPE_UC_MINUS:
- case PAT_TYPE_UNCACHABLE:
- case PAT_TYPE_WRBACK:
- case PAT_TYPE_WRCOMB:
- case PAT_TYPE_WRPROT:
- case PAT_TYPE_WRTHROUGH:
+ case X86_MT_UCM:
+ case X86_MT_UC:
+ case X86_MT_WB:
+ case X86_MT_WC:
+ case X86_MT_WP:
+ case X86_MT_WT:
break;
default:
return 0;
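
The loop above walks the eight byte-wide PAT fields and rejects any reserved encoding. A minimal standalone sketch of the same check, assuming the X86_MT_* constants keep the numeric values of the PAT_TYPE_* enum this patch removes (UC=0, WC=1, WT=4, WP=5, WB=6, UC-=7):

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed encodings, mirroring the PAT_TYPE_* enum this patch removes. */
    #define X86_MT_UC  0x00   /* Uncacheable */
    #define X86_MT_WC  0x01   /* Write-Combining */
    #define X86_MT_WT  0x04   /* Write-Through */
    #define X86_MT_WP  0x05   /* Write-Protect */
    #define X86_MT_WB  0x06   /* Write-Back */
    #define X86_MT_UCM 0x07   /* UC-: uncacheable, but MTRRs may override to WC */

    /* Return true iff all 8 PAT fields hold an architecturally valid type. */
    static bool pat_valid(uint64_t pat)
    {
        for ( unsigned int i = 0; i < 8; i++, pat >>= 8 )
            switch ( pat & 0xff )
            {
            case X86_MT_UC: case X86_MT_WC: case X86_MT_WT:
            case X86_MT_WP: case X86_MT_WB: case X86_MT_UCM:
                break;
            default:
                return false;   /* 2, 3 and 0x08..0xff are reserved */
            }
        return true;
    }
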
@@ -37,7 +37,7 @@ static const uint8_t pat_entry_2_pte_flags[8] = {
_PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };
/* Effective mm type lookup table, according to MTRR and PAT. */
-static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
+static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][X86_NUM_MT] = {
#define RS MEMORY_NUM_TYPES
#define UC MTRR_TYPE_UNCACHABLE
#define WB MTRR_TYPE_WRBACK
@@ -72,8 +72,8 @@ static uint8_t __read_mostly mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES] =
};
/* Lookup table for PAT entry of a given PAT value in host PAT. */
-static uint8_t __read_mostly pat_entry_tbl[PAT_TYPE_NUMS] =
- { [0 ... PAT_TYPE_NUMS-1] = INVALID_MEM_TYPE };
+static uint8_t __read_mostly pat_entry_tbl[X86_NUM_MT] =
+ { [0 ... X86_NUM_MT - 1] = INVALID_MEM_TYPE };
static int __init cf_check hvm_mtrr_pat_init(void)
{
@@ -81,7 +81,7 @@ static int __init cf_check hvm_mtrr_pat_init(void)
for ( i = 0; i < MTRR_NUM_TYPES; i++ )
{
- for ( j = 0; j < PAT_TYPE_NUMS; j++ )
+ for ( j = 0; j < X86_NUM_MT; j++ )
{
unsigned int tmp = mm_type_tbl[i][j];
@@ -90,9 +90,9 @@ static int __init cf_check hvm_mtrr_pat_init(void)
}
}
- for ( i = 0; i < PAT_TYPE_NUMS; i++ )
+ for ( i = 0; i < X86_NUM_MT; i++ )
{
- for ( j = 0; j < PAT_TYPE_NUMS; j++ )
+ for ( j = 0; j < X86_NUM_MT; j++ )
{
if ( pat_cr_2_paf(XEN_MSR_PAT, j) == i )
{
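
The double loop above inverts the host PAT: for each memory type i it records the first PAT entry j in XEN_MSR_PAT holding that type. A hedged sketch of the same inversion, with pat_cr_2_paf() assumed to be the usual byte-extraction macro:

    #include <stdint.h>

    #define X86_NUM_MT       8
    #define INVALID_MEM_TYPE X86_NUM_MT

    /* Assumed shape of pat_cr_2_paf(): the n-th byte of a PAT MSR value. */
    #define pat_cr_2_paf(pat, n) ((uint8_t)((uint64_t)(pat) >> ((n) * 8)))

    static uint8_t pat_entry_tbl[X86_NUM_MT];

    /* Invert a host PAT value: map each memory type to a PAT entry index. */
    static void build_pat_entry_tbl(uint64_t host_pat)
    {
        for ( unsigned int type = 0; type < X86_NUM_MT; type++ )
        {
            pat_entry_tbl[type] = INVALID_MEM_TYPE;
            for ( unsigned int entry = 0; entry < 8; entry++ )
                if ( pat_cr_2_paf(host_pat, entry) == type )
                {
                    pat_entry_tbl[type] = entry;
                    break;
                }
        }
    }
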
@@ -115,7 +115,7 @@ uint8_t pat_type_2_pte_flags(uint8_t pat_type)
* given pat_type. If host PAT covers all the PAT types, it can't happen.
*/
if ( unlikely(pat_entry == INVALID_MEM_TYPE) )
- pat_entry = pat_entry_tbl[PAT_TYPE_UNCACHABLE];
+ pat_entry = pat_entry_tbl[X86_MT_UC];
return pat_entry_2_pte_flags[pat_entry];
}
@@ -145,14 +145,14 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v)
m->mtrr_cap = (1u << 10) | (1u << 8) | num_var_ranges;
v->arch.hvm.pat_cr =
- ((uint64_t)PAT_TYPE_WRBACK) | /* PAT0: WB */
- ((uint64_t)PAT_TYPE_WRTHROUGH << 8) | /* PAT1: WT */
- ((uint64_t)PAT_TYPE_UC_MINUS << 16) | /* PAT2: UC- */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 24) | /* PAT3: UC */
- ((uint64_t)PAT_TYPE_WRBACK << 32) | /* PAT4: WB */
- ((uint64_t)PAT_TYPE_WRTHROUGH << 40) | /* PAT5: WT */
- ((uint64_t)PAT_TYPE_UC_MINUS << 48) | /* PAT6: UC- */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 56); /* PAT7: UC */
+ ((uint64_t)X86_MT_WB) | /* PAT0: WB */
+ ((uint64_t)X86_MT_WT << 8) | /* PAT1: WT */
+ ((uint64_t)X86_MT_UCM << 16) | /* PAT2: UC- */
+ ((uint64_t)X86_MT_UC << 24) | /* PAT3: UC */
+ ((uint64_t)X86_MT_WB << 32) | /* PAT4: WB */
+ ((uint64_t)X86_MT_WT << 40) | /* PAT5: WT */
+ ((uint64_t)X86_MT_UCM << 48) | /* PAT6: UC- */
+ ((uint64_t)X86_MT_UC << 56); /* PAT7: UC */
if ( is_hardware_domain(v->domain) )
{
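
Assuming the encodings above (WB=6, WT=4, UC-=7, UC=0), the composed default equals 0x0007040600070406, the architectural power-on value of IA32_PAT; a compile-time check makes the rename's value preservation explicit:

    #include <assert.h>
    #include <stdint.h>

    #define X86_MT_UC  0x00   /* assumed encodings, as in the sketch above */
    #define X86_MT_WT  0x04
    #define X86_MT_WB  0x06
    #define X86_MT_UCM 0x07

    #define DEFAULT_PAT                                    \
        ( (uint64_t)X86_MT_WB        | /* PAT0: WB  */     \
          (uint64_t)X86_MT_WT  <<  8 | /* PAT1: WT  */     \
          (uint64_t)X86_MT_UCM << 16 | /* PAT2: UC- */     \
          (uint64_t)X86_MT_UC  << 24 | /* PAT3: UC  */     \
          (uint64_t)X86_MT_WB  << 32 | /* PAT4: WB  */     \
          (uint64_t)X86_MT_WT  << 40 | /* PAT5: WT  */     \
          (uint64_t)X86_MT_UCM << 48 | /* PAT6: UC- */     \
          (uint64_t)X86_MT_UC  << 56 ) /* PAT7: UC  */

    static_assert(DEFAULT_PAT == 0x0007040600070406ULL,
                  "default guest PAT is the architectural reset value");
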
@@ -356,7 +356,7 @@ uint32_t get_pat_flags(struct vcpu *v,
*/
pat_entry_value = mtrr_epat_tbl[shadow_mtrr_type][guest_eff_mm_type];
- /* If conflit occurs(e.g host MTRR is UC, guest memory type is
- * WB),set UC as effective memory. Here, returning PAT_TYPE_UNCACHABLE will
- * always set effective memory as UC.
+ /* If a conflict occurs (e.g. host MTRR is UC, guest memory type is
+ * WB), set UC as the effective memory type. Here, returning X86_MT_UC
+ * always sets the effective memory type to UC.
*/
if ( pat_entry_value == INVALID_MEM_TYPE )
@@ -371,7 +371,7 @@ uint32_t get_pat_flags(struct vcpu *v,
"because the host mtrr type is:%d\n",
gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type,
shadow_mtrr_type);
- pat_entry_value = PAT_TYPE_UNCACHABLE;
+ pat_entry_value = X86_MT_UC;
}
/* 4. Get the pte flags */
return pat_type_2_pte_flags(pat_entry_value);
@@ -620,13 +620,13 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
p2m_memory_type_changed(d);
switch ( type )
{
- case PAT_TYPE_UC_MINUS:
+ case X86_MT_UCM:
/*
* For EPT we can also avoid the flush in this case;
* see epte_get_entry_emt().
*/
if ( hap_enabled(d) && cpu_has_vmx )
- case PAT_TYPE_UNCACHABLE:
+ case X86_MT_UC:
break;
/* fall through */
default:
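
The construct above is easy to misread: the case X86_MT_UC label sits inside the if() body, so UC always reaches the break, while UC- reaches it only with HAP on VMX (i.e. EPT); every other type falls through to the default arm. A de-sugared equivalent, using a hypothetical need_flush local (the cache flush in the default arm is assumed from the surrounding function):

    bool need_flush;

    switch ( type )
    {
    case X86_MT_UC:
        need_flush = false;   /* UC never needs the flush */
        break;
    case X86_MT_UCM:
        /* UC- may skip the flush only on EPT; see epte_get_entry_emt(). */
        need_flush = !(hap_enabled(d) && cpu_has_vmx);
        break;
    default:
        need_flush = true;
        break;
    }
    if ( need_flush )
        flush_all(FLUSH_CACHE);
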
@@ -638,12 +638,12 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
rcu_read_unlock(&pinned_cacheattr_rcu_lock);
return -ENOENT;
- case PAT_TYPE_UC_MINUS:
- case PAT_TYPE_UNCACHABLE:
- case PAT_TYPE_WRBACK:
- case PAT_TYPE_WRCOMB:
- case PAT_TYPE_WRPROT:
- case PAT_TYPE_WRTHROUGH:
+ case X86_MT_UCM:
+ case X86_MT_UC:
+ case X86_MT_WB:
+ case X86_MT_WC:
+ case X86_MT_WP:
+ case X86_MT_WT:
break;
default:
@@ -681,7 +681,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
list_add_rcu(&range->list, &d->arch.hvm.pinned_cacheattr_ranges);
p2m_memory_type_changed(d);
- if ( type != PAT_TYPE_WRBACK )
+ if ( type != X86_MT_WB )
flush_all(FLUSH_CACHE);
return 0;
@@ -1231,14 +1231,14 @@ static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
- * memory type are all UC.
+ * memory types are all UC.
*/
u64 uc_pat =
- ((uint64_t)PAT_TYPE_UNCACHABLE) | /* PAT0 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 8) | /* PAT1 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 16) | /* PAT2 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 24) | /* PAT3 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 32) | /* PAT4 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 40) | /* PAT5 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 48) | /* PAT6 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 56); /* PAT7 */
+ ((uint64_t)X86_MT_UC) | /* PAT0 */
+ ((uint64_t)X86_MT_UC << 8) | /* PAT1 */
+ ((uint64_t)X86_MT_UC << 16) | /* PAT2 */
+ ((uint64_t)X86_MT_UC << 24) | /* PAT3 */
+ ((uint64_t)X86_MT_UC << 32) | /* PAT4 */
+ ((uint64_t)X86_MT_UC << 40) | /* PAT5 */
+ ((uint64_t)X86_MT_UC << 48) | /* PAT6 */
+ ((uint64_t)X86_MT_UC << 56); /* PAT7 */
vmx_get_guest_pat(v, pat);
vmx_set_guest_pat(v, uc_pat);
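
One consequence of the assumed X86_MT_UC == 0 encoding: every field of uc_pat is zero, so the whole constant collapses to 0, and spelling out the eight entries, as above, only documents intent. A compile-time check:

    #include <assert.h>
    #include <stdint.h>

    #define X86_MT_UC 0x00   /* assumed encoding */

    static_assert(((uint64_t)X86_MT_UC       | (uint64_t)X86_MT_UC <<  8 |
                   (uint64_t)X86_MT_UC << 16 | (uint64_t)X86_MT_UC << 24 |
                   (uint64_t)X86_MT_UC << 32 | (uint64_t)X86_MT_UC << 40 |
                   (uint64_t)X86_MT_UC << 48 | (uint64_t)X86_MT_UC << 56) == 0,
                  "an all-UC PAT value collapses to zero");
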
@@ -16,17 +16,7 @@
#define NORMAL_CACHE_MODE 0
#define NO_FILL_CACHE_MODE 2
-enum {
- PAT_TYPE_UNCACHABLE=0,
- PAT_TYPE_WRCOMB=1,
- PAT_TYPE_WRTHROUGH=4,
- PAT_TYPE_WRPROT=5,
- PAT_TYPE_WRBACK=6,
- PAT_TYPE_UC_MINUS=7,
- PAT_TYPE_NUMS
-};
-
-#define INVALID_MEM_TYPE PAT_TYPE_NUMS
+#define INVALID_MEM_TYPE X86_NUM_MT
/* In the Intel processor's MTRR interface, the MTRR type is always held in
an 8 bit field: */
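
With the enum gone, this header keeps only the sentinel, now spelled X86_NUM_MT. The replacement constants are assumed to come from a common x86 definitions header with these values:

    /* Assumed replacement definitions; values mirror the removed enum,
     * and the gaps at 2 and 3 are reserved encodings in the SDM. */
    #define X86_MT_UC  0x00   /* Uncacheable */
    #define X86_MT_WC  0x01   /* Write-Combining */
    #define X86_MT_WT  0x04   /* Write-Through */
    #define X86_MT_WP  0x05   /* Write-Protect */
    #define X86_MT_WB  0x06   /* Write-Back */
    #define X86_MT_UCM 0x07   /* UC- (PAT only: MTRRs may override it to WC) */
    #define X86_NUM_MT 0x08   /* one past the largest valid type */

Because PAT_TYPE_NUMS was also 8, INVALID_MEM_TYPE and every table dimension keep their former values; the rename is purely mechanical.
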
@@ -573,8 +573,8 @@ int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
if ( gmtrr_mtype >= 0 )
{
*ipat = true;
- return gmtrr_mtype != PAT_TYPE_UC_MINUS ? gmtrr_mtype
- : MTRR_TYPE_UNCACHABLE;
+ return gmtrr_mtype != X86_MT_UCM ? gmtrr_mtype
+ : MTRR_TYPE_UNCACHABLE;
}
if ( gmtrr_mtype == -EADDRNOTAVAIL )
return -1;
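
The UC- special-casing above exists because UC- is a PAT-only encoding: EPT memory-type fields use MTRR-style values, which have no UC-, so a guest UC- type must degrade to plain UC. Sketched as a hypothetical helper:

    /* EPT memory-type fields use MTRR-style encodings, which have no UC-;
     * collapse the PAT-only UC- to plain UC. A hypothetical helper: */
    static int emt_from_guest_type(int type)
    {
        return type != X86_MT_UCM ? type : MTRR_TYPE_UNCACHABLE;
    }
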
@@ -561,7 +561,7 @@ _sh_propagate(struct vcpu *v,
(type = hvm_get_mem_pinned_cacheattr(d, target_gfn, 0)) >= 0 )
sflags |= pat_type_2_pte_flags(type);
else if ( d->arch.hvm.is_in_uc_mode )
- sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
+ sflags |= pat_type_2_pte_flags(X86_MT_UC);
else
if ( iomem_access_permitted(d, mfn_x(target_mfn), mfn_x(target_mfn)) )
{
@@ -572,7 +572,7 @@ _sh_propagate(struct vcpu *v,
mfn_to_maddr(target_mfn),
MTRR_TYPE_UNCACHABLE);
else if ( iommu_snoop )
- sflags |= pat_type_2_pte_flags(PAT_TYPE_WRBACK);
+ sflags |= pat_type_2_pte_flags(X86_MT_WB);
else
sflags |= get_pat_flags(v,
gflags,