@@ -829,6 +829,8 @@ int arch_domain_create(struct domain *d,
 
     psr_domain_init(d);
 
+    mapcache_domain_init(d);
+
     if ( is_hvm_domain(d) )
     {
         if ( (rc = hvm_domain_initialise(d, config)) != 0 )
@@ -836,8 +838,6 @@ int arch_domain_create(struct domain *d,
     }
     else if ( is_pv_domain(d) )
    {
-        mapcache_domain_init(d);
-
         if ( (rc = pv_domain_initialise(d)) != 0 )
             goto fail;
     }
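With this pair of hunks, mapcache_domain_init() runs for every domain type instead of only for PV, so the per-domain mapping cache exists before either hvm_domain_initialise() or pv_domain_initialise() is reached. The consumer-side idiom is unchanged; a minimal sketch follows, where the helper name zero_domain_frame() is hypothetical, not a Xen function:

    /* Sketch: map a frame, touch it through the temporary VA, unmap. */
    static void zero_domain_frame(mfn_t mfn)
    {
        void *va = map_domain_page(mfn);  /* mapcache slot or direct map */

        clear_page(va);                   /* operate through the mapping */
        unmap_domain_page(va);            /* release the slot for reuse */
    }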
@@ -82,11 +82,11 @@ void *map_domain_page(mfn_t mfn)
 #endif
 
     v = mapcache_current_vcpu();
-    if ( !v || !is_pv_vcpu(v) )
+    if ( !v )
         return mfn_to_virt(mfn_x(mfn));
 
-    dcache = &v->domain->arch.pv.mapcache;
-    vcache = &v->arch.pv.mapcache;
+    dcache = &v->domain->arch.mapcache;
+    vcache = &v->arch.mapcache;
     if ( !dcache->inuse )
         return mfn_to_virt(mfn_x(mfn));
 
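Both early returns above fall back to the direct map. A hedged sketch of what mfn_to_virt() amounts to on x86 (the tree's exact macro spelling may differ):

    /* Sketch: the direct-map fallback is fixed-offset arithmetic, with no
     * page-table walk and no per-vCPU state, hence no mapcache needed. */
    #define mfn_to_virt(mfn) \
        ((void *)(DIRECTMAP_VIRT_START + ((unsigned long)(mfn) << PAGE_SHIFT)))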
@@ -187,14 +187,14 @@ void unmap_domain_page(const void *ptr)
     ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
 
     v = mapcache_current_vcpu();
-    ASSERT(v && is_pv_vcpu(v));
+    ASSERT(v);
 
-    dcache = &v->domain->arch.pv.mapcache;
+    dcache = &v->domain->arch.mapcache;
     ASSERT(dcache->inuse);
 
     idx = PFN_DOWN(va - MAPCACHE_VIRT_START);
     mfn = l1e_get_pfn(MAPCACHE_L1ENT(idx));
-    hashent = &v->arch.pv.mapcache.hash[MAPHASH_HASHFN(mfn)];
+    hashent = &v->arch.mapcache.hash[MAPHASH_HASHFN(mfn)];
 
     local_irq_save(flags);
 
@@ -233,11 +233,9 @@ void unmap_domain_page(const void *ptr)
 
 int mapcache_domain_init(struct domain *d)
 {
-    struct mapcache_domain *dcache = &d->arch.pv.mapcache;
+    struct mapcache_domain *dcache = &d->arch.mapcache;
     unsigned int bitmap_pages;
 
-    ASSERT(is_pv_domain(d));
-
 #ifdef NDEBUG
     if ( !mem_hotplug && max_page <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
         return 0;
@@ -261,12 +259,12 @@ int mapcache_domain_init(struct domain *d)
 int mapcache_vcpu_init(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    struct mapcache_domain *dcache = &d->arch.pv.mapcache;
+    struct mapcache_domain *dcache = &d->arch.mapcache;
     unsigned long i;
     unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
     unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
 
-    if ( !is_pv_vcpu(v) || !dcache->inuse )
+    if ( !dcache->inuse )
         return 0;
 
     if ( ents > dcache->entries )
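The sizing above scales with the vCPU count. A self-contained worked example (MAPCACHE_VCPU_ENTRIES and the 4 KiB page size are assumptions for illustration, not taken from this tree):

    #include <stdio.h>

    #define MAPCACHE_VCPU_ENTRIES 16                 /* assumed value */
    #define BITS_TO_LONGS(n) (((n) + 63) / 64)       /* 64-bit longs */
    #define PFN_UP(x) (((x) + 4095) / 4096)          /* 4 KiB pages */

    int main(void)
    {
        unsigned int max_vcpus = 128;
        unsigned int ents = max_vcpus * MAPCACHE_VCPU_ENTRIES;        /* 2048 */
        unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long)); /* 1 */

        printf("entries=%u, inuse bitmap pages=%u\n", ents, nr);
        return 0;
    }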
@@ -293,7 +291,7 @@ int mapcache_vcpu_init(struct vcpu *v)
     BUILD_BUG_ON(MAPHASHENT_NOTINUSE < MAPCACHE_ENTRIES);
     for ( i = 0; i < MAPHASH_ENTRIES; i++ )
     {
-        struct vcpu_maphash_entry *hashent = &v->arch.pv.mapcache.hash[i];
+        struct vcpu_maphash_entry *hashent = &v->arch.mapcache.hash[i];
 
         hashent->mfn = ~0UL; /* never valid to map */
         hashent->idx = MAPHASHENT_NOTINUSE;
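This hunk only re-homes the hash; the caching scheme itself is untouched. For reference, a self-contained sketch of the per-vCPU maphash that unmap_domain_page() consults, where the constants and field layout are assumptions rather than copies from domain_page.c:

    #include <stdint.h>

    /* Assumed shape of the per-vCPU hash of recently unmapped frames. */
    #define MAPHASH_ENTRIES     8                        /* assumed */
    #define MAPHASH_HASHFN(mfn) ((mfn) & (MAPHASH_ENTRIES - 1))
    #define MAPHASHENT_NOTINUSE (~0U)                    /* assumed sentinel */

    struct vcpu_maphash_entry {
        unsigned long mfn;    /* frame cached in this slot; ~0UL if none */
        uint32_t idx;         /* mapcache index, or MAPHASHENT_NOTINUSE */
        uint32_t refcnt;      /* live users of the cached mapping */
    };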
@@ -285,9 +285,6 @@ struct pv_domain
     /* Mitigate L1TF with shadow/crashing? */
     bool check_l1tf;
 
-    /* map_domain_page() mapping cache. */
-    struct mapcache_domain mapcache;
-
     struct cpuidmasks *cpuidmasks;
 };
 
@@ -326,6 +323,9 @@ struct arch_domain
 
     uint8_t spec_ctrl_flags; /* See SCF_DOM_MASK */
 
+    /* map_domain_page() mapping cache. */
+    struct mapcache_domain mapcache;
+
     union {
         struct pv_domain pv;
         struct hvm_domain hvm;
@@ -508,9 +508,6 @@ struct arch_domain
 
 struct pv_vcpu
 {
-    /* map_domain_page() mapping cache. */
-    struct mapcache_vcpu mapcache;
-
     unsigned int vgc_flags;
 
     struct trap_info *trap_ctxt;
@@ -610,6 +607,9 @@ struct arch_vcpu
 #define async_exception_state(t) async_exception_state[(t)-1]
     uint8_t async_exception_mask;
 
+    /* map_domain_page() mapping cache. */
+    struct mapcache_vcpu mapcache;
+
     /* Virtual Machine Extensions */
     union {
         struct pv_vcpu pv;
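Net effect on the data structures: the mapcache members move out of the PV-only union members into the common parts of struct arch_domain and struct arch_vcpu, so HVM (and any future domain type) carries them too. A compilable miniature of the placement change, using stand-in types rather than Xen's:

    /* Stand-in types; only the position relative to the union matters. */
    struct mapcache_domain { void *inuse; };
    struct pv_domain       { int pv_only_state; };
    struct hvm_domain      { int hvm_only_state; };

    struct arch_domain_sketch {
        struct mapcache_domain mapcache;   /* common to every domain type */
        union {
            struct pv_domain pv;           /* no longer holds the mapcache */
            struct hvm_domain hvm;
        };
    };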