@@ -4887,10 +4887,10 @@ bool asmlinkage vmx_vmenter_helper(const struct cpu_user_regs *regs)
for ( i = 0; i < MAX_ALTP2M; ++i )
{
- if ( currd->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(currd, i) == mfn_x(INVALID_MFN) )
continue;
- ept = &currd->arch.altp2m_p2m[i]->ept;
+ ept = &altp2m_get_p2m(currd, i)->ept;
if ( cpumask_test_cpu(cpu, ept->invalidate) )
{
cpumask_clear_cpu(cpu, ept->invalidate);
@@ -19,6 +19,38 @@ static inline bool altp2m_active(const struct domain *d)
return d->arch.altp2m_active;
}
+static inline struct p2m_domain *altp2m_get_p2m(const struct domain *d,
+                                                unsigned int idx)
+{
+ return d->arch.altp2m_p2m[array_index_nospec(idx, MAX_ALTP2M)];
+}
+
+static inline uint64_t altp2m_get_eptp(const struct domain *d,
+                                       unsigned int idx)
+{
+ return d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)];
+}
+
+static inline void altp2m_set_eptp(struct domain *d,
+                                   unsigned int idx,
+                                   uint64_t eptp)
+{
+ d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] = eptp;
+}
+
+static inline uint64_t altp2m_get_visible_eptp(const struct domain *d,
+                                               unsigned int idx)
+{
+ return d->arch.altp2m_visible_eptp[array_index_nospec(idx, MAX_EPTP)];
+}
+
+static inline void altp2m_set_visible_eptp(struct domain *d,
+                                           unsigned int idx,
+                                           uint64_t eptp)
+{
+ d->arch.altp2m_visible_eptp[array_index_nospec(idx, MAX_EPTP)] = eptp;
+}
+
/* Alternate p2m VCPU */
void altp2m_vcpu_initialise(struct vcpu *v);
void altp2m_vcpu_destroy(struct vcpu *v);
@@ -18,6 +18,7 @@
#include <xen/mem_access.h>
#include <asm/mem_sharing.h>
#include <asm/page.h> /* for pagetable_t */
+#include <asm/altp2m.h>
/* Debugging and auditing of the P2M code? */
#if !defined(NDEBUG) && defined(CONFIG_HVM)
@@ -888,13 +889,14 @@ static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
BUG_ON(index >= MAX_ALTP2M);
- return v->domain->arch.altp2m_p2m[index];
+ return altp2m_get_p2m(v->domain, index);
}
/* set current alternate p2m table */
static inline bool p2m_set_altp2m(struct vcpu *v, unsigned int idx)
{
struct p2m_domain *orig;
+ struct p2m_domain *ap2m;
BUG_ON(idx >= MAX_ALTP2M);
@@ -906,7 +908,8 @@ static inline bool p2m_set_altp2m(struct vcpu *v, unsigned int idx)
atomic_dec(&orig->active_vcpus);
vcpu_altp2m(v).p2midx = idx;
- atomic_inc(&v->domain->arch.altp2m_p2m[idx]->active_vcpus);
+ ap2m = altp2m_get_p2m(v->domain, idx);
+ atomic_inc(&ap2m->active_vcpus);
return true;
}
@@ -205,7 +205,7 @@ bool p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, idx) != mfn_x(INVALID_MFN) )
{
if ( p2m_set_altp2m(v, idx) )
altp2m_vcpu_update_p2m(v);
@@ -307,7 +307,7 @@ static void p2m_reset_altp2m(struct domain *d, unsigned int idx,
struct p2m_domain *p2m;
ASSERT(idx < MAX_ALTP2M);
- p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
+ p2m = altp2m_get_p2m(d, idx);
p2m_lock(p2m);
@@ -335,8 +335,8 @@ void p2m_flush_altp2m(struct domain *d)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
p2m_reset_altp2m(d, i, ALTP2M_DEACTIVATE);
- d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
- d->arch.altp2m_visible_eptp[i] = mfn_x(INVALID_MFN);
+ altp2m_set_eptp(d, i, mfn_x(INVALID_MFN));
+ altp2m_set_visible_eptp(d, i, mfn_x(INVALID_MFN));
}
altp2m_list_unlock(d);
@@ -350,7 +350,7 @@ static int p2m_activate_altp2m(struct domain *d, unsigned int idx,
ASSERT(idx < MAX_ALTP2M);
- p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
+ p2m = altp2m_get_p2m(d, idx);
hostp2m = p2m_get_hostp2m(d);
p2m_lock(p2m);
@@ -393,8 +393,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] ==
- mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, idx) == mfn_x(INVALID_MFN) )
rc = p2m_activate_altp2m(d, idx, hostp2m->default_access);
altp2m_list_unlock(d);
@@ -417,7 +416,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx,
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, i) != mfn_x(INVALID_MFN) )
continue;
rc = p2m_activate_altp2m(d, i, a);
@@ -447,18 +446,15 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
rc = -EBUSY;
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] !=
- mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, idx) != mfn_x(INVALID_MFN) )
{
- p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
+ p2m = altp2m_get_p2m(d, idx);
if ( !_atomic_read(p2m->active_vcpus) )
{
p2m_reset_altp2m(d, idx, ALTP2M_DEACTIVATE);
- d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] =
- mfn_x(INVALID_MFN);
- d->arch.altp2m_visible_eptp[array_index_nospec(idx, MAX_EPTP)] =
- mfn_x(INVALID_MFN);
+ altp2m_set_eptp(d, idx, mfn_x(INVALID_MFN));
+ altp2m_set_visible_eptp(d, idx, mfn_x(INVALID_MFN));
rc = 0;
}
}
@@ -485,7 +481,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
rc = -EINVAL;
altp2m_list_lock(d);
- if ( d->arch.altp2m_visible_eptp[idx] != mfn_x(INVALID_MFN) )
+ if ( altp2m_get_visible_eptp(d, idx) != mfn_x(INVALID_MFN) )
{
for_each_vcpu( d, v )
if ( p2m_set_altp2m(v, idx) )
@@ -510,13 +506,12 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
mfn_t mfn;
int rc = -EINVAL;
- if ( idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
- d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] ==
- mfn_x(INVALID_MFN) )
+ if ( idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ altp2m_get_eptp(d, idx) == mfn_x(INVALID_MFN) )
return rc;
hp2m = p2m_get_hostp2m(d);
- ap2m = array_access_nospec(d->arch.altp2m_p2m, idx);
+ ap2m = altp2m_get_p2m(d, idx);
p2m_lock(hp2m);
p2m_lock(ap2m);
@@ -577,10 +572,10 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
p2m_type_t t;
p2m_access_t a;
- if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, i) == mfn_x(INVALID_MFN) )
continue;
- p2m = d->arch.altp2m_p2m[i];
+ p2m = altp2m_get_p2m(d, i);
/* Check for a dropped page that may impact this altp2m */
if ( mfn_eq(mfn, INVALID_MFN) &&
@@ -598,7 +593,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
for ( i = 0; i < MAX_ALTP2M; i++ )
{
if ( i == last_reset_idx ||
- d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+ altp2m_get_eptp(d, i) == mfn_x(INVALID_MFN) )
continue;
p2m_reset_altp2m(d, i, ALTP2M_RESET);
@@ -660,11 +655,10 @@ int p2m_set_suppress_ve_multi(struct domain *d,
if ( sve->view > 0 )
{
if ( sve->view >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
- d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
- mfn_x(INVALID_MFN) )
+ altp2m_get_eptp(d, sve->view) == mfn_x(INVALID_MFN) )
return -EINVAL;
- p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, sve->view);
+ p2m = ap2m = altp2m_get_p2m(d, sve->view);
}
p2m_lock(host_p2m);
@@ -728,11 +722,10 @@ int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
if ( altp2m_idx > 0 )
{
if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
- d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
- mfn_x(INVALID_MFN) )
+ altp2m_get_eptp(d, altp2m_idx) == mfn_x(INVALID_MFN) )
return -EINVAL;
- p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
+ p2m = ap2m = altp2m_get_p2m(d, altp2m_idx);
}
else
p2m = host_p2m;
@@ -766,15 +759,12 @@ int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int altp2m_idx,
* min(MAX_ALTP2M, MAX_EPTP).
*/
if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
- d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
- mfn_x(INVALID_MFN) )
+ altp2m_get_eptp(d, altp2m_idx) == mfn_x(INVALID_MFN) )
rc = -EINVAL;
else if ( visible )
- d->arch.altp2m_visible_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] =
- d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)];
+ altp2m_set_visible_eptp(d, altp2m_idx, altp2m_get_eptp(d, altp2m_idx));
else
- d->arch.altp2m_visible_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] =
- mfn_x(INVALID_MFN);
+ altp2m_set_visible_eptp(d, altp2m_idx, mfn_x(INVALID_MFN));
altp2m_list_unlock(d);
@@ -511,13 +511,13 @@ int hap_enable(struct domain *d, u32 mode)
for ( i = 0; i < MAX_EPTP; i++ )
{
- d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
- d->arch.altp2m_visible_eptp[i] = mfn_x(INVALID_MFN);
+ altp2m_set_eptp(d, i, mfn_x(INVALID_MFN));
+ altp2m_set_visible_eptp(d, i, mfn_x(INVALID_MFN));
}
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- rv = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+ rv = p2m_alloc_table(altp2m_get_p2m(d, i));
if ( rv != 0 )
goto out;
}
@@ -592,7 +592,7 @@ void hap_teardown(struct domain *d, bool *preempted)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- p2m_teardown(d->arch.altp2m_p2m[i], false, preempted);
+ p2m_teardown(altp2m_get_p2m(d, i), false, preempted);
if ( preempted && *preempted )
return;
}
@@ -348,11 +348,10 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
if ( altp2m_idx )
{
if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
- d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
- mfn_x(INVALID_MFN) )
+ altp2m_get_eptp(d, altp2m_idx) == mfn_x(INVALID_MFN) )
return -EINVAL;
- ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
+ ap2m = altp2m_get_p2m(d, altp2m_idx);
}
if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
@@ -404,11 +403,10 @@ long p2m_set_mem_access_multi(struct domain *d,
if ( altp2m_idx )
{
if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
- d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
- mfn_x(INVALID_MFN) )
+ altp2m_get_eptp(d, altp2m_idx) == mfn_x(INVALID_MFN) )
return -EINVAL;
- ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
+ ap2m = altp2m_get_p2m(d, altp2m_idx);
}
p2m_lock(p2m);
@@ -467,11 +465,10 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
else if ( altp2m_idx ) /* altp2m view 0 is treated as the hostp2m */
{
if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
- d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
- mfn_x(INVALID_MFN) )
+ altp2m_get_eptp(d, altp2m_idx) == mfn_x(INVALID_MFN) )
return -EINVAL;
- p2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
+ p2m = altp2m_get_p2m(d, altp2m_idx);
}
return _p2m_get_mem_access(p2m, gfn, access);
@@ -488,7 +485,7 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
unsigned int i;
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+ struct p2m_domain *p2m = altp2m_get_p2m(d, i);
if ( p2m )
p2m->access_required = access_required;
@@ -914,7 +914,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- ap2m = d->arch.altp2m_p2m[i];
+ ap2m = altp2m_get_p2m(d, i);
if ( !ap2m )
continue;
@@ -1297,10 +1297,10 @@ static void ept_set_ad_sync(struct domain *d, bool value)
{
struct p2m_domain *p2m;
- if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, i) == mfn_x(INVALID_MFN) )
continue;
- p2m = d->arch.altp2m_p2m[i];
+ p2m = altp2m_get_p2m(d, i);
p2m_lock(p2m);
p2m->ept.ad = value;
@@ -1500,15 +1500,15 @@ void setup_ept_dump(void)
void p2m_init_altp2m_ept(struct domain *d, unsigned int i)
{
- struct p2m_domain *p2m = array_access_nospec(d->arch.altp2m_p2m, i);
+ struct p2m_domain *p2m = altp2m_get_p2m(d, i);
struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
struct ept_data *ept;
p2m->ept.ad = hostp2m->ept.ad;
ept = &p2m->ept;
ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
- d->arch.altp2m_eptp[array_index_nospec(i, MAX_EPTP)] = ept->eptp;
- d->arch.altp2m_visible_eptp[array_index_nospec(i, MAX_EPTP)] = ept->eptp;
+ altp2m_set_eptp(d, i, ept->eptp);
+ altp2m_set_visible_eptp(d, i, ept->eptp);
}
unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
@@ -1521,10 +1521,10 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, i) == mfn_x(INVALID_MFN) )
continue;
- p2m = d->arch.altp2m_p2m[i];
+ p2m = altp2m_get_p2m(d, i);
ept = &p2m->ept;
if ( eptp == ept->eptp )
@@ -107,9 +107,9 @@ void p2m_change_entry_type_global(struct domain *d,
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, i) != mfn_x(INVALID_MFN) )
{
- struct p2m_domain *altp2m = d->arch.altp2m_p2m[i];
+ struct p2m_domain *altp2m = altp2m_get_p2m(d, i);
p2m_lock(altp2m);
change_entry_type_global(altp2m, ot, nt);
@@ -142,9 +142,9 @@ void p2m_memory_type_changed(struct domain *d)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, i) != mfn_x(INVALID_MFN) )
{
- struct p2m_domain *altp2m = d->arch.altp2m_p2m[i];
+ struct p2m_domain *altp2m = altp2m_get_p2m(d, i);
p2m_lock(altp2m);
_memory_type_changed(altp2m);
@@ -915,9 +915,9 @@ void p2m_change_type_range(struct domain *d,
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, i) != mfn_x(INVALID_MFN) )
{
- struct p2m_domain *altp2m = d->arch.altp2m_p2m[i];
+ struct p2m_domain *altp2m = altp2m_get_p2m(d, i);
p2m_lock(altp2m);
change_type_range(altp2m, start, end, ot, nt);
@@ -988,9 +988,9 @@ int p2m_finish_type_change(struct domain *d,
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+ if ( altp2m_get_eptp(d, i) != mfn_x(INVALID_MFN) )
{
- struct p2m_domain *altp2m = d->arch.altp2m_p2m[i];
+ struct p2m_domain *altp2m = altp2m_get_p2m(d, i);
p2m_lock(altp2m);
rc = finish_type_change(altp2m, first_gfn, max_nr);