@@ -4654,7 +4654,8 @@ static int do_altp2m_op(
     }
 
     case HVMOP_altp2m_create_p2m:
-        if ( !(rc = p2m_init_next_altp2m(d, &a.u.view.view)) )
+        if ( !(rc = p2m_init_next_altp2m(d, &a.u.view.view,
+                                         a.u.view.hvmmem_default_access)) )
             rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
         break;
 
@@ -314,9 +314,9 @@ static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
     return rc;
 }
 
-static bool xenmem_access_to_p2m_access(struct p2m_domain *p2m,
-                                        xenmem_access_t xaccess,
-                                        p2m_access_t *paccess)
+bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
+                                 xenmem_access_t xaccess,
+                                 p2m_access_t *paccess)
 {
     static const p2m_access_t memaccess[] = {
 #define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
@@ -25,6 +25,7 @@
 #include <xen/guest_access.h> /* copy_from_guest() */
 #include <xen/iommu.h>
+#include <xen/mem_access.h>
 #include <xen/vm_event.h>
 #include <xen/event.h>
 #include <public/vm_event.h>
@@ -2536,7 +2537,8 @@ void p2m_flush_altp2m(struct domain *d)
     altp2m_list_unlock(d);
 }
 
-static int p2m_activate_altp2m(struct domain *d, unsigned int idx)
+static int p2m_activate_altp2m(struct domain *d, unsigned int idx,
+                               p2m_access_t hvmmem_default_access)
 {
     struct p2m_domain *hostp2m, *p2m;
     int rc;
@@ -2562,7 +2564,7 @@ static int p2m_activate_altp2m(struct domain *d, unsigned int idx)
         goto out;
     }
 
-    p2m->default_access = hostp2m->default_access;
+    p2m->default_access = hvmmem_default_access;
     p2m->domain = hostp2m->domain;
     p2m->global_logdirty = hostp2m->global_logdirty;
     p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
@@ -2579,6 +2581,7 @@ static int p2m_activate_altp2m(struct domain *d, unsigned int idx)
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
 {
     int rc = -EINVAL;
+    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
 
     if ( idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) )
         return rc;
@@ -2587,16 +2590,23 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
 
     if ( d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] ==
          mfn_x(INVALID_MFN) )
-        rc = p2m_activate_altp2m(d, idx);
+        rc = p2m_activate_altp2m(d, idx, hostp2m->default_access);
 
     altp2m_list_unlock(d);
 
     return rc;
 }
 
-int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
+int p2m_init_next_altp2m(struct domain *d, uint16_t *idx,
+                         xenmem_access_t hvmmem_default_access)
 {
     int rc = -EINVAL;
     unsigned int i;
+    p2m_access_t a;
+    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
+
+    if ( hvmmem_default_access > XENMEM_access_default ||
+         !xenmem_access_to_p2m_access(hostp2m, hvmmem_default_access, &a) )
+        return rc;
 
     altp2m_list_lock(d);
@@ -2605,7 +2615,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
         if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
             continue;
 
-        rc = p2m_activate_altp2m(d, i);
+        rc = p2m_activate_altp2m(d, i, a);
 
         if ( !rc )
             *idx = i;
@@ -884,7 +884,8 @@ bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);
 
 /* Find an available alternate p2m and make it valid */
-int p2m_init_next_altp2m(struct domain *d, uint16_t *idx);
+int p2m_init_next_altp2m(struct domain *d, uint16_t *idx,
+                         xenmem_access_t hvmmem_default_access);
 
 /* Make a specific alternate p2m invalid */
 int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx);
@@ -251,8 +251,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_disable_notify_t);
 struct xen_hvm_altp2m_view {
     /* IN/OUT variable */
     uint16_t view;
-    /* Create view only: default access type
-     * NOTE: currently ignored */
     uint16_t hvmmem_default_access; /* xenmem_access_t */
 };
 typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
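
[Illustrative aside, not part of the patch] A minimal toolstack-side sketch of how the now-honored hvmmem_default_access field could be exercised, assuming the libxc wrapper xc_altp2m_create_view(), which in the tools tree already accepts a xenmem_access_t default_access argument and forwards it in xen_hvm_altp2m_view; the helper name create_restricted_view() is hypothetical:

    /* Hypothetical example: create an altp2m view whose default access is rw (no execute). */
    #include <stdio.h>
    #include <xenctrl.h>

    int create_restricted_view(uint32_t domid)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint16_t view_id = 0;
        int rc;

        if ( !xch )
            return -1;

        /*
         * With this patch, p2m_init_next_altp2m() converts the requested
         * access via xenmem_access_to_p2m_access() and uses it as the new
         * view's default_access, instead of copying the host p2m's value.
         */
        rc = xc_altp2m_create_view(xch, domid, XENMEM_access_rw, &view_id);
        if ( !rc )
            printf("created altp2m view %u\n", (unsigned int)view_id);

        xc_interface_close(xch);
        return rc;
    }
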
@@ -58,6 +58,10 @@ typedef enum {
     /* NOTE: Assumed to be only 4 bits right now on x86. */
 } p2m_access_t;
 
+bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
+                                 xenmem_access_t xaccess,
+                                 p2m_access_t *paccess);
+
 /*
  * Set access type for a region of gfns.
  * If gfn == INVALID_GFN, sets the default access type.