@@ -1691,6 +1691,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
int sharing_enomem = 0;
vm_event_request_t *req_ptr = NULL;
bool_t ap2m_active, sync = 0;
+ unsigned int page_order;
/* On Nested Virtualization, walk the guest page table.
* If this succeeds, all is fine.
@@ -1757,19 +1758,23 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
hostp2m = p2m_get_hostp2m(currd);
mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
- NULL);
+ &page_order);
if ( ap2m_active )
{
- if ( p2m_altp2m_lazy_copy(curr, gpa, gla, npfec, &p2m) )
+ p2m = p2m_get_altp2m(curr);
+
+ /*
+ * Get the altp2m entry if present; or if not, propagate from
+ * the host p2m. NB that this returns with gfn locked in the
+ * altp2m.
+ */
+ if ( p2m_altp2m_get_or_propagate(p2m, gfn, &mfn, &p2mt, &p2ma, page_order) )
{
- /* entry was lazily copied from host -- retry */
- __put_gfn(hostp2m, gfn);
+ /* Entry was copied from host -- retry fault */
rc = 1;
- goto out;
+ goto out_put_gfn;
}
-
- mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
}
else
p2m = hostp2m;
@@ -2397,65 +2397,74 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
}
/*
- * If the fault is for a not present entry:
- * if the entry in the host p2m has a valid mfn, copy it and retry
- * else indicate that outer handler should handle fault
+ * Read info about the gfn in an altp2m, locking the gfn.
*
- * If the fault is for a present entry:
- * indicate that outer handler should handle fault
+ * If the entry is valid, pass the results back to the caller.
+ *
+ * If the entry is invalid, and the host's entry is also invalid,
+ * return to the caller without any changes.
+ *
+ * If the entry is invalid, and the host's entry is valid, propagate
+ * the host's entry to the altp2m (retaining page order), and indicate
+ * that the caller should re-try the faulting instruction.
*/
-
-bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
- unsigned long gla, struct npfec npfec,
- struct p2m_domain **ap2m)
+bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
+ mfn_t *mfn, p2m_type_t *p2mt,
+ p2m_access_t *p2ma, unsigned int page_order)
{
- struct p2m_domain *hp2m = p2m_get_hostp2m(v->domain);
- p2m_type_t p2mt;
- p2m_access_t p2ma;
- unsigned int page_order;
- gfn_t gfn = _gfn(paddr_to_pfn(gpa));
+ p2m_type_t ap2mt;
+ p2m_access_t ap2ma;
unsigned long mask;
- mfn_t mfn;
- int rv;
-
- *ap2m = p2m_get_altp2m(v);
-
- mfn = get_gfn_type_access(*ap2m, gfn_x(gfn), &p2mt, &p2ma,
- 0, &page_order);
- __put_gfn(*ap2m, gfn_x(gfn));
-
- if ( !mfn_eq(mfn, INVALID_MFN) )
- return 0;
+ gfn_t gfn;
+ mfn_t amfn;
+ int rc;
- mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
- P2M_ALLOC, &page_order);
- __put_gfn(hp2m, gfn_x(gfn));
+ /*
+ * NB we must get the full lock on the altp2m here, in addition to
+ * the lock on the individual gfn, since we may change a range of
+ * gfns below.
+ */
+ p2m_lock(ap2m);
+
+ amfn = get_gfn_type_access(ap2m, gfn_l, &ap2mt, &ap2ma, 0, NULL);
- if ( mfn_eq(mfn, INVALID_MFN) )
- return 0;
+ if ( !mfn_eq(amfn, INVALID_MFN) )
+ {
+ p2m_unlock(ap2m);
+ *mfn = amfn;
+ *p2mt = ap2mt;
+ *p2ma = ap2ma;
+ return false;
+ }
- p2m_lock(*ap2m);
+ /* Host entry is also invalid; don't bother setting the altp2m entry. */
+ if ( mfn_eq(*mfn, INVALID_MFN) )
+ {
+ p2m_unlock(ap2m);
+ return false;
+ }
/*
* If this is a superpage mapping, round down both frame numbers
- * to the start of the superpage.
+ * to the start of the superpage. NB that we repurpose `amfn`
+ * here.
*/
mask = ~((1UL << page_order) - 1);
- mfn = _mfn(mfn_x(mfn) & mask);
- gfn = _gfn(gfn_x(gfn) & mask);
+ amfn = _mfn(mfn_x(*mfn) & mask);
+ gfn = _gfn(gfn_l & mask);
- rv = p2m_set_entry(*ap2m, gfn, mfn, page_order, p2mt, p2ma);
- p2m_unlock(*ap2m);
+ rc = p2m_set_entry(ap2m, gfn, amfn, page_order, *p2mt, *p2ma);
+ p2m_unlock(ap2m);
- if ( rv )
+ if ( rc )
{
- gdprintk(XENLOG_ERR,
- "failed to set entry for %#"PRIx64" -> %#"PRIx64" p2m %#"PRIx64"\n",
- gfn_x(gfn), mfn_x(mfn), (unsigned long)*ap2m);
- domain_crash(hp2m->domain);
+ gprintk(XENLOG_ERR,
+ "failed to set entry for %#"PRIx64" -> %#"PRIx64" altp2m %#"PRIx16". rc: %"PRIi32"\n",
+ gfn_l, mfn_x(amfn), vcpu_altp2m(current).p2midx, rc);
+ domain_crash(ap2m->domain);
}
-
- return 1;
+
+ return true;
}
enum altp2m_reset_type {
@@ -879,8 +879,9 @@ void p2m_altp2m_check(struct vcpu *v, uint16_t idx);
void p2m_flush_altp2m(struct domain *d);
/* Alternate p2m paging */
-bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
- unsigned long gla, struct npfec npfec, struct p2m_domain **ap2m);
+bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
+ mfn_t *mfn, p2m_type_t *p2mt,
+ p2m_access_t *p2ma, unsigned int page_order);
/* Make a specific alternate p2m valid */
int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);