@@ -2623,11 +2623,14 @@ static int __get_page_type(struct page_info *page, unsigned long type,
if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
{
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_ret = iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+ iommu_ret = iommu_unmap_pages(d,
+ mfn_to_gmfn(d, page_to_mfn(page)),
+ 0);
else if ( type == PGT_writable_page )
- iommu_ret = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
- page_to_mfn(page),
- IOMMUF_readable|IOMMUF_writable);
+ iommu_ret = iommu_map_pages(d,
+ mfn_to_gmfn(d, page_to_mfn(page)),
+ page_to_mfn(page), 0,
+ IOMMUF_readable|IOMMUF_writable);
}
}
@@ -870,26 +870,9 @@ out:
else
{
if ( iommu_flags )
- for ( i = 0; i < (1 << order); i++ )
- {
- rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
- if ( unlikely(rc) )
- {
- while ( i-- )
- /* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(p2m->domain, gfn + i) )
- continue;
-
- break;
- }
- }
+ rc = iommu_map_pages(d, gfn, mfn_x(mfn), order, iommu_flags);
else
- for ( i = 0; i < (1 << order); i++ )
- {
- ret = iommu_unmap_page(d, gfn + i);
- if ( !rc )
- rc = ret;
- }
+ rc = iommu_unmap_pages(d, gfn, order);
}
}
@@ -514,7 +514,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
{
/* XXX -- this might be able to be faster iff current->domain == d */
void *table;
- unsigned long i, gfn_remainder = gfn;
+ unsigned long gfn_remainder = gfn;
l1_pgentry_t *p2m_entry, entry_content;
/* Intermediate table to free if we're replacing it with a superpage. */
l1_pgentry_t intermediate_entry = l1e_empty();
@@ -722,28 +722,10 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
amd_iommu_flush_pages(p2m->domain, gfn, page_order);
}
else if ( iommu_pte_flags )
- for ( i = 0; i < (1UL << page_order); i++ )
- {
- rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
- iommu_pte_flags);
- if ( unlikely(rc) )
- {
- while ( i-- )
- /* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(p2m->domain, gfn + i) )
- continue;
-
- break;
- }
- }
+ rc = iommu_map_pages(p2m->domain, gfn, mfn_x(mfn), page_order,
+ iommu_pte_flags);
else
- for ( i = 0; i < (1UL << page_order); i++ )
- {
- int ret = iommu_unmap_page(p2m->domain, gfn + i);
-
- if ( !rc )
- rc = ret;
- }
+ rc = iommu_unmap_pages(p2m->domain, gfn, page_order);
}
/*
@@ -708,20 +708,9 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
if ( !paging_mode_translate(p2m->domain) )
{
- int rc = 0;
-
if ( need_iommu(p2m->domain) )
- {
- for ( i = 0; i < (1 << page_order); i++ )
- {
- int ret = iommu_unmap_page(p2m->domain, mfn + i);
-
- if ( !rc )
- rc = ret;
- }
- }
-
- return rc;
+ return iommu_unmap_pages(p2m->domain, mfn, page_order);
+ return 0;
}
ASSERT(gfn_locked_by_me(p2m, gfn));
@@ -768,23 +757,8 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
if ( !paging_mode_translate(d) )
{
if ( need_iommu(d) && t == p2m_ram_rw )
- {
- for ( i = 0; i < (1 << page_order); i++ )
- {
- rc = iommu_map_page(d, mfn_x(mfn_add(mfn, i)),
- mfn_x(mfn_add(mfn, i)),
- IOMMUF_readable|IOMMUF_writable);
- if ( rc != 0 )
- {
- while ( i-- > 0 )
- /* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(d, mfn_x(mfn_add(mfn, i))) )
- continue;
-
- return rc;
- }
- }
- }
+ return iommu_map_pages(d, mfn_x(mfn), mfn_x(mfn), page_order,
+ IOMMUF_readable|IOMMUF_writable);
return 0;
}
@@ -1148,7 +1122,7 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
{
if ( !need_iommu(d) )
return 0;
- return iommu_map_page(d, gfn, gfn, IOMMUF_readable|IOMMUF_writable);
+ return iommu_map_pages(d, gfn, gfn, 0, IOMMUF_readable|IOMMUF_writable);
}
gfn_lock(p2m, gfn, 0);
@@ -1236,7 +1210,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn)
{
if ( !need_iommu(d) )
return 0;
- return iommu_unmap_page(d, gfn);
+ return iommu_unmap_pages(d, gfn, 0);
}
gfn_lock(p2m, gfn, 0);
@@ -1442,13 +1442,14 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
if ( iommu_enabled && !iommu_passthrough && !need_iommu(hardware_domain) )
{
for ( i = spfn; i < epfn; i++ )
- if ( iommu_map_page(hardware_domain, i, i, IOMMUF_readable|IOMMUF_writable) )
+ if ( iommu_map_pages(hardware_domain, i, i, 0,
+ IOMMUF_readable|IOMMUF_writable) )
break;
if ( i != epfn )
{
while (i-- > old_max)
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(hardware_domain, i) )
+ if ( iommu_unmap_pages(hardware_domain, i, 0) )
continue;
goto destroy_m2p;
@@ -987,13 +987,13 @@ __gnttab_map_grant_ref(
!(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
{
if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map_page(ld, frame, frame,
- IOMMUF_readable|IOMMUF_writable);
+ err = iommu_map_pages(ld, frame, frame, 0,
+ IOMMUF_readable|IOMMUF_writable);
}
else if ( act_pin && !old_pin )
{
if ( !kind )
- err = iommu_map_page(ld, frame, frame, IOMMUF_readable);
+ err = iommu_map_pages(ld, frame, frame, 0, IOMMUF_readable);
}
if ( err )
{
@@ -1248,9 +1248,9 @@ __gnttab_unmap_common(
kind = mapkind(lgt, rd, op->frame);
if ( !kind )
- err = iommu_unmap_page(ld, op->frame);
+ err = iommu_unmap_pages(ld, op->frame, 0);
else if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map_page(ld, op->frame, op->frame, IOMMUF_readable);
+ err = iommu_map_pages(ld, op->frame, op->frame, 0, IOMMUF_readable);
double_gt_unlock(lgt, rgt);
@@ -631,8 +631,9 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
return 0;
}
-int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags)
+static int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
+ unsigned long mfn,
+ unsigned int flags)
{
bool_t need_flush = 0;
struct domain_iommu *hd = dom_iommu(d);
@@ -720,7 +721,8 @@ out:
return 0;
}
-int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
+static int __must_check amd_iommu_unmap_page(struct domain *d,
+ unsigned long gfn)
{
unsigned long pt_mfn[7];
struct domain_iommu *hd = dom_iommu(d);
@@ -771,6 +773,48 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
return 0;
}
+/* TODO: Fold map_page/unmap_page into order-aware map_pages/unmap_pages. */
+int __must_check amd_iommu_map_pages(struct domain *d, unsigned long gfn,
+ unsigned long mfn, unsigned int order,
+ unsigned int flags)
+{
+ unsigned long i;
+ int rc = 0;
+
+ for ( i = 0; i < (1UL << order); i++ )
+ {
+ rc = amd_iommu_map_page(d, gfn + i, mfn + i, flags);
+ if ( unlikely(rc) )
+ {
+ while ( i-- )
+ /* If statement to satisfy __must_check. */
+ if ( amd_iommu_unmap_page(d, gfn + i) )
+ continue;
+
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int __must_check amd_iommu_unmap_pages(struct domain *d, unsigned long gfn,
+ unsigned int order)
+{
+ unsigned long i;
+ int rc = 0;
+
+ for ( i = 0; i < (1UL << order); i++ )
+ {
+ int ret = amd_iommu_unmap_page(d, gfn + i);
+
+ if ( !rc )
+ rc = ret;
+ }
+
+ return rc;
+}
+
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
u64 phys_addr,
unsigned long size, int iw, int ir)
@@ -296,8 +296,8 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
*/
if ( mfn_valid(_mfn(pfn)) )
{
- int ret = amd_iommu_map_page(d, pfn, pfn,
- IOMMUF_readable|IOMMUF_writable);
+ int ret = amd_iommu_map_pages(d, pfn, pfn, 0,
+ IOMMUF_readable|IOMMUF_writable);
if ( !rc )
rc = ret;
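
Aside (not part of the patch): the order-decomposition loop added above for the AMD driver is repeated almost verbatim for the ARM SMMU and VT-d drivers further down. Until the TODO is addressed, the duplication could in principle be shared through a helper roughly like the sketch below; the helper name, the callback typedefs and their placement are purely illustrative assumptions, not something this patch introduces.

typedef int (*iommu_map_one_fn)(struct domain *d, unsigned long gfn,
                                unsigned long mfn, unsigned int flags);
typedef int (*iommu_unmap_one_fn)(struct domain *d, unsigned long gfn);

/* Map (1 << order) frames one by one, rolling back already-mapped ones on failure. */
static int __must_check iommu_decompose_map(struct domain *d,
                                            unsigned long gfn,
                                            unsigned long mfn,
                                            unsigned int order,
                                            unsigned int flags,
                                            iommu_map_one_fn map_one,
                                            iommu_unmap_one_fn unmap_one)
{
    unsigned long i;
    int rc = 0;

    for ( i = 0; i < (1UL << order); i++ )
    {
        rc = map_one(d, gfn + i, mfn + i, flags);
        if ( unlikely(rc) )
        {
            while ( i-- )
                /* If statement to satisfy __must_check. */
                if ( unmap_one(d, gfn + i) )
                    continue;

            break;
        }
    }

    return rc;
}
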
@@ -620,8 +620,8 @@ const struct iommu_ops amd_iommu_ops = {
.remove_device = amd_iommu_remove_device,
.assign_device = amd_iommu_assign_device,
.teardown = amd_iommu_domain_destroy,
- .map_page = amd_iommu_map_page,
- .unmap_page = amd_iommu_unmap_page,
+ .map_pages = amd_iommu_map_pages,
+ .unmap_pages = amd_iommu_unmap_pages,
.free_page_table = deallocate_page_table,
.reassign_device = reassign_device,
.get_device_group_id = amd_iommu_group_id,
@@ -2778,6 +2778,43 @@ static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long gfn)
return guest_physmap_remove_page(d, _gfn(gfn), _mfn(gfn), 0);
}
+/* TODO: Fold map_page/unmap_page into order-aware map_pages/unmap_pages. */
+static int __must_check arm_smmu_map_pages(struct domain *d, unsigned long gfn,
+ unsigned long mfn, unsigned int order, unsigned int flags)
+{
+ unsigned long i;
+ int rc = 0;
+
+ for (i = 0; i < (1UL << order); i++) {
+ rc = arm_smmu_map_page(d, gfn + i, mfn + i, flags);
+ if (unlikely(rc)) {
+ while (i--)
+ /* If statement to satisfy __must_check. */
+ if (arm_smmu_unmap_page(d, gfn + i))
+ continue;
+
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int __must_check arm_smmu_unmap_pages(struct domain *d,
+ unsigned long gfn, unsigned int order)
+{
+ unsigned long i;
+ int rc = 0;
+
+ for (i = 0; i < (1UL << order); i++) {
+ int ret = arm_smmu_unmap_page(d, gfn + i);
+ if (!rc)
+ rc = ret;
+ }
+
+ return rc;
+}
+
static const struct iommu_ops arm_smmu_iommu_ops = {
.init = arm_smmu_iommu_domain_init,
.hwdom_init = arm_smmu_iommu_hwdom_init,
@@ -2786,8 +2823,8 @@ static const struct iommu_ops arm_smmu_iommu_ops = {
.iotlb_flush_all = arm_smmu_iotlb_flush_all,
.assign_device = arm_smmu_assign_dev,
.reassign_device = arm_smmu_reassign_dev,
- .map_page = arm_smmu_map_page,
- .unmap_page = arm_smmu_unmap_page,
+ .map_pages = arm_smmu_map_pages,
+ .unmap_pages = arm_smmu_unmap_pages,
};
static __init const struct arm_smmu_device *find_smmu(const struct device *dev)
@@ -188,7 +188,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+ ret = hd->platform_ops->map_pages(d, gfn, mfn, 0, mapping);
if ( !rc )
rc = ret;
@@ -249,8 +249,8 @@ void iommu_domain_destroy(struct domain *d)
arch_iommu_domain_destroy(d);
}
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags)
+int iommu_map_pages(struct domain *d, unsigned long gfn, unsigned long mfn,
+ unsigned int order, unsigned int flags)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
@@ -258,13 +258,13 @@ int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->map_page(d, gfn, mfn, flags);
+ rc = hd->platform_ops->map_pages(d, gfn, mfn, order, flags);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU mapping gfn %#lx to mfn %#lx failed: %d\n",
- d->domain_id, gfn, mfn, rc);
+ "d%d: IOMMU mapping gfn %#lx to mfn %#lx order %u failed: %d\n",
+ d->domain_id, gfn, mfn, order, rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
@@ -273,7 +273,8 @@ int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
return rc;
}
-int iommu_unmap_page(struct domain *d, unsigned long gfn)
+int iommu_unmap_pages(struct domain *d, unsigned long gfn,
+ unsigned int order)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
@@ -281,13 +282,13 @@ int iommu_unmap_page(struct domain *d, unsigned long gfn)
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->unmap_page(d, gfn);
+ rc = hd->platform_ops->unmap_pages(d, gfn, order);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU unmapping gfn %#lx failed: %d\n",
- d->domain_id, gfn, rc);
+ "d%d: IOMMU unmapping gfn %#lx order %u failed: %d\n",
+ d->domain_id, gfn, order, rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
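
For orientation, a minimal caller sketch (not part of the patch) of the two common wrappers changed above. The helper names example_identity_map/example_identity_unmap and the choice of order 9 (512 contiguous 4K frames, i.e. a 2MiB-aligned block) are illustrative assumptions only; the calls themselves use the API exactly as declared in this patch.

static int __must_check example_identity_map(struct domain *d,
                                             unsigned long gfn)
{
    if ( !need_iommu(d) )
        return 0;

    /* One call now covers the whole 2MiB-aligned block. */
    return iommu_map_pages(d, gfn, gfn, 9,
                           IOMMUF_readable | IOMMUF_writable);
}

static int __must_check example_identity_unmap(struct domain *d,
                                               unsigned long gfn)
{
    if ( !need_iommu(d) )
        return 0;

    return iommu_unmap_pages(d, gfn, 9);
}
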
@@ -1816,6 +1816,50 @@ static int __must_check intel_iommu_unmap_page(struct domain *d,
return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
}
+/* TODO: Fold map_page/unmap_page into order-aware map_pages/unmap_pages. */
+static int __must_check intel_iommu_map_pages(struct domain *d,
+ unsigned long gfn,
+ unsigned long mfn,
+ unsigned int order,
+ unsigned int flags)
+{
+ unsigned long i;
+ int rc = 0;
+
+ for ( i = 0; i < (1UL << order); i++ )
+ {
+ rc = intel_iommu_map_page(d, gfn + i, mfn + i, flags);
+ if ( unlikely(rc) )
+ {
+ while ( i-- )
+ /* If statement to satisfy __must_check. */
+ if ( intel_iommu_unmap_page(d, gfn + i) )
+ continue;
+
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int __must_check intel_iommu_unmap_pages(struct domain *d,
+ unsigned long gfn,
+ unsigned int order)
+{
+ unsigned long i;
+ int rc = 0;
+
+ for ( i = 0; i < (1UL << order); i++ )
+ {
+ int ret = intel_iommu_unmap_page(d, gfn + i);
+ if ( !rc )
+ rc = ret;
+ }
+
+ return rc;
+}
+
int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
int order, int present)
{
@@ -2639,8 +2683,8 @@ const struct iommu_ops intel_iommu_ops = {
.remove_device = intel_iommu_remove_device,
.assign_device = intel_iommu_assign_device,
.teardown = iommu_domain_teardown,
- .map_page = intel_iommu_map_page,
- .unmap_page = intel_iommu_unmap_page,
+ .map_pages = intel_iommu_map_pages,
+ .unmap_pages = intel_iommu_unmap_pages,
.free_page_table = iommu_free_page_table,
.reassign_device = reassign_device_ownership,
.get_device_group_id = intel_iommu_group_id,
@@ -143,8 +143,8 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
for ( j = 0; j < tmp; j++ )
{
- int ret = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
- IOMMUF_readable|IOMMUF_writable);
+ int ret = iommu_map_pages(d, pfn * tmp + j, pfn * tmp + j, 0,
+ IOMMUF_readable|IOMMUF_writable);
if ( !rc )
rc = ret;
@@ -65,9 +65,9 @@ int arch_iommu_populate_page_table(struct domain *d)
{
ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
BUG_ON(SHARED_M2P(gfn));
- rc = hd->platform_ops->map_page(d, gfn, mfn,
- IOMMUF_readable |
- IOMMUF_writable);
+ rc = hd->platform_ops->map_pages(d, gfn, mfn, 0,
+ IOMMUF_readable |
+ IOMMUF_writable);
}
if ( rc )
{
@@ -52,9 +52,11 @@ int amd_iommu_init(void);
int amd_iommu_update_ivrs_mapping_acpi(void);
/* mapping functions */
-int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
- unsigned long mfn, unsigned int flags);
-int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+int __must_check amd_iommu_map_pages(struct domain *d, unsigned long gfn,
+ unsigned long mfn, unsigned int order,
+ unsigned int flags);
+int __must_check amd_iommu_unmap_pages(struct domain *d, unsigned long gfn,
+ unsigned int order);
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
@@ -71,14 +71,16 @@ int iommu_construct(struct domain *d);
/* Function used internally, use iommu_domain_destroy */
void iommu_teardown(struct domain *d);
-/* iommu_map_page() takes flags to direct the mapping operation. */
+/* iommu_map_pages() takes flags to direct the mapping operation. */
#define _IOMMUF_readable 0
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
-int __must_check iommu_map_page(struct domain *d, unsigned long gfn,
- unsigned long mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, unsigned long gfn);
+int __must_check iommu_map_pages(struct domain *d, unsigned long gfn,
+ unsigned long mfn, unsigned int order,
+ unsigned int flags);
+int __must_check iommu_unmap_pages(struct domain *d, unsigned long gfn,
+ unsigned int order);
enum iommu_feature
{
@@ -168,9 +170,11 @@ struct iommu_ops {
#endif /* HAS_PCI */
void (*teardown)(struct domain *d);
- int __must_check (*map_page)(struct domain *d, unsigned long gfn,
- unsigned long mfn, unsigned int flags);
- int __must_check (*unmap_page)(struct domain *d, unsigned long gfn);
+ int __must_check (*map_pages)(struct domain *d, unsigned long gfn,
+ unsigned long mfn, unsigned int order,
+ unsigned int flags);
+ int __must_check (*unmap_pages)(struct domain *d, unsigned long gfn,
+ unsigned int order);
void (*free_page_table)(struct page_info *);
#ifdef CONFIG_X86
void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
@@ -213,7 +217,7 @@ void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev);
* The purpose of the iommu_dont_flush_iotlb optional cpu flag is to
* avoid unecessary iotlb_flush in the low level IOMMU code.
*
- * iommu_map_page/iommu_unmap_page must flush the iotlb but somethimes
+ * iommu_map_pages/iommu_unmap_pages must flush the iotlb but sometimes
* this operation can be really expensive. This flag will be set by the
* caller to notify the low level IOMMU code to avoid the iotlb flushes.
* iommu_iotlb_flush/iommu_iotlb_flush_all will be explicitly called by