@@ -591,6 +591,12 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	if (!prot || !size || (size | iova | vaddr) & mask)
 		return -EINVAL;
 
+	if (map->flags & VFIO_DMA_MAP_FLAG_NOEXEC) {
+		if (!vfio_domains_have_iommu_cap(iommu, IOMMU_CAP_NOEXEC))
+			return -EINVAL;
+		prot |= IOMMU_NOEXEC;
+	}
+
 	/* Don't allow IOVA or virtual address wrap */
 	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
 		return -EINVAL;
@@ -672,11 +678,20 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 	for (; n; n = rb_next(n)) {
 		struct vfio_dma *dma;
+		const struct iommu_ops *ops = domain->domain->ops;
 		dma_addr_t iova;
 
 		dma = rb_entry(n, struct vfio_dma, node);
 		iova = dma->iova;
 
+		/*
+		 * If any of the mappings to be replayed has the NOEXEC flag
+		 * set, then the new IOMMU domain must support it.
+		 */
+		if ((dma->prot & IOMMU_NOEXEC) &&
+		    !(ops->capable(IOMMU_CAP_NOEXEC)))
+			return -EINVAL;
+
 		while (iova < dma->iova + dma->size) {
 			phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
 			size_t size;
 
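
The ops->capable() check above can only succeed when the IOMMU driver behind the newly attached domain advertises IOMMU_CAP_NOEXEC; the ARM SMMU driver, for instance, reports this capability through its ->capable() callback. The sketch below is purely illustrative and not part of this patch: my_iommu_capable and my_iommu_ops are made-up names, shown only to make the driver-side contract concrete, using the same single-argument ->capable() signature the replay code relies on.

/* Illustrative only -- not part of this patch. */
static bool my_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;	/* page tables can encode execute-never */
	default:
		return false;
	}
}

static const struct iommu_ops my_iommu_ops = {
	.capable	= my_iommu_capable,
	/* .attach_dev, .map, .unmap, .iova_to_phys, ... */
};
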
@@ -969,6 +984,11 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 				return 0;
 			return vfio_domains_have_iommu_cap(iommu,
 						IOMMU_CAP_CACHE_COHERENCY);
+		case VFIO_DMA_NOEXEC_IOMMU:
+			if (!iommu)
+				return 0;
+			return vfio_domains_have_iommu_cap(iommu,
+						IOMMU_CAP_NOEXEC);
 		default:
 			return 0;
 		}
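
From userspace, the pieces added so far fit together as follows: probe the container for the VFIO_DMA_NOEXEC_IOMMU extension introduced above, then set VFIO_DMA_MAP_FLAG_NOEXEC alongside the READ/WRITE flags in the VFIO_IOMMU_MAP_DMA call. The snippet below is a minimal sketch, assuming this patch plus the companion linux/vfio.h change that defines the two new symbols, and a container fd that already has a group attached and the type1 IOMMU model set; dma_map_noexec is a hypothetical helper name.

/* Minimal sketch -- assumes the uapi additions from this series. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int dma_map_noexec(int container, void *vaddr, uint64_t iova,
			  uint64_t size)
{
	struct vfio_iommu_type1_dma_map map;

	/* All IOMMU domains in the container must support NOEXEC. */
	if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_DMA_NOEXEC_IOMMU) <= 0)
		return -1;	/* caller may fall back to a plain RW mapping */

	memset(&map, 0, sizeof(map));
	map.argsz = sizeof(map);
	map.flags = VFIO_DMA_MAP_FLAG_READ |
		    VFIO_DMA_MAP_FLAG_WRITE |
		    VFIO_DMA_MAP_FLAG_NOEXEC;
	map.vaddr = (uintptr_t)vaddr;
	map.iova  = iova;
	map.size  = size;

	return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
}

If the extension is absent (for example on an IOMMU without execute-never support in its page tables), a map request carrying the NOEXEC bit fails with EINVAL, matching the check added to vfio_dma_do_map() above.
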
@@ -992,7 +1012,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
 		struct vfio_iommu_type1_dma_map map;
 		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
-				VFIO_DMA_MAP_FLAG_WRITE;
+				VFIO_DMA_MAP_FLAG_WRITE |
+				VFIO_DMA_MAP_FLAG_NOEXEC;
 
 		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);