--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1866,11 +1866,6 @@
Note that using this option lowers the security
provided by tboot because it makes the system
vulnerable to DMA attacks.
- nobounce [Default off]
- Disable bounce buffer for untrusted devices such as
- the Thunderbolt devices. This will treat the untrusted
- devices as the trusted ones, hence might expose security
- risks of DMA attacks.

intel_idle.max_cstate= [KNL,HW,ACPI,X86]
0 disables intel_idle and fall back on acpi_idle.
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -355,7 +355,6 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
-static int intel_no_bounce;
static int iommu_skip_te_disable;

#define IDENTMAP_GFX 2
@@ -457,9 +456,6 @@ static int __init intel_iommu_setup(char *str)
} else if (!strncmp(str, "tboot_noforce", 13)) {
pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
intel_iommu_tboot_noforce = 1;
- } else if (!strncmp(str, "nobounce", 8)) {
- pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
- intel_no_bounce = 1;
}

str += strcspn(str, ",");
@@ -2277,15 +2273,14 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
return level;
}

-static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
+static int
+__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phys_pfn, unsigned long nr_pages, int prot)
{
struct dma_pte *first_pte = NULL, *pte = NULL;
- phys_addr_t pteval;
- unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
+ phys_addr_t pteval;
u64 attr;

BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
@@ -2297,26 +2292,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
if (domain_use_first_level(domain))
attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;

- if (!sg) {
- sg_res = nr_pages;
- pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
- }
+ pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;

while (nr_pages > 0) {
uint64_t tmp;

- if (!sg_res) {
- unsigned int pgoff = sg->offset & ~PAGE_MASK;
-
- sg_res = aligned_nrpages(sg->offset, sg->length);
- sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
- sg->dma_length = sg->length;
- pteval = (sg_phys(sg) - pgoff) | attr;
- phys_pfn = pteval >> VTD_PAGE_SHIFT;
- }
-
if (!pte) {
- largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
+ largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
+ phys_pfn, nr_pages);

first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
if (!pte)
@@ -2328,7 +2311,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
pteval |= DMA_PTE_LARGE_PAGE;
lvl_pages = lvl_to_nr_pages(largepage_lvl);

- nr_superpages = sg_res / lvl_pages;
+ nr_superpages = nr_pages / lvl_pages;
end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;

/*
@@ -2362,48 +2345,45 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
lvl_pages = lvl_to_nr_pages(largepage_lvl);

BUG_ON(nr_pages < lvl_pages);
- BUG_ON(sg_res < lvl_pages);

nr_pages -= lvl_pages;
iov_pfn += lvl_pages;
phys_pfn += lvl_pages;
pteval += lvl_pages * VTD_PAGE_SIZE;
- sg_res -= lvl_pages;

/* If the next PTE would be the first in a new page, then we
- need to flush the cache on the entries we've just written.
- And then we'll need to recalculate 'pte', so clear it and
- let it get set again in the if (!pte) block above.
-
- If we're done (!nr_pages) we need to flush the cache too.
-
- Also if we've been setting superpages, we may need to
- recalculate 'pte' and switch back to smaller pages for the
- end of the mapping, if the trailing size is not enough to
- use another superpage (i.e. sg_res < lvl_pages). */
+ * need to flush the cache on the entries we've just written.
+ * And then we'll need to recalculate 'pte', so clear it and
+ * let it get set again in the if (!pte) block above.
+ *
+ * If we're done (!nr_pages) we need to flush the cache too.
+ *
+ * Also if we've been setting superpages, we may need to
+ * recalculate 'pte' and switch back to smaller pages for the
+ * end of the mapping, if the trailing size is not enough to
+ * use another superpage (i.e. nr_pages < lvl_pages).
+ */
pte++;
if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && sg_res < lvl_pages)) {
+ (largepage_lvl > 1 && nr_pages < lvl_pages)) {
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
pte = NULL;
}
-
- if (!sg_res && nr_pages)
- sg = sg_next(sg);
}
+
return 0;
}

-static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
+static int
+domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phys_pfn, unsigned long nr_pages, int prot)
{
int iommu_id, ret;
struct intel_iommu *iommu;

/* Do the real mapping first */
- ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+ ret = __domain_mapping(domain, iov_pfn, phys_pfn, nr_pages, prot);
if (ret)
return ret;

@@ -2415,20 +2395,6 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return 0;
}

-static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long nr_pages,
- int prot)
-{
- return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
-}
-
-static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages,
- int prot)
-{
- return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
-}
-
static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
unsigned long flags;
@@ -2685,7 +2651,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
*/
dma_pte_clear_range(domain, first_vpfn, last_vpfn);

- return __domain_mapping(domain, first_vpfn, NULL,
+ return __domain_mapping(domain, first_vpfn,
first_vpfn, last_vpfn - first_vpfn + 1,
DMA_PTE_READ|DMA_PTE_WRITE);
}
@@ -4940,8 +4906,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
/* Round up size to next multiple of PAGE_SIZE, if it and
the low bits of hpa would take us onto the next page */
size = aligned_nrpages(hpa, size);
- ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
+ ret = domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ hpa >> VTD_PAGE_SHIFT, size, prot);
return ret;
}

Some cleanups after converting the driver to use dma-iommu ops.

 - Remove the nobounce option;
 - Clean up and simplify the path in domain mapping.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 .../admin-guide/kernel-parameters.txt |  5 --
 drivers/iommu/intel/iommu.c           | 90 ++++++-------------
 2 files changed, 28 insertions(+), 67 deletions(-)
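
As an aside for reviewers: the reason __domain_mapping() can stop
walking scatterlists is that the common code already does that walk.
Behind the dma-iommu ops, iommu_map_sg() in the core splits (and
merges) an sg list into physically contiguous chunks and hands each
chunk to intel_iommu_map() as a plain (iova, paddr, size) triple. The
sketch below is illustrative only and not part of this patch;
map_sg_sketch() is a made-up name, and the real iommu_map_sg() also
merges adjacent sg entries before calling into the driver:

#include <linux/iommu.h>
#include <linux/scatterlist.h>

/* Illustrative sketch: feed a scatterlist to a driver that only maps
 * one physically contiguous range per call.
 */
static int map_sg_sketch(struct iommu_domain *domain, unsigned long iova,
			 struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i;
	int ret;

	for_each_sg(sg, s, nents, i) {
		/* Each call lands in intel_iommu_map() -> domain_mapping(). */
		ret = iommu_map(domain, iova + mapped, sg_phys(s),
				s->length, prot);
		if (ret) {
			/* Roll back the ranges mapped so far. */
			iommu_unmap(domain, iova, mapped);
			return ret;
		}
		mapped += s->length;
	}

	return 0;
}

Keeping that walk in the core is what allows this patch to drop sg_res
and the sg_next() bookkeeping from the mapping loop and treat every
request as a single contiguous range.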