Message ID | 20190506185207.31069-3-tmurphy@arista.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | iommu/amd: Convert the AMD iommu driver to the dma-iommu api | expand |
On Mon, May 06, 2019 at 07:52:04PM +0100, Tom Murphy wrote: > +static int handle_deferred_device(struct device *dev) > +{ > + struct iommu_domain *domain; > + const struct iommu_ops *ops; > + > + if (!is_kdump_kernel()) > + return 0; > + > + domain = iommu_get_domain_for_dev(dev); > - dma_handle =__iommu_dma_map(dev, phys, size, > + if (unlikely(handle_deferred_device(dev))) > + return DMA_MAPPING_ERROR; > + > + dma_handle = __iommu_dma_map(dev, phys, size, __iommu_dma_map already looks up the domain, and as far as I can tell all callers need the handle_deferred_device call. Should we just move it to there and pass the domain from the caller? Also shouldn't the iommu_attach_device call inside handle_deferred_device also get an unlikely marker?
like this? In that case we need to add a call to iommu_dma_alloc_remap. From 862aeebb601008cf863e3aff4ff8ed7cefebeefa Mon Sep 17 00:00:00 2001 From: Tom Murphy <tmurphy@tmurphy-419tom-0.sjc.aristanetworks.com> Date: Wed, 15 May 2019 05:43:25 -0700 Subject: [PATCH] iommu/dma-iommu: Handle deferred devices Handle devices which defer their attach to the iommu in the dma-iommu api Signed-off-by: Tom Murphy <tmurphy@arista.com> --- drivers/iommu/dma-iommu.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 7f313cfa9..a48ae906d 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -22,6 +22,7 @@ #include <linux/pci.h> #include <linux/scatterlist.h> #include <linux/vmalloc.h> +#include <linux/crash_dump.h> struct iommu_dma_msi_page { struct list_head list; @@ -323,6 +324,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, return iova_reserve_iommu_regions(dev, domain); } +static int handle_deferred_device(struct device *dev, + struct iommu_domain *domain) +{ + const struct iommu_ops *ops = domain->ops; + + if (!is_kdump_kernel()) + return 0; + + if (unlikely(ops->is_attach_deferred && + ops->is_attach_deferred(domain, dev))) + return iommu_attach_device(domain, dev); + + return 0; +} + /** * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API * page flags. 
@@ -432,6 +448,9 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, size_t iova_off = 0; dma_addr_t iova; + if (unlikely(handle_deferred_device(dev, domain))) + return DMA_MAPPING_ERROR; + if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { iova_off = iova_offset(&cookie->iovad, phys); size = iova_align(&cookie->iovad, size + iova_off); @@ -609,6 +628,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, dma_addr_t iova; void *vaddr; + if (unlikely(handle_deferred_device(dev, domain))) + return DMA_MAPPING_ERROR; + *dma_handle = DMA_MAPPING_ERROR; min_size = alloc_sizes & -alloc_sizes; @@ -836,7 +858,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, bool coherent = dev_is_dma_coherent(dev); dma_addr_t dma_handle; - dma_handle =__iommu_dma_map(dev, phys, size, + dma_handle = __iommu_dma_map(dev, phys, size, dma_info_to_prot(dir, coherent, attrs), iommu_get_dma_domain(dev)); if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && @@ -954,6 +976,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, unsigned long mask = dma_get_seg_boundary(dev); int i; + if (unlikely(handle_deferred_device(dev, domain))) + return 0; + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 7a96c2c8f56b..b383498e2dc3 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -22,6 +22,7 @@ #include <linux/pci.h> #include <linux/scatterlist.h> #include <linux/vmalloc.h> +#include <linux/crash_dump.h> struct iommu_dma_msi_page { struct list_head list; @@ -322,6 +323,22 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, return iova_reserve_iommu_regions(dev, domain); } +static int handle_deferred_device(struct device *dev) +{ + struct iommu_domain *domain; + const struct iommu_ops *ops; + + if (!is_kdump_kernel()) + return 0; + + domain = iommu_get_domain_for_dev(dev); + ops = domain->ops; + if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) + return iommu_attach_device(domain, dev); + + return 0; +} + /** * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API * page flags. @@ -835,7 +852,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, bool coherent = dev_is_dma_coherent(dev); dma_addr_t dma_handle; - dma_handle =__iommu_dma_map(dev, phys, size, + if (unlikely(handle_deferred_device(dev))) + return DMA_MAPPING_ERROR; + + dma_handle = __iommu_dma_map(dev, phys, size, dma_info_to_prot(dir, coherent, attrs), iommu_get_dma_domain(dev)); if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && @@ -953,6 +973,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, unsigned long mask = dma_get_seg_boundary(dev); int i; + if (unlikely(handle_deferred_device(dev))) + return 0; + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) iommu_dma_sync_sg_for_device(dev, sg, nents, dir); @@ -1056,6 +1079,9 @@ static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, static void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { + if (unlikely(handle_deferred_device(dev))) + return NULL; + gfp |= 
__GFP_ZERO; #ifdef CONFIG_DMA_DIRECT_REMAP
Handle devices which defer their attach to the IOMMU in the dma-iommu API Signed-off-by: Tom Murphy <tmurphy@arista.com> --- drivers/iommu/dma-iommu.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-)