--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -282,17 +282,6 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
}

-static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
-{
- struct iommu_dma_cookie *cookie;
- struct iommu_domain *domain;
-
- cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
- domain = cookie->fq_domain;
-
- domain->ops->flush_iotlb_all(domain);
-}
-
static bool dev_is_untrusted(struct device *dev)
{
return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
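The helper deleted above existed for exactly one reason: the IOVA layer only knew about struct iova_domain, so iommu-dma had to hop from the embedded iovad back to its iommu_dma_cookie via container_of() before it could reach the domain ops. Once the iova_domain carries the domain pointer itself, that trampoline is dead weight. For readers unfamiliar with the container_of() step it relied on, a minimal standalone sketch (plain userspace C with a hypothetical cookie type, not kernel code) of how a pointer to an embedded member recovers its enclosing structure:

    #include <stddef.h>

    /* Simplified stand-in for the kernel macro: subtract the member's
     * offset from a member pointer to recover the enclosing object. */
    #define container_of(ptr, type, field) \
            ((type *)((char *)(ptr) - offsetof(type, field)))

    struct cookie {                        /* hypothetical analogue of the cookie */
            int domain_id;
            struct { long dummy; } iovad;  /* embedded member */
    };

    int main(void)
    {
            struct cookie c = { .domain_id = 42 };
            struct cookie *back = container_of(&c.iovad, struct cookie, iovad);

            return back->domain_id == 42 ? 0 : 1;  /* back == &c */
    }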
@@ -312,7 +301,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
if (cookie->fq_domain)
return 0;

- ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all);
+ ret = init_iova_flush_queue(&cookie->iovad, domain);
if (ret) {
pr_warn("iova flush queue initialization failed\n");
return ret;
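With the callback gone, iommu_dma_init_fq() simply forwards the iommu_domain itself to the IOVA code. A hedged sketch of how a caller might use it; the IOMMU_DOMAIN_DMA_FQ type and the fall-back-to-strict policy come from the surrounding flush-queue series and are assumptions here, not part of this patch:

    /* Sketch only: opt a domain into flush-queue mode, staying with
     * strict (synchronous) invalidation if the queues cannot be set up. */
    static void example_enable_fq(struct iommu_domain *domain)
    {
            if (domain->type != IOMMU_DOMAIN_DMA_FQ)
                    return;

            if (iommu_dma_init_fq(domain))
                    domain->type = IOMMU_DOMAIN_DMA;
    }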
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -63,7 +63,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
- iovad->flush_cb = NULL;
+ iovad->fq_domain = NULL;
iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
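On the IOVA side the hunk above only swaps which field gets cleared at init, but the neighbouring dma_32bit_pfn line deserves a gloss: it sizes the optimistic sub-4GiB allocation window in granule-sized frames. A standalone illustration of the arithmetic (ordinary userspace C; the shift is derived the way iova_shift() effectively does, as log2 of the power-of-two granule):

    #include <stdio.h>

    int main(void)
    {
            unsigned long granule = 4096;                 /* 4 KiB IOVA granule */
            unsigned int shift = __builtin_ctzl(granule); /* iova_shift() -> 12 */
            unsigned long dma_32bit_pfn = 1UL << (32 - shift);

            /* 0x100000 frames of 4 KiB == exactly 4 GiB of IOVA space */
            printf("shift=%u dma_32bit_pfn=%#lx\n", shift, dma_32bit_pfn);
            return 0;
    }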
@@ -90,10 +90,10 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
free_percpu(iovad->fq);

iovad->fq = NULL;
- iovad->flush_cb = NULL;
+ iovad->fq_domain = NULL;
}

-int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
+int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
{
struct iova_fq __percpu *queue;
int cpu;
@@ -105,8 +105,6 @@ int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
if (!queue)
return -ENOMEM;

- iovad->flush_cb = flush_cb;
-
for_each_possible_cpu(cpu) {
struct iova_fq *fq;

@@ -117,6 +115,7 @@ init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
spin_lock_init(&fq->lock);
}

+ iovad->fq_domain = fq_domain;
iovad->fq = queue;

timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
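Note where the new assignment lands: fq_domain is written only after every per-CPU queue is fully initialised, right next to iovad->fq, the pointer the rest of the code tests to decide whether flush queues are live. A userspace analogue of that initialise-then-publish pattern, sketched with C11 atomics (an illustration of the idea only; the kernel code above relies on its own ordering rules):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct queue { int head, tail; };

    static _Atomic(struct queue *) published;  /* readers test this, like iovad->fq */

    static int setup(int nqueues)
    {
            struct queue *q = calloc(nqueues, sizeof(*q));
            if (!q)
                    return -1;

            for (int i = 0; i < nqueues; i++)  /* initialise everything first... */
                    q[i].head = q[i].tail = 0;

            /* ...publish last: whoever sees the pointer sees initialised queues */
            atomic_store_explicit(&published, q, memory_order_release);
            return 0;
    }

    int main(void) { return setup(4); }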
@@ -589,7 +588,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
static void iova_domain_flush(struct iova_domain *iovad)
{
atomic64_inc(&iovad->fq_flush_start_cnt);
- iovad->flush_cb(iovad);
+ iovad->fq_domain->ops->flush_iotlb_all(iovad->fq_domain);
atomic64_inc(&iovad->fq_flush_finish_cnt);
}
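iova_domain_flush() now reaches the hardware directly through the stored domain's ops, still bracketed by the same pair of counters: each deferred entry snapshots fq_flush_start_cnt when it is queued, and may only be reaped once fq_flush_finish_cnt has passed that snapshot, proving a complete IOTLB flush began and finished after the entry went in. A simplified sketch of that gating test (the entry layout mirrors the kernel's flush-queue entries, but the helper itself is hypothetical):

    #include <stdbool.h>

    struct fq_entry_sketch {
            unsigned long long counter;  /* fq_flush_start_cnt at queue time */
    };

    /* Safe to free only once a full flush has completed after queueing. */
    static inline bool entry_flushed(unsigned long long finish_cnt,
                                     const struct fq_entry_sketch *e)
    {
            return e->counter < finish_cnt;
    }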
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -14,6 +14,7 @@
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
+#include <linux/iommu.h>

/* iova structure */
struct iova {
@@ -35,11 +36,6 @@ struct iova_rcache {
struct iova_cpu_rcache __percpu *cpu_rcaches;
};

-struct iova_domain;
-
-/* Call-Back from IOVA code into IOMMU drivers */
-typedef void (* iova_flush_cb)(struct iova_domain *domain);
-
/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE 256

@@ -82,8 +78,7 @@ struct iova_domain {
struct iova anchor; /* rbtree lookup anchor */
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */

- iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
- TLBs */
+ struct iommu_domain *fq_domain;

struct timer_list fq_timer; /* Timer to regularly empty the
flush-queues */
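The structural payoff in the header: one typed pointer replaces a function pointer plus a round trip through the cookie. Schematically, the flush path shortens from

    iovad->flush_cb(iovad)
      -> iommu_dma_flush_iotlb_all(iovad)        /* container_of() + cookie */
        -> domain->ops->flush_iotlb_all(domain)

to the direct call already seen in iova_domain_flush():

    iovad->fq_domain->ops->flush_iotlb_all(iovad->fq_domain);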
@@ -147,7 +142,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
-int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb);
+int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
#else