@@ -2502,6 +2502,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->iommu = iommu;
info->pasid_table = NULL;
info->auxd_enabled = 0;
+ INIT_LIST_HEAD(&info->auxiliary_domains);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -5036,6 +5037,124 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
domain_exit(to_dmar_domain(domain));
}
+/*
+ * Check whether @domain will be attached to @dev in auxiliary mode.
+ */
+static inline bool
+is_device_attach_aux_domain(struct device *dev, struct iommu_domain *domain)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ return info && info->auxd_enabled &&
+ domain->type == IOMMU_DOMAIN_UNMANAGED;
+}
+
+static void auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
+
+ domain->auxd_refcnt++;
+ list_add(&domain->auxd, &info->auxiliary_domains);
+}
+
+static void auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
+
+ list_del(&domain->auxd);
+ domain->auxd_refcnt--;
+
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+}
+
+static int domain_add_dev_auxd(struct dmar_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ u8 bus, devfn;
+ unsigned long flags;
+ struct intel_iommu *iommu;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ if (domain->default_pasid <= 0) {
+ domain->default_pasid = intel_pasid_alloc_id(domain, PASID_MIN,
+ intel_pasid_get_dev_max_id(dev), GFP_ATOMIC);
+ if (domain->default_pasid < 0) {
+ pr_err("Can't allocate default pasid\n");
+ ret = -ENODEV;
+ goto pasid_failed;
+ }
+ }
+
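+ /*
+ * iommu->lock must be held to attach the domain to the iommu and to
+ * set up the PASID entry for second-level translation.
+ */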
+ spin_lock(&iommu->lock);
+ ret = domain_attach_iommu(domain, iommu);
+ if (ret)
+ goto attach_failed;
+
+ /* Set up the PASID entry for mediated devices: */
+ ret = intel_pasid_setup_second_level(iommu, domain, dev,
+ domain->default_pasid, false);
+ if (ret)
+ goto table_failed;
+ spin_unlock(&iommu->lock);
+
+ auxiliary_link_device(domain, dev);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+
+table_failed:
+ domain_detach_iommu(domain, iommu);
+attach_failed:
+ spin_unlock(&iommu->lock);
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+pasid_failed:
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return ret;
+}
+
+static void domain_remove_dev_aux(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ iommu = info->iommu;
+
+ intel_pasid_tear_down_second_level(iommu, domain,
+ dev, domain->default_pasid);
+
+ auxiliary_unlink_device(domain, dev);
+
+ spin_lock(&iommu->lock);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -5050,7 +5169,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
}
/* normally dev is not mapped */
- if (unlikely(domain_context_mapped(dev))) {
+ if (unlikely(domain_context_mapped(dev) &&
+ !is_device_attach_aux_domain(dev, domain))) {
struct dmar_domain *old_domain;
old_domain = find_domain(dev);
@@ -5097,13 +5217,19 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
- return domain_add_dev_info(dmar_domain, dev);
+ if (is_device_attach_aux_domain(dev, domain))
+ return domain_add_dev_auxd(dmar_domain, dev);
+ else
+ return domain_add_dev_info(dmar_domain, dev);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
+ if (is_device_attach_aux_domain(dev, domain))
+ domain_remove_dev_aux(to_dmar_domain(domain), dev);
+ else
+ dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
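For context, a minimal sketch of how a caller would reach the new paths through the generic IOMMU API. This is not part of the patch: the function name example_aux_attach() is hypothetical, and it assumes auxiliary mode has already been enabled for the device elsewhere (i.e. the device's auxd_enabled flag is set, which is outside this hunk). Under those assumptions, attaching an unmanaged domain is routed to domain_add_dev_auxd() and the matching detach to domain_remove_dev_aux().

static int example_aux_attach(struct device *dev)
{
	struct iommu_domain *aux_dom;
	int ret;

	/*
	 * iommu_domain_alloc() returns an IOMMU_DOMAIN_UNMANAGED domain,
	 * which is what is_device_attach_aux_domain() checks for.
	 */
	aux_dom = iommu_domain_alloc(&pci_bus_type);
	if (!aux_dom)
		return -ENOMEM;

	/* With auxd_enabled set, this is routed to domain_add_dev_auxd(). */
	ret = iommu_attach_device(aux_dom, dev);
	if (ret) {
		iommu_domain_free(aux_dom);
		return ret;
	}

	/* ... use iommu_map()/iommu_unmap() on aux_dom as usual ... */

	/* Routed to domain_remove_dev_aux(). */
	iommu_detach_device(aux_dom, dev);
	iommu_domain_free(aux_dom);

	return 0;
}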
@@ -408,9 +408,11 @@ struct dmar_domain {
/* Domain ids per IOMMU. Use u16 since
* domain ids are 16 bit wide according
* to VT-d spec, section 9.3 */
+ unsigned int auxd_refcnt; /* Refcount of auxiliary-mode attachments */
bool has_iotlb_device;
struct list_head devices; /* all devices' list */
+ struct list_head auxd; /* link to device's auxiliary list */
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
@@ -429,6 +431,11 @@ struct dmar_domain {
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
u64 max_addr; /* maximum mapped address */
+ int default_pasid; /*
+ * The default pasid used for non-SVM
+ * traffic on mediated devices.
+ */
+
struct iommu_domain domain; /* generic domain data structure for
iommu core */
};
@@ -478,6 +485,9 @@ struct device_domain_info {
struct list_head link; /* link to domain siblings */
struct list_head global; /* link to global list */
struct list_head table; /* link to pasid table */
+ struct list_head auxiliary_domains; /* auxiliary domains
+ * attached to this device
+ */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
u16 pfsid; /* SRIOV physical function source ID */
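The new fields tie together as follows: each auxiliary attachment threads the dmar_domain onto the device's auxiliary_domains list through the domain's auxd node, auxd_refcnt counts how many devices currently have the domain attached in auxiliary mode, and default_pasid is the PASID its second-level translation is installed under. A hypothetical helper (not part of the patch) walking that linkage could look like the following, assuming it runs inside intel-iommu.c with device_domain_lock held:

/* Hypothetical: return true if @domain is already attached to @dev in aux mode. */
static bool is_aux_attached(struct dmar_domain *domain, struct device *dev)
{
	struct device_domain_info *info = dev->archdata.iommu;
	struct dmar_domain *d;

	assert_spin_locked(&device_domain_lock);
	if (!info)
		return false;

	/* Walk the domains linked into this device's auxiliary_domains list. */
	list_for_each_entry(d, &info->auxiliary_domains, auxd)
		if (d == domain)
			return true;

	return false;
}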