--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -7,9 +7,7 @@
*/
#include <linux/device.h>
-#include <linux/dmar.h>
#include <linux/idr.h>
-#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -257,13 +255,9 @@ static ssize_t iommu_dma_protection_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- /*
- * Kernel DMA protection is a feature where Thunderbolt security is
- * handled natively using IOMMU. It is enabled when IOMMU is
- * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
- */
- return sprintf(buf, "%d\n",
- iommu_present(&pci_bus_type) && dmar_platform_optin());
+ struct tb *tb = container_of(dev, struct tb, dev);
+
+ return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
}
static DEVICE_ATTR_RO(iommu_dma_protection);
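
The attribute above is what userspace consumers read to decide whether Kernel DMA
protection is in effect; it now simply reports nhi->iommu_dma_protection as 0 or 1.
A minimal sketch of such a consumer, assuming the first domain is registered as
domain0 (adjust the path for other domains):

	#include <stdio.h>

	int main(void)
	{
		/* Documented sysfs ABI path; domain0 assumed for illustration. */
		const char *path =
			"/sys/bus/thunderbolt/devices/domain0/iommu_dma_protection";
		FILE *f = fopen(path, "r");
		int val = 0;

		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%d", &val) != 1)
			val = 0;
		fclose(f);

		printf("Kernel DMA protection %s\n", val ? "enabled" : "disabled");
		return 0;
	}
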
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -15,9 +15,11 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
+#include <linux/string_helpers.h>
#include "nhi.h"
#include "nhi_regs.h"
@@ -1103,6 +1105,47 @@ static void nhi_check_quirks(struct tb_nhi *nhi)
nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
}
+static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
+{
+ if (!pdev->external_facing ||
+ !device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION))
+ return 0;
+ *(bool *)data = true;
+ return 1; /* Stop walking */
+}
+
+static void nhi_check_iommu(struct tb_nhi *nhi)
+{
+ struct pci_bus *bus = nhi->pdev->bus;
+ bool port_ok = false;
+
+ /*
+ * Ideally what we'd do here is grab every PCI device that
+ * represents a tunnelling adapter for this NHI and check their
+ * status directly, but unfortunately USB4 seems to make it
+ * obnoxiously difficult to reliably make any correlation.
+ *
+ * So for now we'll have to bodge it... Hoping that the system
+ * is at least sane enough that an adapter is in the same PCI
+ * segment as its NHI: if we can find *something* on that segment
+ * which meets the requirements for Kernel DMA Protection, we'll
+ * take that to imply that firmware is aware and has (hopefully)
+ * done the right thing in general. We need to know that the PCI
+ * layer has seen the ExternalFacingPort property which will then
+ * inform the IOMMU layer to enforce the complete "untrusted DMA"
+ * flow, but also that the IOMMU driver itself can be trusted not
+ * to have been subverted by a pre-boot DMA attack.
+ */
+ while (bus->parent)
+ bus = bus->parent;
+
+ pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok);
+
+ nhi->iommu_dma_protection = port_ok;
+ dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n",
+ str_enabled_disabled(port_ok));
+}
+
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
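
The bus walk in nhi_check_iommu() keys off the same two conditions for every
candidate device. For readability, that per-device test can be written on its
own; this is a sketch only, with a hypothetical helper name that is not part of
this patch:

	#include <linux/iommu.h>
	#include <linux/pci.h>

	/*
	 * Hypothetical helper, for illustration: does this device look like an
	 * external-facing port whose IOMMU protection was already in force
	 * before the OS took over?
	 */
	static bool pdev_has_kernel_dma_protection(struct pci_dev *pdev)
	{
		return pdev->external_facing &&
		       device_iommu_capable(&pdev->dev,
					    IOMMU_CAP_PRE_BOOT_PROTECTION);
	}
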
@@ -1220,6 +1263,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
nhi_check_quirks(nhi);
+ nhi_check_iommu(nhi);
res = nhi_init_msi(nhi);
if (res) {
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -465,6 +465,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
* @msix_ida: Used to allocate MSI-X vectors for rings
* @going_away: The host controller device is about to disappear so when
* this flag is set, avoid touching the hardware anymore.
+ * @iommu_dma_protection: An IOMMU will isolate external-facing ports.
* @interrupt_work: Work scheduled to handle ring interrupt when no
* MSI-X is used.
* @hop_count: Number of rings (end point hops) supported by NHI.
@@ -479,6 +480,7 @@ struct tb_nhi {
struct tb_ring **rx_rings;
struct ida msix_ida;
bool going_away;
+ bool iommu_dma_protection;
struct work_struct interrupt_work;
u32 hop_count;
unsigned long quirks;