
[RFC,v1,2/3] PCI: vmd: Add VMD PCH rootbus support

Message ID: 20241025150153.983306-3-szymon.durawa@linux.intel.com (mailing list archive)
State: Changes Requested
Series: VMD add PCH rootbus support

Commit Message

Szymon Durawa Oct. 25, 2024, 3:01 p.m. UTC
Starting from Intel Arrow Lake VMD enhacement introduces separate
rotbus for PCH. It means that all 3 MMIO BARs exposed by VMD are
shared now between CPU IOC and PCH. This patch adds PCH bus
enumeration and MMIO management for devices with VMD enhancement
support.

Suggested-by: Nirmal Patel <nirmal.patel@linux.intel.com>
Reviewed-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
Signed-off-by: Szymon Durawa <szymon.durawa@linux.intel.com>
---
 drivers/pci/controller/vmd.c | 176 +++++++++++++++++++++++++++++++++--
 1 file changed, 167 insertions(+), 9 deletions(-)

Comments

Bjorn Helgaas Oct. 28, 2024, 9:50 p.m. UTC | #1
On Fri, Oct 25, 2024 at 05:01:52PM +0200, Szymon Durawa wrote:
> Starting from Intel Arrow Lake VMD enhacement introduces separate
> rotbus for PCH. It means that all 3 MMIO BARs exposed by VMD are

enhancement
root bus

Does VMD still have only 3 MMIO BARs?  VMD_RES_PCH_* suggests more
BARs.

> shared now between CPU IOC and PCH. This patch adds PCH bus
> enumeration and MMIO management for devices with VMD enhancement
> support.

s/This patch adds/Add/

We already had bus enumeration and MMIO management.

It'd be nice to have something specific about what changes with PCH.
A different fixed root bus number?  Multiple root buses?  Additional
BARs in the VMD endpoint?

If possible, describe this in generic PCIe topology terms, not in
Intel-speak (IOC, PCH, etc).

> +#define VMD_PRIMARY_PCH_BUS 0x80
> +#define VMD_BUSRANGE0 0xC8
> +#define VMD_BUSRANGE1 0xCC
> +#define VMD_MEMBAR1_OFFSET 0xD0
> +#define VMD_MEMBAR2_OFFSET1 0xD8
> +#define VMD_MEMBAR2_OFFSET2 0xDC

This file (mostly) uses lower-case hex; match that style.
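
For illustration, the same block in the file's lower-case hex style could look like the sketch below (the extra parentheses around the macro argument are an additional defensive tweak, not something asked for above):

#define VMD_PRIMARY_PCH_BUS	0x80
#define VMD_BUSRANGE0		0xc8
#define VMD_BUSRANGE1		0xcc
#define VMD_MEMBAR1_OFFSET	0xd0
#define VMD_MEMBAR2_OFFSET1	0xd8
#define VMD_MEMBAR2_OFFSET2	0xdc
#define VMD_BUS_END(busr)	(((busr) >> 8) & 0xff)
#define VMD_BUS_START(busr)	((busr) & 0x00ff)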

> +#define VMD_BUS_END(busr) ((busr >> 8) & 0xff)
> +#define VMD_BUS_START(busr) (busr & 0x00ff)
> +
>  #define MB2_SHADOW_OFFSET	0x2000
>  #define MB2_SHADOW_SIZE		16
>  
> @@ -38,11 +47,15 @@ enum vmd_resource {
>  	VMD_RES_CFGBAR = 0,
>  	VMD_RES_MBAR_1, /*VMD Resource MemBAR 1 */
>  	VMD_RES_MBAR_2, /*VMD Resource MemBAR 2 */
> +	VMD_RES_PCH_CFGBAR,
> +	VMD_RES_PCH_MBAR_1, /*VMD Resource PCH MemBAR 1 */
> +	VMD_RES_PCH_MBAR_2, /*VMD Resource PCH MemBAR 2 */

Space after "/*".
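
I.e., a fixed-up version of those lines would read (sketch):

	VMD_RES_PCH_CFGBAR,
	VMD_RES_PCH_MBAR_1, /* VMD Resource PCH MemBAR 1 */
	VMD_RES_PCH_MBAR_2, /* VMD Resource PCH MemBAR 2 */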

> +static inline u8 vmd_has_pch_rootbus(struct vmd_dev *vmd)
> +{
> +	return vmd->busn_start[VMD_BUS_1] != 0;

Seems a little weird to learn this by testing whether this kzalloc'ed
field has been set.  Could easily save the driver_data pointer or just
the "features" value in struct vmd_dev.
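
A sketch of that alternative, assuming the "features" value vmd_probe() already
derives from id->driver_data is simply cached in the device (the field name
here is hypothetical):

struct vmd_dev {
	/* ... existing members ... */
	unsigned long		features;	/* cached id->driver_data */
};

/* in vmd_probe(), once the features value is known: */
	vmd->features = features;

static inline bool vmd_has_pch_rootbus(struct vmd_dev *vmd)
{
	return vmd->features & VMD_FEAT_HAS_PCH_ROOTBUS;
}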

> +		case 3:
> +			if (!(features & VMD_FEAT_HAS_PCH_ROOTBUS)) {
> +				pci_err(dev, "VMD Bus Restriction detected type %d, but PCH Rootbus is not supported, aborting.\n",
> +					BUS_RESTRICT_CFG(reg));
> +				return -ENODEV;
> +			}
> +
> +			/* IOC start bus */
> +			vmd->busn_start[VMD_BUS_0] = 224;
> +			/* PCH start bus */
> +			vmd->busn_start[VMD_BUS_1] = 225;

Seems like these magic numbers could have #defines.  I see we've been
using 128 and 224 already, and this basically adds 225.
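
E.g. (names are made up here, and the existing 128/224 cases could use the
same treatment):

#define VMD_BUSN_START_IOC	224
#define VMD_BUSN_START_PCH	225

			/* IOC start bus */
			vmd->busn_start[VMD_BUS_0] = VMD_BUSN_START_IOC;
			/* PCH start bus */
			vmd->busn_start[VMD_BUS_1] = VMD_BUSN_START_PCH;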

> +static int vmd_create_pch_bus(struct vmd_dev *vmd, struct pci_sysdata *sd,
> +			      resource_size_t *offset)
> +{
> +	LIST_HEAD(resources_pch);
> +
> +	pci_add_resource(&resources_pch, &vmd->resources[VMD_RES_PCH_CFGBAR]);
> +	pci_add_resource_offset(&resources_pch,
> +				&vmd->resources[VMD_RES_PCH_MBAR_1], offset[0]);
> +	pci_add_resource_offset(&resources_pch,
> +				&vmd->resources[VMD_RES_PCH_MBAR_2], offset[1]);
> +
> +	vmd->bus[VMD_BUS_1] = pci_create_root_bus(&vmd->dev->dev,
> +						  vmd->busn_start[VMD_BUS_1],
> +						  &vmd_ops, sd, &resources_pch);
> +
> +	if (!vmd->bus[VMD_BUS_1]) {
> +		pci_free_resource_list(&resources_pch);
> +		pci_stop_root_bus(vmd->bus[VMD_BUS_1]);
> +		pci_remove_root_bus(vmd->bus[VMD_BUS_1]);
> +		return -ENODEV;
> +	}
> +
> +	/*
> +	 * primary bus is not set by pci_create_root_bus(), it is updated here
> +	 */
> +	vmd->bus[VMD_BUS_1]->primary = VMD_PRIMARY_PCH_BUS;
> +
> +	vmd_copy_host_bridge_flags(
> +		pci_find_host_bridge(vmd->dev->bus),
> +		to_pci_host_bridge(vmd->bus[VMD_BUS_1]->bridge));
> +
> +	if (vmd->irq_domain)
> +		dev_set_msi_domain(&vmd->bus[VMD_BUS_1]->dev,
> +				   vmd->irq_domain);
> +	else
> +		dev_set_msi_domain(&vmd->bus[VMD_BUS_1]->dev,
> +				   dev_get_msi_domain(&vmd->dev->dev));
> +
> +	return 0;

This looks a lot like parts of vmd_enable_domain().  Could this be
factored out into a helper function that could be used for both
VMD_BUS_0 and VMD_BUS_1?
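
For reference, a rough sketch of such a helper (untested; name and parameters
are hypothetical, the PCH-specific ->primary fixup would stay with the caller,
and note the error path above also stops/removes a root bus that was never
created, which a shared helper could avoid):

static int vmd_create_bus(struct vmd_dev *vmd, enum vmd_rootbus busn,
			  struct pci_sysdata *sd, struct list_head *resources)
{
	vmd->bus[busn] = pci_create_root_bus(&vmd->dev->dev,
					     vmd->busn_start[busn],
					     &vmd_ops, sd, resources);
	if (!vmd->bus[busn]) {
		pci_free_resource_list(resources);
		return -ENODEV;
	}

	vmd_copy_host_bridge_flags(pci_find_host_bridge(vmd->dev->bus),
				   to_pci_host_bridge(vmd->bus[busn]->bridge));

	if (vmd->irq_domain)
		dev_set_msi_domain(&vmd->bus[busn]->dev, vmd->irq_domain);
	else
		dev_set_msi_domain(&vmd->bus[busn]->dev,
				   dev_get_msi_domain(&vmd->dev->dev));

	return 0;
}

vmd_enable_domain() and vmd_create_pch_bus() would then only differ in how
they build their resource lists and in the VMD_BUS_1 primary bus assignment.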

Why is vmd_attach_resource() different between them?  Why is
sysfs_create_link() different?

Bjorn

Patch

diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 7cce7354b5c2..842b70a21325 100755
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -31,6 +31,15 @@ 
 #define PCI_REG_VMLOCK		0x70
 #define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)
 
+#define VMD_PRIMARY_PCH_BUS 0x80
+#define VMD_BUSRANGE0 0xC8
+#define VMD_BUSRANGE1 0xCC
+#define VMD_MEMBAR1_OFFSET 0xD0
+#define VMD_MEMBAR2_OFFSET1 0xD8
+#define VMD_MEMBAR2_OFFSET2 0xDC
+#define VMD_BUS_END(busr) ((busr >> 8) & 0xff)
+#define VMD_BUS_START(busr) (busr & 0x00ff)
+
 #define MB2_SHADOW_OFFSET	0x2000
 #define MB2_SHADOW_SIZE		16
 
@@ -38,11 +47,15 @@  enum vmd_resource {
 	VMD_RES_CFGBAR = 0,
 	VMD_RES_MBAR_1, /*VMD Resource MemBAR 1 */
 	VMD_RES_MBAR_2, /*VMD Resource MemBAR 2 */
+	VMD_RES_PCH_CFGBAR,
+	VMD_RES_PCH_MBAR_1, /*VMD Resource PCH MemBAR 1 */
+	VMD_RES_PCH_MBAR_2, /*VMD Resource PCH MemBAR 2 */
 	VMD_RES_COUNT
 };
 
 enum vmd_rootbus {
 	VMD_BUS_0 = 0,
+	VMD_BUS_1,
 	VMD_BUS_COUNT
 };
 
@@ -86,6 +99,12 @@  enum vmd_features {
 	 * proper power management of the SoC.
 	 */
 	VMD_FEAT_BIOS_PM_QUIRK		= (1 << 5),
+
+	/*
+	 * Starting from Intel Arrow Lake, VMD devices have their VMD rootports
+	 * connected to CPU IOC and PCH rootbuses.
+	 */
+	VMD_FEAT_HAS_PCH_ROOTBUS	= (1 << 6)
 };
 
 #define VMD_BIOS_PM_QUIRK_LTR	0x1003	/* 3145728 ns */
@@ -93,7 +112,8 @@  enum vmd_features {
 #define VMD_FEATS_CLIENT	(VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |	\
 				 VMD_FEAT_HAS_BUS_RESTRICTIONS |	\
 				 VMD_FEAT_OFFSET_FIRST_VECTOR |		\
-				 VMD_FEAT_BIOS_PM_QUIRK)
+				 VMD_FEAT_BIOS_PM_QUIRK |		\
+				 VMD_FEAT_HAS_PCH_ROOTBUS)
 
 static DEFINE_IDA(vmd_instance_ida);
 
@@ -376,6 +396,11 @@  static void vmd_remove_irq_domain(struct vmd_dev *vmd)
 	}
 }
 
+static inline u8 vmd_has_pch_rootbus(struct vmd_dev *vmd)
+{
+	return vmd->busn_start[VMD_BUS_1] != 0;
+}
+
 static void __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
 				  unsigned int devfn, int reg, int len)
 {
@@ -521,6 +546,11 @@  static void vmd_domain_reset(struct vmd_dev *vmd)
 	u8 dev, functions, fn, hdr_type;
 	char __iomem *base;
 
+	if (vmd_has_pch_rootbus(vmd)) {
+		max_buses += resource_size(&vmd->resources[VMD_RES_PCH_CFGBAR]);
+		max_buses += 2;
+	}
+
 	for (bus = 0; bus < max_buses; bus++) {
 		for (dev = 0; dev < 32; dev++) {
 			base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
@@ -645,7 +675,7 @@  static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
 	return 0;
 }
 
-static int vmd_get_bus_number_start(struct vmd_dev *vmd)
+static int vmd_get_bus_number_start(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_dev *dev = vmd->dev;
 	u16 reg;
@@ -664,6 +694,18 @@  static int vmd_get_bus_number_start(struct vmd_dev *vmd)
 		case 2:
 			vmd->busn_start[VMD_BUS_0] = 224;
 			break;
+		case 3:
+			if (!(features & VMD_FEAT_HAS_PCH_ROOTBUS)) {
+				pci_err(dev, "VMD Bus Restriction detected type %d, but PCH Rootbus is not supported, aborting.\n",
+					BUS_RESTRICT_CFG(reg));
+				return -ENODEV;
+			}
+
+			/* IOC start bus */
+			vmd->busn_start[VMD_BUS_0] = 224;
+			/* PCH start bus */
+			vmd->busn_start[VMD_BUS_1] = 225;
+			break;
 		default:
 			pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
 				BUS_RESTRICT_CFG(reg));
@@ -790,6 +832,30 @@  static void vmd_configure_cfgbar(struct vmd_dev *vmd)
 		       (resource_size(res) >> 20) - 1,
 		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
 	};
+
+	if (vmd_has_pch_rootbus(vmd)) {
+		u16 ioc_range = 0;
+		u16 pch_range = 0;
+
+		pci_read_config_word(vmd->dev, VMD_BUSRANGE0, &ioc_range);
+		pci_read_config_word(vmd->dev, VMD_BUSRANGE1, &pch_range);
+
+		/*
+		 * Resize CPU IOC CFGBAR range to make space for PCH owned
+		 * devices by adjusting range end with value stored in
+		 * VMD_BUSRANGE0 register.
+		 */
+		vmd->resources[VMD_RES_CFGBAR].start = VMD_BUS_START(ioc_range);
+		vmd->resources[VMD_RES_CFGBAR].end = VMD_BUS_END(ioc_range);
+
+		vmd->resources[VMD_RES_PCH_CFGBAR] = (struct resource){
+			.name = "VMD CFGBAR PCH",
+			.start = VMD_BUS_START(pch_range),
+			.end = VMD_BUS_END(pch_range),
+			.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
+			.parent = &vmd->resources[VMD_RES_CFGBAR],
+		};
+	}
 }
 
 /**
@@ -822,7 +888,8 @@  static void vmd_configure_membar(struct vmd_dev *vmd,
 	if (!upper_bits)
 		flags &= ~IORESOURCE_MEM_64;
 
-	snprintf(name, sizeof(name), "VMD MEMBAR%d", membar_number/2);
+	snprintf(name, sizeof(name), "VMD MEMBAR%d %s", membar_number / 2,
+		 resource_number > VMD_RES_MBAR_2 ? "PCH" : "");
 
 	res_parent = parent;
 	if (!res_parent)
@@ -840,9 +907,43 @@  static void vmd_configure_membar(struct vmd_dev *vmd,
 static void vmd_configure_membar1_membar2(struct vmd_dev *vmd,
 					  resource_size_t mbar2_ofs)
 {
-	vmd_configure_membar(vmd, VMD_RES_MBAR_1, VMD_MEMBAR1, 0, 0, NULL);
-	vmd_configure_membar(vmd, VMD_RES_MBAR_2, VMD_MEMBAR2,
-			     mbar2_ofs, 0, NULL);
+	if (vmd_has_pch_rootbus(vmd)) {
+		u32 pch_mbar1_ofs = 0;
+		u64 pch_mbar2_ofs = 0;
+		u32 reg;
+
+		pci_read_config_dword(vmd->dev, VMD_MEMBAR1_OFFSET,
+				      &pch_mbar1_ofs);
+
+		pci_read_config_dword(vmd->dev, VMD_MEMBAR2_OFFSET1, &reg);
+		pch_mbar2_ofs = reg;
+
+		pci_read_config_dword(vmd->dev, VMD_MEMBAR2_OFFSET2, &reg);
+		pch_mbar2_ofs |= (u64)reg << 32;
+
+		/*
+		 * Resize CPU IOC MEMBAR1 and MEMBAR2 ranges to make space
+		 * for PCH owned devices by adjusting range end with values
+		 * stored in VMD_MEMBAR1_OFFSET and VMD_MEMBAR2_OFFSET registers
+		 */
+		vmd_configure_membar(vmd, VMD_RES_MBAR_1, VMD_MEMBAR1, 0,
+				     pch_mbar1_ofs, NULL);
+		vmd_configure_membar(vmd, VMD_RES_MBAR_2, VMD_MEMBAR2,
+				     mbar2_ofs, pch_mbar2_ofs - mbar2_ofs,
+				     NULL);
+
+		vmd_configure_membar(vmd, VMD_RES_PCH_MBAR_1, VMD_MEMBAR1,
+				     pch_mbar1_ofs, 0,
+				     &vmd->resources[VMD_RES_MBAR_1]);
+		vmd_configure_membar(vmd, VMD_RES_PCH_MBAR_2, VMD_MEMBAR2,
+				     mbar2_ofs + pch_mbar2_ofs, 0,
+				     &vmd->resources[VMD_RES_MBAR_2]);
+	} else {
+		vmd_configure_membar(vmd, VMD_RES_MBAR_1, VMD_MEMBAR1, 0, 0,
+				     NULL);
+		vmd_configure_membar(vmd, VMD_RES_MBAR_2, VMD_MEMBAR2,
+				     mbar2_ofs, 0, NULL);
+	}
 }
 
 static void vmd_bus_enumeration(struct pci_bus *bus, unsigned long features)
@@ -854,7 +955,9 @@  static void vmd_bus_enumeration(struct pci_bus *bus, unsigned long features)
 	vmd_acpi_begin();
 
 	pci_scan_child_bus(bus);
-	vmd_domain_reset(vmd_from_bus(bus));
+
+	if (bus->primary == 0)
+		vmd_domain_reset(vmd_from_bus(bus));
 
 	/*
 	 * When Intel VMD is enabled, the OS does not discover the Root Ports
@@ -893,6 +996,47 @@  static void vmd_bus_enumeration(struct pci_bus *bus, unsigned long features)
 	vmd_acpi_end();
 }
 
+static int vmd_create_pch_bus(struct vmd_dev *vmd, struct pci_sysdata *sd,
+			      resource_size_t *offset)
+{
+	LIST_HEAD(resources_pch);
+
+	pci_add_resource(&resources_pch, &vmd->resources[VMD_RES_PCH_CFGBAR]);
+	pci_add_resource_offset(&resources_pch,
+				&vmd->resources[VMD_RES_PCH_MBAR_1], offset[0]);
+	pci_add_resource_offset(&resources_pch,
+				&vmd->resources[VMD_RES_PCH_MBAR_2], offset[1]);
+
+	vmd->bus[VMD_BUS_1] = pci_create_root_bus(&vmd->dev->dev,
+						  vmd->busn_start[VMD_BUS_1],
+						  &vmd_ops, sd, &resources_pch);
+
+	if (!vmd->bus[VMD_BUS_1]) {
+		pci_free_resource_list(&resources_pch);
+		pci_stop_root_bus(vmd->bus[VMD_BUS_1]);
+		pci_remove_root_bus(vmd->bus[VMD_BUS_1]);
+		return -ENODEV;
+	}
+
+	/*
+	 * primary bus is not set by pci_create_root_bus(), it is updated here
+	 */
+	vmd->bus[VMD_BUS_1]->primary = VMD_PRIMARY_PCH_BUS;
+
+	vmd_copy_host_bridge_flags(
+		pci_find_host_bridge(vmd->dev->bus),
+		to_pci_host_bridge(vmd->bus[VMD_BUS_1]->bridge));
+
+	if (vmd->irq_domain)
+		dev_set_msi_domain(&vmd->bus[VMD_BUS_1]->dev,
+				   vmd->irq_domain);
+	else
+		dev_set_msi_domain(&vmd->bus[VMD_BUS_1]->dev,
+				   dev_get_msi_domain(&vmd->dev->dev));
+
+	return 0;
+}
+
 static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
@@ -923,7 +1067,7 @@  static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	 * limits the bus range to between 0-127, 128-255, or 224-255
 	 */
 	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
-		ret = vmd_get_bus_number_start(vmd);
+		ret = vmd_get_bus_number_start(vmd, features);
 		if (ret)
 			return ret;
 	}
@@ -1016,6 +1160,16 @@  static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 
 	vmd_bus_enumeration(vmd->bus[VMD_BUS_0], features);
 
+	if (vmd_has_pch_rootbus(vmd)) {
+		ret = vmd_create_pch_bus(vmd, sd, offset);
+		if (ret) {
+			pci_err(vmd->dev, "Can't create PCH bus: %d\n", ret);
+			return ret;
+		}
+
+		vmd_bus_enumeration(vmd->bus[VMD_BUS_1], features);
+	}
+
 	return 0;
 }
 
@@ -1094,6 +1248,10 @@  static void vmd_remove(struct pci_dev *dev)
 	pci_stop_root_bus(vmd->bus[VMD_BUS_0]);
 	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
 	pci_remove_root_bus(vmd->bus[VMD_BUS_0]);
+	if (vmd_has_pch_rootbus(vmd)) {
+		pci_stop_root_bus(vmd->bus[VMD_BUS_1]);
+		pci_remove_root_bus(vmd->bus[VMD_BUS_1]);
+	}
 	vmd_cleanup_srcu(vmd);
 	vmd_detach_resources(vmd);
 	vmd_remove_irq_domain(vmd);
@@ -1179,4 +1337,4 @@  module_pci_driver(vmd_drv);
 MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION("Volume Management Device driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.6");
+MODULE_VERSION("0.7");