@@ -22,6 +22,9 @@
/* Number of MSI IRQs */
#define MC_MAX_NUM_MSI_IRQS 32
+#define MC_MAX_NUM_INBOUND_WINDOWS 8
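+/* Mask used to align merged inbound window start addresses */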
+#define MC_ATT_MASK GENMASK(63, 31)
+
/* PCIe Bridge Phy and Controller Phy offsets */
#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
#define MC_PCIE1_CTRL_ADDR 0x0000a000u
@@ -86,10 +89,13 @@
#define ISTATUS_MSI 0x194
#define ATR_WINDOW_DESC_SIZE 32
-#define ATR_PCIE_ATR_SIZE 0x25
#define ATR_SIZE_SHIFT 1
#define ATR_IMPL_ENABLE 1
+#define ATR_PCIE_WIN0_SRCADDR 0x80000000
+#define ATR_PCIE_ATR_SIZE (512 * 1024 * 1024ul)
+#define ATR_PCIE_NUM_WINDOWS 8
+
/* PCIe Master table init defines */
#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
@@ -278,6 +284,12 @@ struct mc_msi {
DECLARE_BITMAP(used, MC_MAX_NUM_MSI_IRQS);
};
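+/* A single inbound (PCIe -> AXI) address translation window */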
+struct inbound_windows {
+ u64 axi_addr;
+ u64 pci_addr;
+ u64 size;
+};
+
struct mc_pcie {
void __iomem *axi_base_addr;
struct device *dev;
@@ -286,6 +298,8 @@ struct mc_pcie {
raw_spinlock_t lock;
struct mc_msi msi;
u64 outbound_range_offset;
+ u32 num_inbound_windows;
+ struct inbound_windows inbound_windows[MC_MAX_NUM_INBOUND_WINDOWS];
};
struct cause {
@@ -942,6 +956,43 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port)
return mc_allocate_msi_domains(port);
}
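+/*
+ * Program the bridge's inbound (PCIe -> AXI) address translation windows
+ * from the ranges collected in port->inbound_windows.
+ */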
+static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev, struct mc_pcie *port)
+{
+ void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ phys_addr_t pcie_addr;
+ phys_addr_t axi_addr;
+ u32 atr_size;
+ u32 val;
+ int i;
+
+ for (i = 0; i < port->num_inbound_windows; i++) {
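+ /* The ATT size field encodes the window size as log2(size) - 1 */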
+ atr_size = ilog2(port->inbound_windows[i].size) - 1;
+ atr_size &= GENMASK(5, 0);
+
+ pcie_addr = port->inbound_windows[i].pci_addr;
+
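+ /* Low PCIe address bits, encoded size and enable bit share one register */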
+ val = lower_32_bits(pcie_addr) & GENMASK(31, 12);
+ val |= (atr_size << ATR_SIZE_SHIFT);
+ val |= ATR_IMPL_ENABLE;
+ writel(val, bridge_base_addr +
+ ATR0_PCIE_WIN0_SRCADDR_PARAM + (i * ATR_WINDOW_DESC_SIZE));
+ writel(upper_32_bits(pcie_addr), bridge_base_addr +
+ ATR0_PCIE_WIN0_SRC_ADDR + (i * ATR_WINDOW_DESC_SIZE));
+
+ axi_addr = port->inbound_windows[i].axi_addr;
+
+ writel(lower_32_bits(axi_addr), bridge_base_addr +
+ ATR0_PCIE_WIN0_TRSL_ADDR_LSB + (i * ATR_WINDOW_DESC_SIZE));
+ writel(upper_32_bits(axi_addr), bridge_base_addr +
+ ATR0_PCIE_WIN0_TRSL_ADDR_UDW + (i * ATR_WINDOW_DESC_SIZE));
+
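+ /* Route this window to the AXI4 master 0 interface */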
+ writel(TRSL_ID_AXI4_MASTER_0, bridge_base_addr +
+ ATR0_PCIE_WIN0_TRSL_PARAM + (i * ATR_WINDOW_DESC_SIZE));
+ }
+
+ return 0;
+}
+
static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
phys_addr_t axi_addr, phys_addr_t pci_addr,
size_t size)
@@ -973,11 +1024,6 @@ static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
val = upper_32_bits(pci_addr);
writel(val, bridge_base_addr + (index * ATR_WINDOW_DESC_SIZE) +
ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
-
- val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- val |= (ATR_PCIE_ATR_SIZE << ATR_SIZE_SHIFT);
- writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
static int mc_pcie_setup_windows(struct platform_device *pdev,
@@ -1157,6 +1203,116 @@ static int mc_check_for_parent_range_handling(struct platform_device *pdev, stru
return 0;
}
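+/*
+ * Parse the dma-ranges of this node and, where present, of its parent, and
+ * merge them into a set of inbound translation windows for the bridge.
+ */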
+static int mc_check_for_parent_dma_range_handling(struct platform_device *pdev,
+ struct mc_pcie *port)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ int num_parent_ranges = 0;
+ int num_ranges = 0;
+ struct inbound_windows ranges[MC_MAX_NUM_INBOUND_WINDOWS] = { 0 };
+ u64 start_axi = U64_MAX;
+ u64 end_axi = 0;
+ u64 start_pci = U64_MAX;
+ s64 size;
+ u64 window_size;
+ int i;
+
+ /* Find all dma-ranges */
+ if (of_pci_dma_range_parser_init(&parser, dn)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ for_each_of_range(&parser, &range) {
+ if (num_ranges >= MC_MAX_NUM_INBOUND_WINDOWS) {
+ dev_err(dev, "too many inbound ranges; %d available tables\n",
+ MC_MAX_NUM_INBOUND_WINDOWS);
+ return -EINVAL;
+ }
+ ranges[num_ranges].axi_addr = range.cpu_addr;
+ ranges[num_ranges].pci_addr = range.pci_addr;
+ ranges[num_ranges].size = range.size;
+
+ num_ranges++;
+ }
+
+ /*
+ * Check one level up: if the parent node also has dma-ranges, the
+ * address translation tables must be adjusted to account for that
+ * extra level of translation.
+ */
+ dn = of_get_parent(dn);
+ if (dn) {
+ of_pci_dma_range_parser_init(&parser, dn);
+
+ for_each_of_range(&parser, &range) {
+ if (num_parent_ranges >= MC_MAX_NUM_INBOUND_WINDOWS) {
+ dev_err(dev, "too many parent inbound ranges; %d available tables\n",
+ MC_MAX_NUM_INBOUND_WINDOWS);
+ of_node_put(dn);
+ return -EINVAL;
+ }
+ /* The AXI side of this window is the parent's translated address */
+ ranges[num_parent_ranges].axi_addr = range.pci_addr;
+ num_parent_ranges++;
+ }
+ of_node_put(dn);
+ }
+
+ if (num_parent_ranges && num_parent_ranges != num_ranges) {
+ dev_err(dev, "number of parent inbound ranges must be zero or match the number of inbound ranges\n");
+ return -EINVAL;
+ }
+
+ /* Merge ranges */
+ for (i = 0; i < num_ranges; i++) {
+ struct inbound_windows *win = &ranges[i];
+
+ if (win->axi_addr < start_axi) {
+ start_axi = win->axi_addr;
+ start_pci = win->pci_addr;
+ }
+
+ if (win->axi_addr + win->size > end_axi)
+ end_axi = win->axi_addr + win->size;
+ }
+
+ /* Move starts back as far as possible */
+ start_axi &= MC_ATT_MASK;
+ start_pci &= MC_ATT_MASK;
+
+ /* Adjust size to take account of that change */
+ size = end_axi - start_axi;
+
+ /* Round the size up to the next power of 2, if it is not one already */
+ if (size > 1ull << ilog2(size))
+ size = 1ull << (ilog2(size) + 1);
+
+ window_size = 1ull << (ilog2(size) - 1);
+
+ /* Divide merged range into windows */
+ i = 0;
+ while (size > 0 && i < MC_MAX_NUM_INBOUND_WINDOWS) {
+ port->inbound_windows[i].axi_addr = start_axi;
+ port->inbound_windows[i].pci_addr = start_pci;
+ port->inbound_windows[i].size = window_size;
+
+ size -= window_size;
+ start_axi += window_size;
+ start_pci += window_size;
+ i++;
+ }
+ port->num_inbound_windows = i;
+
+ if (size > 0) {
+ dev_err(dev, "insufficient windows to map inbound ranges\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mc_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -1174,6 +1330,11 @@ static int mc_platform_init(struct pci_config_window *cfg)
if (ret)
return ret;
+ /* And similarly, check for inbound address translation */
+ ret = mc_check_for_parent_dma_range_handling(pdev, port);
+ if (ret)
+ return ret;
+
/* Configure address translation table 0 for PCIe config space */
mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start - port->outbound_range_offset,
cfg->res.start - port->outbound_range_offset,
@@ -1187,6 +1348,11 @@ static int mc_platform_init(struct pci_config_window *cfg)
if (ret)
return ret;
+ /* Configure inbound translation tables */
+ ret = mc_pcie_setup_inbound_ranges(pdev, port);
+ if (ret)
+ return ret;
+
/* Address translation is up; safe to enable interrupts */
ret = mc_init_interrupts(pdev, port);
if (ret)