[v9,2/4] PCI: X-Gene: Add the APM X-Gene v1 PCIe MSI/MSIX termination driver

Message ID 8d5049d8c03f8f5883ea626596dafc7d3f1c99f1.1432747280.git.dhdang@apm.com (mailing list archive)
State New, archived

Commit Message

Duc Dang May 27, 2015, 6:27 p.m. UTC
APM X-Gene v1 SoC supports its own implementation of MSI, which is not
compliant with the GIC V2M specification for MSI termination.

There is a single MSI block in the X-Gene v1 SoC which serves all 5 PCIe
ports. This MSI block supports 2048 MSI termination ports coalesced into
16 physical HW IRQ lines shared across all 5 PCIe ports.

As there are only 16 HW IRQs to serve 2048 MSI vectors, to support
set_affinity correctly for each MSI vector, the 16 HW IRQs are statically
allocated to the 8 X-Gene v1 cores (2 HW IRQs per core). To steer an MSI
interrupt to its target CPU, the MSI vector is moved among these HW IRQ
lines. With this approach, the total number of MSI vectors this driver
supports is reduced to 256.

Signed-off-by: Duc Dang <dhdang@apm.com>
Signed-off-by: Tanmay Inamdar <tinamdar@apm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
---
 drivers/pci/host/Kconfig         |  10 +
 drivers/pci/host/Makefile        |   1 +
 drivers/pci/host/pci-xgene-msi.c | 595 +++++++++++++++++++++++++++++++++++++++
 drivers/pci/host/pci-xgene.c     |  21 ++
 4 files changed, 627 insertions(+)
 create mode 100644 drivers/pci/host/pci-xgene-msi.c
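
For concreteness, here is a minimal standalone sketch of the addressing
and steering arithmetic described above (illustrative only: the frame
base and the sample vector are made up, and the constants simply mirror
the driver's #defines):

#include <stdio.h>
#include <stdint.h>

#define IDX_PER_GROUP 8
#define IRQS_PER_IDX  16
#define NR_HW_IRQS    16
#define NUM_CPUS      8                  /* X-Gene v1 core count */

int main(void)
{
	uint64_t base = 0x79000000ULL;   /* hypothetical MSI frame base */
	unsigned long hwirq = 37;        /* arbitrary sample vector */

	unsigned int reg_set = hwirq / (NR_HW_IRQS * IRQS_PER_IDX); /* IRx index, 0..7 */
	unsigned int group   = hwirq % NR_HW_IRQS;                  /* MSIn group, 0..15 */
	unsigned int data    = (hwirq / NR_HW_IRQS) % IRQS_PER_IDX; /* MSI payload */
	unsigned int cpu     = (hwirq % NR_HW_IRQS) % NUM_CPUS;     /* servicing core */

	/* The termination address is register MSI<group>IR<reg_set>. */
	uint64_t addr = base + (((8 * group) + reg_set) << 16);

	printf("hwirq %lu -> MSI%XIR%u, addr 0x%llx, data %u, CPU %u\n",
	       hwirq, group, reg_set, (unsigned long long)addr, data, cpu);

	/* set_affinity steering: move the vector onto CPU 2's GIC IRQ line. */
	unsigned int target_cpu = 2;
	hwirq = (hwirq - cpu) + target_cpu; /* canonical hwirq + target CPU */
	printf("retargeted hwirq: %lu (now serviced by CPU %u)\n", hwirq, target_cpu);
	return 0;
}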

Comments

Marc Zyngier May 28, 2015, 8:05 a.m. UTC | #1
On 27/05/15 19:27, Duc Dang wrote:
> APM X-Gene v1 SoC supports its own implementation of MSI, which is not
> compliant with the GIC V2M specification for MSI termination.
> 
> There is a single MSI block in the X-Gene v1 SoC which serves all 5 PCIe
> ports. This MSI block supports 2048 MSI termination ports coalesced into
> 16 physical HW IRQ lines shared across all 5 PCIe ports.
> 
> As there are only 16 HW IRQs to serve 2048 MSI vectors, to support
> set_affinity correctly for each MSI vector, the 16 HW IRQs are statically
> allocated to the 8 X-Gene v1 cores (2 HW IRQs per core). To steer an MSI
> interrupt to its target CPU, the MSI vector is moved among these HW IRQ
> lines. With this approach, the total number of MSI vectors this driver
> supports is reduced to 256.
> 
> Signed-off-by: Duc Dang <dhdang@apm.com>
> Signed-off-by: Tanmay Inamdar <tinamdar@apm.com>
> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  drivers/pci/host/Kconfig         |  10 +
>  drivers/pci/host/Makefile        |   1 +
>  drivers/pci/host/pci-xgene-msi.c | 595 +++++++++++++++++++++++++++++++++++++++
>  drivers/pci/host/pci-xgene.c     |  21 ++
>  4 files changed, 627 insertions(+)
>  create mode 100644 drivers/pci/host/pci-xgene-msi.c
> 
> diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
> index 1dfb567..a93fda8 100644
> --- a/drivers/pci/host/Kconfig
> +++ b/drivers/pci/host/Kconfig
> @@ -89,11 +89,21 @@ config PCI_XGENE
>         depends on ARCH_XGENE
>         depends on OF
>         select PCIEPORTBUS
> +       select PCI_MSI_IRQ_DOMAIN if PCI_MSI
>         help
>           Say Y here if you want internal PCI support on APM X-Gene SoC.
>           There are 5 internal PCIe ports available. Each port is GEN3 capable
>           and have varied lanes from x1 to x8.
> 
> +config PCI_XGENE_MSI
> +       bool "X-Gene v1 PCIe MSI feature"
> +       depends on PCI_XGENE && PCI_MSI
> +       default y
> +       help
> +         Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
> +         This MSI driver will provide MSI support for the 5 PCIe ports of
> +         the APM X-Gene v1 SoC.
> +
>  config PCI_LAYERSCAPE
>         bool "Freescale Layerscape PCIe controller"
>         depends on OF && ARM
> diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
> index f733b4e..1957431 100644
> --- a/drivers/pci/host/Makefile
> +++ b/drivers/pci/host/Makefile
> @@ -11,6 +11,7 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
>  obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
>  obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
>  obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
> +obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
>  obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
>  obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
>  obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
> diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
> new file mode 100644
> index 0000000..461cb11
> --- /dev/null
> +++ b/drivers/pci/host/pci-xgene-msi.c
> @@ -0,0 +1,595 @@
> +/*
> + * APM X-Gene MSI Driver
> + *
> + * Copyright (c) 2014, Applied Micro Circuits Corporation
> + * Author: Tanmay Inamdar <tinamdar@apm.com>
> + *        Duc Dang <dhdang@apm.com>
> + *
> + * This program is free software; you can redistribute  it and/or modify it
> + * under  the terms of  the GNU General  Public License as published by the
> + * Free Software Foundation;  either version 2 of the  License, or (at your
> + * option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + */
> +#include <linux/cpu.h>
> +#include <linux/interrupt.h>
> +#include <linux/module.h>
> +#include <linux/msi.h>
> +#include <linux/of_irq.h>
> +#include <linux/irqchip/chained_irq.h>
> +#include <linux/pci.h>
> +#include <linux/platform_device.h>
> +#include <linux/of_pci.h>
> +
> +#define MSI_IR0                        0x000000
> +#define MSI_INT0               0x800000
> +#define IDX_PER_GROUP          8
> +#define IRQS_PER_IDX           16
> +#define NR_HW_IRQS             16
> +#define NR_MSI_VEC             (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
> +
> +struct xgene_msi_group {
> +       struct xgene_msi                *msi;
> +       int                             gic_irq;
> +       u32                             msi_grp;
> +};
> +
> +struct xgene_msi {
> +       struct device_node              *node;
> +       struct msi_controller           mchip;
> +       struct irq_domain               *domain;
> +       u64                             msi_addr;
> +       void __iomem                    *msi_regs;
> +       unsigned long                   *bitmap;
> +       struct mutex                    bitmap_lock;
> +       struct xgene_msi_group          *msi_groups;
> +       int                             num_cpus;
> +};
> +
> +/* Global data */
> +static struct xgene_msi xgene_msi_ctrl;
> +
> +static struct irq_chip xgene_msi_top_irq_chip = {
> +       .name           = "X-Gene1 MSI",
> +       .irq_enable     = pci_msi_unmask_irq,
> +       .irq_disable    = pci_msi_mask_irq,
> +       .irq_mask       = pci_msi_mask_irq,
> +       .irq_unmask     = pci_msi_unmask_irq,
> +};
> +
> +static struct  msi_domain_info xgene_msi_domain_info = {
> +       .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
> +                 MSI_FLAG_PCI_MSIX),
> +       .chip   = &xgene_msi_top_irq_chip,
> +};
> +
> +/*
> + * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
> + * n is the group number (0..F) and x is the index of a register within
> + * its group (0..7). The register layout is as follows:
> + * MSI0IR0                     base_addr
> + * MSI0IR1                     base_addr +  0x10000
> + * ...                         ...
> + * MSI0IR6                     base_addr +  0x60000
> + * MSI0IR7                     base_addr +  0x70000
> + * MSI1IR0                     base_addr +  0x80000
> + * MSI1IR1                     base_addr +  0x90000
> + * ...                         ...
> + * MSI1IR7                     base_addr +  0xF0000
> + * MSI2IR0                     base_addr + 0x100000
> + * ...                         ...
> + * MSIFIR0                     base_addr + 0x780000
> + * MSIFIR1                     base_addr + 0x790000
> + * ...                         ...
> + * MSIFIR7                     base_addr + 0x7F0000
> + * MSIINT0                     base_addr + 0x800000
> + * MSIINT1                     base_addr + 0x810000
> + * ...                         ...
> + * MSIINTF                     base_addr + 0x8F0000
> + *
> + * Each index register supports 16 MSI vectors (0..15) to generate interrupts.
> + * In total, 16 GIC IRQs are assigned to these 16 groups of MSI termination
> + * registers.
> + *
> + * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate
> + * the MSI pending status caused by 1 of its 8 index registers.
> + */
> +
> +/* MSInIRx read helper */
> +static inline u32 xgene_msi_ir_read(struct xgene_msi *msi,
> +                                   u32 msi_grp, u32 msir_idx)
> +{
> +       return readl_relaxed(msi->msi_regs + MSI_IR0 +
> +                             (msi_grp << 19) + (msir_idx << 16));
> +}
> +
> +/* MSIINTn read helper */
> +static inline u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
> +{
> +       return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
> +}
> +
> +/* With 2048 MSI vectors supported, the MSI message can be constructed using
> + * the following scheme:
> + * - Divide into 8 256-vector groups
> + *             Group 0: 0-255
> + *             Group 1: 256-511
> + *             Group 2: 512-767
> + *             ...
> + *             Group 7: 1792-2047
> + * - Each 256-vector group is divided into 16 16-vector groups
> + *     As an example, the 16 16-vector groups for 256-vector group 0-255 are
> + *             Group 0: 0-15
> + *             Group 1: 16-31
> + *             ...
> + *             Group 15: 240-255
> + * - The termination address of MSI vector in 256-vector group n and 16-vector
> + * group x is the address of MSIxIRn
> + * - The data for MSI vector in 16-vector group x is x
> + */
> +static inline u32 hwirq_to_reg_set(unsigned long hwirq)
> +{
> +       return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
> +}
> +
> +static inline u32 hwirq_to_group(unsigned long hwirq)
> +{
> +       return (hwirq % NR_HW_IRQS);
> +}
> +
> +static inline u32 hwirq_to_msi_data(unsigned long hwirq)
> +{
> +       return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
> +}

These "inline"s are superfluous (including the ones for the HW
accessors). The compiler is bright enough to do it itself.

> +
> +static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
> +{
> +       struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
> +       u32 reg_set = hwirq_to_reg_set(data->hwirq);
> +       u32 group = hwirq_to_group(data->hwirq);
> +       u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
> +
> +       msg->address_hi = upper_32_bits(target_addr);
> +       msg->address_lo = lower_32_bits(target_addr);
> +       msg->data = hwirq_to_msi_data(data->hwirq);
> +}
> +
> +/*
> + * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors.
> + * To maintain the expected behaviour of .set_affinity for each MSI
> + * interrupt, the 16 MSI GIC IRQs are statically allocated to 8 X-Gene
> + * v1 cores (2 GIC IRQs for each core). The MSI vector is moved from one
> + * MSI GIC IRQ to another MSI GIC IRQ to steer its MSI interrupt to the
> + * correct X-Gene v1 core. As a consequence, the total number of MSI
> + * vectors that X-Gene v1 supports is reduced to 256 (2048/8).
> + */
> +static inline int hwirq_to_cpu(unsigned long hwirq)
> +{
> +       return ((hwirq % NR_HW_IRQS) % xgene_msi_ctrl.num_cpus);
> +}
> +
> +static inline unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
> +{
> +       return (hwirq - (hwirq % xgene_msi_ctrl.num_cpus));
> +}

Hmmm. This is weird. The canonical hwirq is defined as the IRQ seen from
CPU0. So it is possible to write hwirq_to_canonical_hwirq as:

static irq_hw_number_t hwirq_to_canonical_hwirq(irq_hw_number_t hwirq)
{
	return hwirq - hwirq_to_cpu(hwirq);
}

But you've defined hwirq_to_cpu as "(hwirq % NR_HW_IRQS) %
xgene_msi_ctrl.num_cpus", which doesn't match your definition of a
canonical hwirq. Can you explain the discrepancy?

> +
> +static int xgene_msi_set_affinity(struct irq_data *irq_data,
> +                                 const struct cpumask *mask, bool force)
> +{
> +       int target_cpu = cpumask_first(mask);
> +       int curr_cpu;
> +
> +       curr_cpu = hwirq_to_cpu(irq_data->hwirq);
> +       if (curr_cpu == target_cpu)
> +               return IRQ_SET_MASK_OK_DONE;
> +
> +       /* Update MSI number to target the new CPU */
> +       irq_data->hwirq = irq_data->hwirq + (target_cpu - curr_cpu);

irq_data->hwirq = hwirq_to_canonical_hwirq(irq_data->hwirq) + target_cpu;

> +
> +       return IRQ_SET_MASK_OK;
> +}

Thanks,

	M.
Duc Dang May 28, 2015, 5:16 p.m. UTC | #2
On Thu, May 28, 2015 at 1:05 AM, Marc Zyngier <marc.zyngier@arm.com> wrote:
> On 27/05/15 19:27, Duc Dang wrote:
>> APM X-Gene v1 SoC supports its own implementation of MSI, which is not
>> compliant with the GIC V2M specification for MSI termination.
>>
>> There is a single MSI block in the X-Gene v1 SoC which serves all 5 PCIe
>> ports. This MSI block supports 2048 MSI termination ports coalesced into
>> 16 physical HW IRQ lines shared across all 5 PCIe ports.
>>
>> As there are only 16 HW IRQs to serve 2048 MSI vectors, to support
>> set_affinity correctly for each MSI vector, the 16 HW IRQs are statically
>> allocated to the 8 X-Gene v1 cores (2 HW IRQs per core). To steer an MSI
>> interrupt to its target CPU, the MSI vector is moved among these HW IRQ
>> lines. With this approach, the total number of MSI vectors this driver
>> supports is reduced to 256.
>>
>> Signed-off-by: Duc Dang <dhdang@apm.com>
>> Signed-off-by: Tanmay Inamdar <tinamdar@apm.com>
>> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>>  drivers/pci/host/Kconfig         |  10 +
>>  drivers/pci/host/Makefile        |   1 +
>>  drivers/pci/host/pci-xgene-msi.c | 595 +++++++++++++++++++++++++++++++++++++++
>>  drivers/pci/host/pci-xgene.c     |  21 ++
>>  4 files changed, 627 insertions(+)
>>  create mode 100644 drivers/pci/host/pci-xgene-msi.c
>>
>> diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
>> index 1dfb567..a93fda8 100644
>> --- a/drivers/pci/host/Kconfig
>> +++ b/drivers/pci/host/Kconfig
>> @@ -89,11 +89,21 @@ config PCI_XGENE
>>         depends on ARCH_XGENE
>>         depends on OF
>>         select PCIEPORTBUS
>> +       select PCI_MSI_IRQ_DOMAIN if PCI_MSI
>>         help
>>           Say Y here if you want internal PCI support on APM X-Gene SoC.
>>           There are 5 internal PCIe ports available. Each port is GEN3 capable
>>           and have varied lanes from x1 to x8.
>>
>> +config PCI_XGENE_MSI
>> +       bool "X-Gene v1 PCIe MSI feature"
>> +       depends on PCI_XGENE && PCI_MSI
>> +       default y
>> +       help
>> +         Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
>> +         This MSI driver will provide MSI support for the 5 PCIe ports of
>> +         the APM X-Gene v1 SoC.
>> +
>>  config PCI_LAYERSCAPE
>>         bool "Freescale Layerscape PCIe controller"
>>         depends on OF && ARM
>> diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
>> index f733b4e..1957431 100644
>> --- a/drivers/pci/host/Makefile
>> +++ b/drivers/pci/host/Makefile
>> @@ -11,6 +11,7 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
>>  obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
>>  obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
>>  obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
>> +obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
>>  obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
>>  obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
>>  obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
>> diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
>> new file mode 100644
>> index 0000000..461cb11
>> --- /dev/null
>> +++ b/drivers/pci/host/pci-xgene-msi.c
>> @@ -0,0 +1,595 @@
>> +/*
>> + * APM X-Gene MSI Driver
>> + *
>> + * Copyright (c) 2014, Applied Micro Circuits Corporation
>> + * Author: Tanmay Inamdar <tinamdar@apm.com>
>> + *        Duc Dang <dhdang@apm.com>
>> + *
>> + * This program is free software; you can redistribute  it and/or modify it
>> + * under  the terms of  the GNU General  Public License as published by the
>> + * Free Software Foundation;  either version 2 of the  License, or (at your
>> + * option) any later version.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> + */
>> +#include <linux/cpu.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/module.h>
>> +#include <linux/msi.h>
>> +#include <linux/of_irq.h>
>> +#include <linux/irqchip/chained_irq.h>
>> +#include <linux/pci.h>
>> +#include <linux/platform_device.h>
>> +#include <linux/of_pci.h>
>> +
>> +#define MSI_IR0                        0x000000
>> +#define MSI_INT0               0x800000
>> +#define IDX_PER_GROUP          8
>> +#define IRQS_PER_IDX           16
>> +#define NR_HW_IRQS             16
>> +#define NR_MSI_VEC             (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
>> +
>> +struct xgene_msi_group {
>> +       struct xgene_msi                *msi;
>> +       int                             gic_irq;
>> +       u32                             msi_grp;
>> +};
>> +
>> +struct xgene_msi {
>> +       struct device_node              *node;
>> +       struct msi_controller           mchip;
>> +       struct irq_domain               *domain;
>> +       u64                             msi_addr;
>> +       void __iomem                    *msi_regs;
>> +       unsigned long                   *bitmap;
>> +       struct mutex                    bitmap_lock;
>> +       struct xgene_msi_group          *msi_groups;
>> +       int                             num_cpus;
>> +};
>> +
>> +/* Global data */
>> +static struct xgene_msi xgene_msi_ctrl;
>> +
>> +static struct irq_chip xgene_msi_top_irq_chip = {
>> +       .name           = "X-Gene1 MSI",
>> +       .irq_enable     = pci_msi_unmask_irq,
>> +       .irq_disable    = pci_msi_mask_irq,
>> +       .irq_mask       = pci_msi_mask_irq,
>> +       .irq_unmask     = pci_msi_unmask_irq,
>> +};
>> +
>> +static struct  msi_domain_info xgene_msi_domain_info = {
>> +       .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
>> +                 MSI_FLAG_PCI_MSIX),
>> +       .chip   = &xgene_msi_top_irq_chip,
>> +};
>> +
>> +/*
>> + * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
>> + * n is the group number (0..F) and x is the index of a register within
>> + * its group (0..7). The register layout is as follows:
>> + * MSI0IR0                     base_addr
>> + * MSI0IR1                     base_addr +  0x10000
>> + * ...                         ...
>> + * MSI0IR6                     base_addr +  0x60000
>> + * MSI0IR7                     base_addr +  0x70000
>> + * MSI1IR0                     base_addr +  0x80000
>> + * MSI1IR1                     base_addr +  0x90000
>> + * ...                         ...
>> + * MSI1IR7                     base_addr +  0xF0000
>> + * MSI2IR0                     base_addr + 0x100000
>> + * ...                         ...
>> + * MSIFIR0                     base_addr + 0x780000
>> + * MSIFIR1                     base_addr + 0x790000
>> + * ...                         ...
>> + * MSIFIR7                     base_addr + 0x7F0000
>> + * MSIINT0                     base_addr + 0x800000
>> + * MSIINT1                     base_addr + 0x810000
>> + * ...                         ...
>> + * MSIINTF                     base_addr + 0x8F0000
>> + *
>> + * Each index register supports 16 MSI vectors (0..15) to generate interrupts.
>> + * In total, 16 GIC IRQs are assigned to these 16 groups of MSI termination
>> + * registers.
>> + *
>> + * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate
>> + * the MSI pending status caused by 1 of its 8 index registers.
>> + */
>> +
>> +/* MSInIRx read helper */
>> +static inline u32 xgene_msi_ir_read(struct xgene_msi *msi,
>> +                                   u32 msi_grp, u32 msir_idx)
>> +{
>> +       return readl_relaxed(msi->msi_regs + MSI_IR0 +
>> +                             (msi_grp << 19) + (msir_idx << 16));
>> +}
>> +
>> +/* MSIINTn read helper */
>> +static inline u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
>> +{
>> +       return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
>> +}
>> +
>> +/* With 2048 MSI vectors supported, the MSI message can be constructed using
>> + * the following scheme:
>> + * - Divide into 8 256-vector groups
>> + *             Group 0: 0-255
>> + *             Group 1: 256-511
>> + *             Group 2: 512-767
>> + *             ...
>> + *             Group 7: 1792-2047
>> + * - Each 256-vector group is divided into 16 16-vector groups
>> + *   As an example, the 16 16-vector groups for 256-vector group 0-255 are
>> + *           Group 0: 0-15
>> + *           Group 1: 16-31
>> + *             ...
>> + *             Group 15: 240-255
>> + * - The termination address of MSI vector in 256-vector group n and 16-vector
>> + * group x is the address of MSIxIRn
>> + * - The data for MSI vector in 16-vector group x is x
>> + */
>> +static inline u32 hwirq_to_reg_set(unsigned long hwirq)
>> +{
>> +       return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
>> +}
>> +
>> +static inline u32 hwirq_to_group(unsigned long hwirq)
>> +{
>> +       return (hwirq % NR_HW_IRQS);
>> +}
>> +
>> +static inline u32 hwirq_to_msi_data(unsigned long hwirq)
>> +{
>> +       return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
>> +}
>
> These "inline"s are superfluous (including the ones for the HW
> accessors). The compiler is bright enough to do it itself.

I will remove these "inline"s.
>
>> +
>> +static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
>> +{
>> +       struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
>> +       u32 reg_set = hwirq_to_reg_set(data->hwirq);
>> +       u32 group = hwirq_to_group(data->hwirq);
>> +       u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
>> +
>> +       msg->address_hi = upper_32_bits(target_addr);
>> +       msg->address_lo = lower_32_bits(target_addr);
>> +       msg->data = hwirq_to_msi_data(data->hwirq);
>> +}
>> +
>> +/*
>> + * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors.
>> + * To maintain the expected behaviour of .set_affinity for each MSI
>> + * interrupt, the 16 MSI GIC IRQs are statically allocated to 8 X-Gene
>> + * v1 cores (2 GIC IRQs for each core). The MSI vector is moved from one
>> + * MSI GIC IRQ to another MSI GIC IRQ to steer its MSI interrupt to the
>> + * correct X-Gene v1 core. As a consequence, the total number of MSI
>> + * vectors that X-Gene v1 supports is reduced to 256 (2048/8).
>> + */
>> +static inline int hwirq_to_cpu(unsigned long hwirq)
>> +{
>> +       return ((hwirq % NR_HW_IRQS) % xgene_msi_ctrl.num_cpus);
>> +}
>> +
>> +static inline unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
>> +{
>> +       return (hwirq - (hwirq % xgene_msi_ctrl.num_cpus));
>> +}
>
> Hmmm. This is weird. The canonical hwirq is defined as the IRQ seen from
> CPU0. So it is possible to write hwirq_to_canonical_hwirq as:
>
> static irq_hw_number_t hwirq_to_canonical_hwirq(irq_hw_number_t hwirq)
> {
>         return hwirq - hwirq_to_cpu(hwirq);
> }
>
> But you've defined hwirq_to_cpu as "(hwirq % NR_HW_IRQS) %
> xgene_msi_ctrl.num_cpus", which doesn't match your definition of a
> canonical hwirq. Can you explain the discrepancy?
>
In "(hwirq % NR_HW_IRQS) % xgene_msi_ctrl.num_cpus", I first look up the
group IRQ that the hwirq belongs to, then figure out the CPU that this
group IRQ is assigned to. With the hwirq allocation scheme used in this
driver, defining hwirq_to_cpu as "hwirq % xgene_msi_ctrl.num_cpus"
provides the same result. So I will change it and use hwirq_to_cpu in
the hwirq_to_canonical_hwirq implementation.
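
For reference, a small standalone check of that equivalence, together
with the simplified helpers as agreed in this thread (a sketch of what
v10 presumably ends up with, not the actual v10 code). The inner modulo
drops out because num_cpus (8 on X-Gene v1) evenly divides NR_HW_IRQS
(16):

#include <assert.h>
#include <stdio.h>

#define NR_HW_IRQS 16
#define NUM_CPUS   8    /* X-Gene v1 core count */

static int hwirq_to_cpu(unsigned long hwirq)
{
	return hwirq % NUM_CPUS;
}

static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
{
	return hwirq - hwirq_to_cpu(hwirq);
}

int main(void)
{
	unsigned long h;

	/* The old and the simplified expressions agree for every vector. */
	for (h = 0; h < 2048; h++)
		assert((h % NR_HW_IRQS) % NUM_CPUS ==
		       (unsigned long)hwirq_to_cpu(h));

	printf("hwirq 37 -> CPU %d, canonical hwirq %lu\n",
	       hwirq_to_cpu(37), hwirq_to_canonical_hwirq(37));
	return 0;
}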

>> +
>> +static int xgene_msi_set_affinity(struct irq_data *irq_data,
>> +                                 const struct cpumask *mask, bool force)
>> +{
>> +       int target_cpu = cpumask_first(mask);
>> +       int curr_cpu;
>> +
>> +       curr_cpu = hwirq_to_cpu(irq_data->hwirq);
>> +       if (curr_cpu == target_cpu)
>> +               return IRQ_SET_MASK_OK_DONE;
>> +
>> +       /* Update MSI number to target the new CPU */
>> +       irq_data->hwirq = irq_data->hwirq + (target_cpu - curr_cpu);
>
> irq_data->hwirq = hwirq_to_canonical_hwirq(irq_data->hwirq) + target_cpu;
>
>> +
>> +       return IRQ_SET_MASK_OK;
>> +}
>
> Thanks,
>
>         M.
> --
> Jazz is not dead. It just smells funny...
Regards,
Duc Dang.
Duc Dang May 29, 2015, 6:24 p.m. UTC | #3
This patch set adds MSI/MSIX termination driver support for the APM X-Gene v1
SoC. The APM X-Gene v1 SoC supports its own implementation of MSI, which is not
compliant with the GIC V2M specification for MSI termination.

There is a single MSI block in the X-Gene v1 SoC which serves all 5 PCIe ports.
This MSI block supports 2048 MSI termination ports coalesced into 16 physical
HW IRQ lines shared across all 5 PCIe ports. Starting from version 5 of
this patch series, the total number of MSI vectors this driver supports is
reduced to 256 to maintain the correct set_affinity behavior for each MSI.

v10 changes:
	1. Remove inline in helper functions
	2. Simplify hwirq_to_cpu implementation

v9 changes:
        1. Add more helpers for manipulating MSI messages
        2. Improve code readability
        3. Make PCI_XGENE_MSI selectable

v8 changes:
        1. Add helper to read MSI registers
        2. Resample interrupt status of the same group after handling interrupts
           in that group
        3. Handle errors when allocation of the CPU mask for affinity setting fails

v7 changes:
        1. Add more error handling cases
        2. Clear spurious interrupts that may happen during driver probe
        3. Do not use free_irq for chained IRQs
        4. Improve GIC IRQ number look up in chained handler

v6 changes:
        1. Correctly allocate MSI with bitmap_find_next_zero_area
        2. Add notifier for CPU hotplug case
        3. Driver clean up
        4. Add more detailed information into device tree binding document
           and move the device tree binding patch before the driver/dts patch

v5 changes:
        1. Implement set_affinity for each MSI by statically allocating 2
	   MSI GIC IRQs for each X-Gene CPU core and moving MSI vectors around
	   these GIC IRQs to steer them to the target CPU core (see the sketch
	   after the diffstat below). As a consequence, the total number of
	   MSI vectors that X-Gene v1 supports is reduced to 256.

v4 changes:
        1. Remove affinity setting for each MSI
        2. Add description about register layout, MSI termination address
	   and data
        3. Correct total number of MSI vectors to 2048
        4. Clean up error messages
        5. Remove unused module code

v3 changes:
        1. Implement MSI support using PCI MSI IRQ domain
        2. Only use msi_controller to store IRQ domain

v2 changes:
        1. Use msi_controller structure
        2. Remove arch hooks arch_teardown_msi_irqs and arch_setup_msi_irqs

 .../devicetree/bindings/pci/xgene-pci-msi.txt      |  68 +++
 MAINTAINERS                                        |   8 +
 arch/arm64/boot/dts/apm/apm-storm.dtsi             |  27 +
 drivers/pci/host/Kconfig                           |  10 +
 drivers/pci/host/Makefile                          |   1 +
 drivers/pci/host/pci-xgene-msi.c                   | 595 +++++++++++++++++++++
 drivers/pci/host/pci-xgene.c                       |  21 +
 7 files changed, 730 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
 create mode 100644 drivers/pci/host/pci-xgene-msi.c
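
As a side note on the static allocation in the v5 entry (sketched here
as promised above): the 16 MSI GIC IRQs are handed out round-robin by
CPU index, using the same stride as the driver's xgene_msi_hwirq_alloc()
loop. Purely illustrative:

#include <stdio.h>

#define NR_HW_IRQS 16
#define NUM_CPUS   8    /* X-Gene v1 core count */

int main(void)
{
	unsigned int cpu, i;

	/*
	 * CPU c services MSI groups c, c + NUM_CPUS, c + 2 * NUM_CPUS, ...
	 * With 16 groups and 8 cores, that is exactly 2 GIC IRQs per core.
	 */
	for (cpu = 0; cpu < NUM_CPUS; cpu++) {
		printf("CPU %u services MSI group(s):", cpu);
		for (i = cpu; i < NR_HW_IRQS; i += NUM_CPUS)
			printf(" %u", i);
		printf("\n");
	}
	return 0;
}
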
Bjorn Helgaas June 5, 2015, 9:05 p.m. UTC | #4
On Fri, May 29, 2015 at 11:24:28AM -0700, Duc Dang wrote:
> This patch set adds MSI/MSIX termination driver support for the APM X-Gene v1
> SoC. The APM X-Gene v1 SoC supports its own implementation of MSI, which is not
> compliant with the GIC V2M specification for MSI termination.
> 
> There is a single MSI block in the X-Gene v1 SoC which serves all 5 PCIe ports.
> This MSI block supports 2048 MSI termination ports coalesced into 16 physical
> HW IRQ lines shared across all 5 PCIe ports. Starting from version 5 of
> this patch series, the total number of MSI vectors this driver supports is
> reduced to 256 to maintain the correct set_affinity behavior for each MSI.
> 
> v10 changes:
> 	1. Remove inline in helper functions
> 	2. Simplify hwirq_to_cpu implementation
> 
> v9 changes:
>         1. Add more helpers for manipulating MSI messages
>         2. Improve code readability
>         3. Make PCI_XGENE_MSI selectable
> 
> v8 changes:
>         1. Add helper to read MSI registers
>         2. Resample interrupt status of the same group after handling interrupts
>            in that group
>         3. Handle errors when allocation of the CPU mask for affinity setting fails
> 
> v7 changes:
>         1. Add more error handling cases
>         2. Clear spurious interrupts that may happen during driver probe
>         3. Do not use free_irq for chained IRQs
>         4. Improve GIC IRQ number look up in chained handler
> 
> v6 changes:
>         1. Correctly allocate MSI with bitmap_find_next_zero_area
>         2. Add notifier for CPU hotplug case
>         3. Driver clean up
>         4. Add more detailed information into device tree binding document
>            and move the device tree binding patch before the driver/dts patch
> 
> v5 changes:
>         1. Implement set_affinity for each MSI by statically allocating 2
> 	   MSI GIC IRQs for each X-Gene CPU core and moving MSI vectors around
> 	   these GIC IRQs to steer them to the target CPU core. As a consequence,
> 	   the total number of MSI vectors that X-Gene v1 supports is reduced to 256.
> 
> v4 changes:
>         1. Remove affinity setting for each MSI
>         2. Add description about register layout, MSI termination address
> 	   and data
>         3. Correct total number of MSI vectors to 2048
>         4. Clean up error messages
>         5. Remove unused module code
> 
> v3 changes:
>         1. Implement MSI support using PCI MSI IRQ domain
>         2. Only use msi_controller to store IRQ domain
> 
> v2 changes:
>         1. Use msi_controller structure
>         2. Remove arch hooks arch_teardown_msi_irqs and arch_setup_msi_irqs
> 
>  .../devicetree/bindings/pci/xgene-pci-msi.txt      |  68 +++
>  MAINTAINERS                                        |   8 +
>  arch/arm64/boot/dts/apm/apm-storm.dtsi             |  27 +
>  drivers/pci/host/Kconfig                           |  10 +
>  drivers/pci/host/Makefile                          |   1 +
>  drivers/pci/host/pci-xgene-msi.c                   | 595 +++++++++++++++++++++
>  drivers/pci/host/pci-xgene.c                       |  21 +
>  7 files changed, 730 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
>  create mode 100644 drivers/pci/host/pci-xgene-msi.c

I applied these to pci/host-xgene for v4.2, thanks!

I squashed the doc, driver, and maintainer updates into a single patch with
the following changelog:

PCI: xgene: Add APM X-Gene v1 PCIe MSI/MSIX termination driver

APM X-Gene v1 SoC supports its own implementation of MSI, which is not
compliant with the GIC V2M specification for MSI termination.

There is a single MSI block in the X-Gene v1 SoC which serves all 5 PCIe
ports.  This MSI block supports 2048 MSI termination ports coalesced into 16
physical HW IRQ lines shared across all 5 PCIe ports.

As there are only 16 HW IRQs to serve 2048 MSI vectors, to support
set_affinity correctly for each MSI vector, the 16 HW IRQs are statically
allocated to the 8 X-Gene v1 cores (2 HW IRQs per core).  To steer an MSI
interrupt to its target CPU, the MSI vector is moved among these HW IRQ
lines.  With this approach, the total number of MSI vectors this driver
supports is reduced to 256.

[bhelgaas: squash doc, driver, maintainer update]
Signed-off-by: Duc Dang <dhdang@apm.com>
Signed-off-by: Tanmay Inamdar <tinamdar@apm.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Duc Dang June 5, 2015, 9:11 p.m. UTC | #5
On Fri, Jun 5, 2015 at 2:05 PM, Bjorn Helgaas <bhelgaas@google.com> wrote:
> On Fri, May 29, 2015 at 11:24:28AM -0700, Duc Dang wrote:
>> This patch set adds MSI/MSIX termination driver support for the APM X-Gene v1
>> SoC. The APM X-Gene v1 SoC supports its own implementation of MSI, which is not
>> compliant with the GIC V2M specification for MSI termination.
>>
>> There is a single MSI block in the X-Gene v1 SoC which serves all 5 PCIe ports.
>> This MSI block supports 2048 MSI termination ports coalesced into 16 physical
>> HW IRQ lines shared across all 5 PCIe ports. Starting from version 5 of
>> this patch series, the total number of MSI vectors this driver supports is
>> reduced to 256 to maintain the correct set_affinity behavior for each MSI.
>>
>> v10 changes:
>>       1. Remove inline in helper functions
>>       2. Simplify hwirq_to_cpu implementation
>>
>> v9 changes:
>>         1. Add more helpers for manipulating MSI messages
>>         2. Improve code readability
>>         3. Make PCI_XGENE_MSI selectable
>>
>> v8 changes:
>>         1. Add helper to read MSI registers
>>         2. Resample interrupt status of the same group after handling interrupts
>>            in that group
>>         3. Handle errors when allocation of the CPU mask for affinity setting fails
>>
>> v7 changes:
>>         1. Add more error handling cases
>>         2. Clear spurious interrupts that may happen during driver probe
>>         3. Do not use free_irq for chained IRQs
>>         4. Improve GIC IRQ number look up in chained handler
>>
>> v6 changes:
>>         1. Correctly allocate MSI with bitmap_find_next_zero_area
>>         2. Add notifier for CPU hotplug case
>>         3. Driver clean up
>>         4. Add more detailed information into device tree binding document
>>            and move the device tree binding patch before the driver/dts patch
>>
>> v5 changes:
>>         1. Implement set_affinity for each MSI by statically allocating 2
>>          MSI GIC IRQs for each X-Gene CPU core and moving MSI vectors around
>>          these GIC IRQs to steer them to the target CPU core. As a consequence,
>>          the total number of MSI vectors that X-Gene v1 supports is reduced to 256.
>>
>> v4 changes:
>>         1. Remove affinity setting for each MSI
>>         2. Add description about register layout, MSI termination address
>>          and data
>>         3. Correct total number of MSI vectors to 2048
>>         4. Clean up error messages
>>         5. Remove unused module code
>>
>> v3 changes:
>>         1. Implement MSI support using PCI MSI IRQ domain
>>         2. Only use msi_controller to store IRQ domain
>>
>> v2 changes:
>>         1. Use msi_controller structure
>>         2. Remove arch hooks arch_teardown_msi_irqs and arch_setup_msi_irqs
>>
>>  .../devicetree/bindings/pci/xgene-pci-msi.txt      |  68 +++
>>  MAINTAINERS                                        |   8 +
>>  arch/arm64/boot/dts/apm/apm-storm.dtsi             |  27 +
>>  drivers/pci/host/Kconfig                           |  10 +
>>  drivers/pci/host/Makefile                          |   1 +
>>  drivers/pci/host/pci-xgene-msi.c                   | 595 +++++++++++++++++++++
>>  drivers/pci/host/pci-xgene.c                       |  21 +
>>  7 files changed, 730 insertions(+)
>>  create mode 100644 Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
>>  create mode 100644 drivers/pci/host/pci-xgene-msi.c
>
> I applied these to pci/host-xgene for v4.2, thanks!
>
> I squashed the doc, driver, and maintainer updates into a single patch with
> the following changelog:
>
> PCI: xgene: Add APM X-Gene v1 PCIe MSI/MSIX termination driver
>
> APM X-Gene v1 SoC supports its own implementation of MSI, which is not
> compliant with the GIC V2M specification for MSI termination.
>
> There is a single MSI block in the X-Gene v1 SoC which serves all 5 PCIe
> ports.  This MSI block supports 2048 MSI termination ports coalesced into 16
> physical HW IRQ lines shared across all 5 PCIe ports.
>
> As there are only 16 HW IRQs to serve 2048 MSI vectors, to support
> set_affinity correctly for each MSI vector, the 16 HW IRQs are statically
> allocated to the 8 X-Gene v1 cores (2 HW IRQs per core).  To steer an MSI
> interrupt to its target CPU, the MSI vector is moved among these HW IRQ
> lines.  With this approach, the total number of MSI vectors this driver
> supports is reduced to 256.
>
> [bhelgaas: squash doc, driver, maintainer update]
> Signed-off-by: Duc Dang <dhdang@apm.com>
> Signed-off-by: Tanmay Inamdar <tinamdar@apm.com>
> Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
>
Thanks, Bjorn and Marc.

Regards,
Duc Dang.
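
One last illustration before the patch itself: the chained handler below
(xgene_msi_isr) reconstructs the hwirq from the positions of the pending
bits, inverting the compose mapping shown earlier. A standalone sketch
of that decode, with made-up sample values:

#include <stdio.h>

#define IRQS_PER_IDX 16
#define NR_HW_IRQS   16

int main(void)
{
	/* Say MSIINT5 reports bit 0 pending, and MSI5IR0 reports bit 2 set. */
	unsigned int msi_grp = 5, msir_index = 0, intr_index = 2;

	/* Same reconstruction as in xgene_msi_isr(). */
	unsigned int hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
			       NR_HW_IRQS) + msi_grp;

	printf("MSI%XIR%u bit %u -> hwirq %u\n",
	       msi_grp, msir_index, intr_index, hw_irq);
	return 0;
}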

Patch

diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 1dfb567..a93fda8 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -89,11 +89,21 @@ config PCI_XGENE
 	depends on ARCH_XGENE
 	depends on OF
 	select PCIEPORTBUS
+	select PCI_MSI_IRQ_DOMAIN if PCI_MSI
 	help
 	  Say Y here if you want internal PCI support on APM X-Gene SoC.
 	  There are 5 internal PCIe ports available. Each port is GEN3 capable
 	  and have varied lanes from x1 to x8.
 
+config PCI_XGENE_MSI
+	bool "X-Gene v1 PCIe MSI feature"
+	depends on PCI_XGENE && PCI_MSI
+	default y
+	help
+	  Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
+	  This MSI driver will provide MSI support for the 5 PCIe ports of
+	  the APM X-Gene v1 SoC.
+
 config PCI_LAYERSCAPE
 	bool "Freescale Layerscape PCIe controller"
 	depends on OF && ARM
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index f733b4e..1957431 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
 obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
 obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
 obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
+obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
 obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
 obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
 obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
new file mode 100644
index 0000000..461cb11
--- /dev/null
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -0,0 +1,595 @@ 
+/*
+ * APM X-Gene MSI Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Tanmay Inamdar <tinamdar@apm.com>
+ *	   Duc Dang <dhdang@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/of_pci.h>
+
+#define MSI_IR0			0x000000
+#define MSI_INT0		0x800000
+#define IDX_PER_GROUP		8
+#define IRQS_PER_IDX		16
+#define NR_HW_IRQS		16
+#define NR_MSI_VEC		(IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+
+struct xgene_msi_group {
+	struct xgene_msi		*msi;
+	int				gic_irq;
+	u32				msi_grp;
+};
+
+struct xgene_msi {
+	struct device_node		*node;
+	struct msi_controller		mchip;
+	struct irq_domain		*domain;
+	u64				msi_addr;
+	void __iomem			*msi_regs;
+	unsigned long			*bitmap;
+	struct mutex			bitmap_lock;
+	struct xgene_msi_group		*msi_groups;
+	int				num_cpus;
+};
+
+/* Global data */
+static struct xgene_msi xgene_msi_ctrl;
+
+static struct irq_chip xgene_msi_top_irq_chip = {
+	.name		= "X-Gene1 MSI",
+	.irq_enable	= pci_msi_unmask_irq,
+	.irq_disable	= pci_msi_mask_irq,
+	.irq_mask	= pci_msi_mask_irq,
+	.irq_unmask	= pci_msi_unmask_irq,
+};
+
+static struct  msi_domain_info xgene_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		  MSI_FLAG_PCI_MSIX),
+	.chip	= &xgene_msi_top_irq_chip,
+};
+
+/*
+ * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
+ * n is the group number (0..F) and x is the index of a register within
+ * its group (0..7). The register layout is as follows:
+ * MSI0IR0			base_addr
+ * MSI0IR1			base_addr +  0x10000
+ * ...				...
+ * MSI0IR6			base_addr +  0x60000
+ * MSI0IR7			base_addr +  0x70000
+ * MSI1IR0			base_addr +  0x80000
+ * MSI1IR1			base_addr +  0x90000
+ * ...				...
+ * MSI1IR7			base_addr +  0xF0000
+ * MSI2IR0			base_addr + 0x100000
+ * ...				...
+ * MSIFIR0			base_addr + 0x780000
+ * MSIFIR1			base_addr + 0x790000
+ * ...				...
+ * MSIFIR7			base_addr + 0x7F0000
+ * MSIINT0			base_addr + 0x800000
+ * MSIINT1			base_addr + 0x810000
+ * ...				...
+ * MSIINTF			base_addr + 0x8F0000
+ *
+ * Each index register supports 16 MSI vectors (0..15) to generate interrupts.
+ * In total, 16 GIC IRQs are assigned to these 16 groups of MSI termination
+ * registers.
+ *
+ * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate
+ * the MSI pending status caused by 1 of its 8 index registers.
+ */
+
+/* MSInIRx read helper */
+static inline u32 xgene_msi_ir_read(struct xgene_msi *msi,
+				    u32 msi_grp, u32 msir_idx)
+{
+	return readl_relaxed(msi->msi_regs + MSI_IR0 +
+			      (msi_grp << 19) + (msir_idx << 16));
+}
+
+/* MSIINTn read helper */
+static inline u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
+{
+	return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
+}
+
+/* With 2048 MSI vectors supported, the MSI message can be constructed using
+ * the following scheme:
+ * - Divide into 8 256-vector groups
+ *		Group 0: 0-255
+ *		Group 1: 256-511
+ *		Group 2: 512-767
+ *		...
+ *		Group 7: 1792-2047
+ * - Each 256-vector group is divided into 16 16-vector groups
+ *	As an example, the 16 16-vector groups for 256-vector group 0-255 are
+ *		Group 0: 0-15
+ *		Group 1: 16-31
+ *		...
+ *		Group 15: 240-255
+ * - The termination address of MSI vector in 256-vector group n and 16-vector
+ * group x is the address of MSIxIRn
+ * - The data for MSI vector in 16-vector group x is x
+ */
+static inline u32 hwirq_to_reg_set(unsigned long hwirq)
+{
+	return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
+}
+
+static inline u32 hwirq_to_group(unsigned long hwirq)
+{
+	return (hwirq % NR_HW_IRQS);
+}
+
+static inline u32 hwirq_to_msi_data(unsigned long hwirq)
+{
+	return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
+}
+
+static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
+	u32 reg_set = hwirq_to_reg_set(data->hwirq);
+	u32 group = hwirq_to_group(data->hwirq);
+	u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
+
+	msg->address_hi = upper_32_bits(target_addr);
+	msg->address_lo = lower_32_bits(target_addr);
+	msg->data = hwirq_to_msi_data(data->hwirq);
+}
+
+/*
+ * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors.
+ * To maintain the expected behaviour of .set_affinity for each MSI
+ * interrupt, the 16 MSI GIC IRQs are statically allocated to 8 X-Gene
+ * v1 cores (2 GIC IRQs for each core). The MSI vector is moved from one
+ * MSI GIC IRQ to another MSI GIC IRQ to steer its MSI interrupt to the
+ * correct X-Gene v1 core. As a consequence, the total number of MSI
+ * vectors that X-Gene v1 supports is reduced to 256 (2048/8).
+ */
+static inline int hwirq_to_cpu(unsigned long hwirq)
+{
+	return ((hwirq % NR_HW_IRQS) % xgene_msi_ctrl.num_cpus);
+}
+
+static inline unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
+{
+	return (hwirq - (hwirq % xgene_msi_ctrl.num_cpus));
+}
+
+static int xgene_msi_set_affinity(struct irq_data *irq_data,
+				  const struct cpumask *mask, bool force)
+{
+	int target_cpu = cpumask_first(mask);
+	int curr_cpu;
+
+	curr_cpu = hwirq_to_cpu(irq_data->hwirq);
+	if (curr_cpu == target_cpu)
+		return IRQ_SET_MASK_OK_DONE;
+
+	/* Update MSI number to target the new CPU */
+	irq_data->hwirq = irq_data->hwirq + (target_cpu - curr_cpu);
+
+	return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip xgene_msi_bottom_irq_chip = {
+	.name			= "MSI",
+	.irq_set_affinity       = xgene_msi_set_affinity,
+	.irq_compose_msi_msg	= xgene_compose_msi_msg,
+};
+
+static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				  unsigned int nr_irqs, void *args)
+{
+	struct xgene_msi *msi = domain->host_data;
+	int msi_irq;
+
+	mutex_lock(&msi->bitmap_lock);
+
+	msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
+					     msi->num_cpus, 0);
+	if (msi_irq < NR_MSI_VEC)
+		bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
+	else
+		msi_irq = -ENOSPC;
+
+	mutex_unlock(&msi->bitmap_lock);
+
+	if (msi_irq < 0)
+		return msi_irq;
+
+	irq_domain_set_info(domain, virq, msi_irq,
+			    &xgene_msi_bottom_irq_chip, domain->host_data,
+			    handle_simple_irq, NULL, NULL);
+	set_irq_flags(virq, IRQF_VALID);
+
+	return 0;
+}
+
+static void xgene_irq_domain_free(struct irq_domain *domain,
+				  unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
+	u32 hwirq;
+
+	mutex_lock(&msi->bitmap_lock);
+
+	hwirq = hwirq_to_canonical_hwirq(d->hwirq);
+	bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
+
+	mutex_unlock(&msi->bitmap_lock);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc  = xgene_irq_domain_alloc,
+	.free   = xgene_irq_domain_free,
+};
+
+static int xgene_allocate_domains(struct xgene_msi *msi)
+{
+	msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
+					    &msi_domain_ops, msi);
+	if (!msi->domain)
+		return -ENOMEM;
+
+	msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node,
+						      &xgene_msi_domain_info,
+						      msi->domain);
+
+	if (!msi->mchip.domain) {
+		irq_domain_remove(msi->domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void xgene_free_domains(struct xgene_msi *msi)
+{
+	if (msi->mchip.domain)
+		irq_domain_remove(msi->mchip.domain);
+	if (msi->domain)
+		irq_domain_remove(msi->domain);
+}
+
+static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
+{
+	int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);
+
+	xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
+	if (!xgene_msi->bitmap)
+		return -ENOMEM;
+
+	mutex_init(&xgene_msi->bitmap_lock);
+
+	xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
+					sizeof(struct xgene_msi_group),
+					GFP_KERNEL);
+	if (!xgene_msi->msi_groups)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct xgene_msi_group *msi_groups;
+	struct xgene_msi *xgene_msi;
+	unsigned int virq;
+	int msir_index, msir_val, hw_irq;
+	u32 intr_index, grp_select, msi_grp;
+
+	chained_irq_enter(chip, desc);
+
+	msi_groups = irq_desc_get_handler_data(desc);
+	xgene_msi = msi_groups->msi;
+	msi_grp = msi_groups->msi_grp;
+
+	/*
+	 * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt.
+	 * If bit x of this register is set (x is 0..7), one or more interrupts
+	 * corresponding to MSInIRx are pending.
+	 */
+	grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
+	while (grp_select) {
+		msir_index = ffs(grp_select) - 1;
+		/*
+		 * Calculate MSInIRx address to read to check for interrupts
+		 * (refer to termination address and data assignment
+		 * described in xgene_compose_msi_msg function)
+		 */
+		msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
+		while (msir_val) {
+			intr_index = ffs(msir_val) - 1;
+			/*
+			 * Calculate MSI vector number (refer to the termination
+			 * address and data assignment described in
+			 * xgene_compose_msi_msg function)
+			 */
+			hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
+				 NR_HW_IRQS) + msi_grp;
+			/*
+			 * As we have multiple hw_irqs that map to a single MSI,
+			 * always look up the virq using the hw_irq as seen from
+			 * CPU0
+			 */
+			hw_irq = hwirq_to_canonical_hwirq(hw_irq);
+			virq = irq_find_mapping(xgene_msi->domain, hw_irq);
+			WARN_ON(!virq);
+			if (virq != 0)
+				generic_handle_irq(virq);
+			msir_val &= ~(1 << intr_index);
+		}
+		grp_select &= ~(1 << msir_index);
+
+		if (!grp_select) {
+			/*
+			 * We handled all interrupts happened in this group,
+			 * resample this group MSI_INTx register in case
+			 * something else has been made pending in the meantime
+			 */
+			grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int xgene_msi_remove(struct platform_device *pdev)
+{
+	int virq, i;
+	struct xgene_msi *msi = platform_get_drvdata(pdev);
+
+	for (i = 0; i < NR_HW_IRQS; i++) {
+		virq = msi->msi_groups[i].gic_irq;
+		if (virq != 0) {
+			irq_set_chained_handler(virq, NULL);
+			irq_set_handler_data(virq, NULL);
+		}
+	}
+	kfree(msi->msi_groups);
+
+	kfree(msi->bitmap);
+	msi->bitmap = NULL;
+
+	xgene_free_domains(msi);
+
+	return 0;
+}
+
+static int xgene_msi_hwirq_alloc(unsigned int cpu)
+{
+	struct xgene_msi *msi = &xgene_msi_ctrl;
+	struct xgene_msi_group *msi_group;
+	cpumask_var_t mask;
+	int i;
+	int err;
+
+	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+		msi_group = &msi->msi_groups[i];
+		if (!msi_group->gic_irq)
+			continue;
+
+		irq_set_chained_handler(msi_group->gic_irq,
+					xgene_msi_isr);
+		err = irq_set_handler_data(msi_group->gic_irq, msi_group);
+		if (err) {
+			pr_err("failed to register GIC IRQ handler\n");
+			return -EINVAL;
+		}
+		/*
+		 * Statically allocate MSI GIC IRQs to each CPU core.
+		 * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
+		 * to each core.
+		 */
+		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+			cpumask_clear(mask);
+			cpumask_set_cpu(cpu, mask);
+			err = irq_set_affinity(msi_group->gic_irq, mask);
+			if (err)
+				pr_err("failed to set affinity for GIC IRQ\n");
+			free_cpumask_var(mask);
+		} else {
+			pr_err("failed to alloc CPU mask for affinity\n");
+			err = -EINVAL;
+		}
+
+		if (err) {
+			irq_set_chained_handler(msi_group->gic_irq, NULL);
+			irq_set_handler_data(msi_group->gic_irq, NULL);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void xgene_msi_hwirq_free(unsigned int cpu)
+{
+	struct xgene_msi *msi = &xgene_msi_ctrl;
+	struct xgene_msi_group *msi_group;
+	int i;
+
+	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+		msi_group = &msi->msi_groups[i];
+		if (!msi_group->gic_irq)
+			continue;
+
+		irq_set_chained_handler(msi_group->gic_irq, NULL);
+		irq_set_handler_data(msi_group->gic_irq, NULL);
+	}
+}
+
+static int xgene_msi_cpu_callback(struct notifier_block *nfb,
+				  unsigned long action, void *hcpu)
+{
+	unsigned cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		xgene_msi_hwirq_alloc(cpu);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		xgene_msi_hwirq_free(cpu);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block xgene_msi_cpu_notifier = {
+	.notifier_call = xgene_msi_cpu_callback,
+};
+
+static const struct of_device_id xgene_msi_match_table[] = {
+	{.compatible = "apm,xgene1-msi"},
+	{},
+};
+
+static int xgene_msi_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int rc, irq_index;
+	struct xgene_msi *xgene_msi;
+	unsigned int cpu;
+	int virt_msir;
+	u32 msi_val, msi_idx;
+
+	xgene_msi = &xgene_msi_ctrl;
+
+	platform_set_drvdata(pdev, xgene_msi);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xgene_msi->msi_regs)) {
+		dev_err(&pdev->dev, "no reg space\n");
+		rc = -EINVAL;
+		goto error;
+	}
+	xgene_msi->msi_addr = res->start;
+
+	xgene_msi->num_cpus = num_possible_cpus();
+
+	rc = xgene_msi_init_allocator(xgene_msi);
+	if (rc) {
+		dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
+		goto error;
+	}
+
+	rc = xgene_allocate_domains(xgene_msi);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
+		goto error;
+	}
+
+	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+		virt_msir = platform_get_irq(pdev, irq_index);
+		if (virt_msir < 0) {
+			dev_err(&pdev->dev, "Cannot translate IRQ index %d\n",
+				irq_index);
+			rc = -EINVAL;
+			goto error;
+		}
+		xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
+		xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
+		xgene_msi->msi_groups[irq_index].msi = xgene_msi;
+	}
+
+	/*
+	 * MSInIRx registers are read-to-clear; before registering interrupt
+	 * handlers, read all of them to clear any spurious interrupts that
+	 * may have occurred before the driver was probed.
+	 */
+	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+		for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
+			msi_val = xgene_msi_ir_read(xgene_msi, irq_index,
+						    msi_idx);
+		/* Read MSIINTn to confirm */
+		msi_val = xgene_msi_int_read(xgene_msi, irq_index);
+		if (msi_val) {
+			dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
+			rc = -EINVAL;
+			goto error;
+		}
+	}
+
+	cpu_notifier_register_begin();
+
+	for_each_online_cpu(cpu)
+		if (xgene_msi_hwirq_alloc(cpu)) {
+			dev_err(&pdev->dev, "failed to register MSI handlers\n");
+			cpu_notifier_register_done();
+			goto error;
+		}
+
+	rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
+		cpu_notifier_register_done();
+		goto error;
+	}
+
+	cpu_notifier_register_done();
+
+	xgene_msi->mchip.of_node = pdev->dev.of_node;
+	rc = of_pci_msi_chip_add(&xgene_msi->mchip);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to add MSI controller chip\n");
+		goto error_notifier;
+	}
+
+	dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
+
+	return 0;
+
+error_notifier:
+	unregister_hotcpu_notifier(&xgene_msi_cpu_notifier);
+error:
+	xgene_msi_remove(pdev);
+	return rc;
+}
+
+static struct platform_driver xgene_msi_driver = {
+	.driver = {
+		.name = "xgene-msi",
+		.owner = THIS_MODULE,
+		.of_match_table = xgene_msi_match_table,
+	},
+	.probe = xgene_msi_probe,
+	.remove = xgene_msi_remove,
+};
+
+static int __init xgene_pcie_msi_init(void)
+{
+	return platform_driver_register(&xgene_msi_driver);
+}
+subsys_initcall(xgene_pcie_msi_init);
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index ee082c0..a9b4f16 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -468,6 +468,23 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
 	return 0;
 }
 
+static int xgene_pcie_msi_enable(struct pci_bus *bus)
+{
+	struct device_node *msi_node;
+
+	msi_node = of_parse_phandle(bus->dev.of_node,
+					"msi-parent", 0);
+	if (!msi_node)
+		return -ENODEV;
+
+	bus->msi = of_pci_find_msi_chip_by_node(msi_node);
+	if (bus->msi)
+		bus->msi->dev = &bus->dev;
+	else
+		return -ENODEV;
+	return 0;
+}
+
 static int xgene_pcie_probe_bridge(struct platform_device *pdev)
 {
 	struct device_node *dn = pdev->dev.of_node;
@@ -504,6 +521,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
 	if (!bus)
 		return -ENOMEM;
 
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		if (xgene_pcie_msi_enable(bus))
+			dev_info(port->dev, "failed to enable MSI\n");
+
 	pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
 	pci_bus_add_devices(bus);