
[v5,3/3] pci, pci-thunder-ecam: Add driver for ThunderX-pass1 on-chip devices

Message ID 1454715675-17512-4-git-send-email-ddaney.cavm@gmail.com (mailing list archive)
State New, archived

Commit Message

David Daney Feb. 5, 2016, 11:41 p.m. UTC
From: David Daney <david.daney@cavium.com>

The cavium,pci-thunder-ecam devices are exactly ECAM-based PCI root
complexes.  These root complexes (loosely referred to as ECAM units in
the hardware manuals) are used to access the Thunder on-chip devices.
They are special in that all the BARs on devices behind these root
complexes are at fixed addresses.  To handle this in a manner
compatible with the core PCI code, we have the config access functions
synthesize Enhanced Allocation (EA) capability entries for each BAR.

Since this EA synthesis is needed for exactly one chip model, we can
hard code some assumptions about the device topology and the
properties of specific DEVFNs in the driver.

Signed-off-by: David Daney <david.daney@cavium.com>
---
 .../devicetree/bindings/pci/pci-thunder-ecam.txt   |  30 ++
 drivers/pci/host/Kconfig                           |   7 +
 drivers/pci/host/Makefile                          |   1 +
 drivers/pci/host/pci-thunder-ecam.c                | 358 +++++++++++++++++++++
 4 files changed, 396 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
 create mode 100644 drivers/pci/host/pci-thunder-ecam.c
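
For reference, each synthesized EA entry begins with a dword packing an
enable bit, a writable bit, secondary and primary properties, a BAR
Equivalent Indicator (BEI) and an entry size.  The field layout below is
inferred from the driver's comments in the patch below (for example
0x80ff0564 is annotated "Enabled, not-Write, SP=ff, PP=05, BEI=6, ES=4");
the decoder is an illustrative sketch only, not part of the patch.

/* Illustrative only: decode the first dword of a synthesized EA entry. */
struct ea_entry_dw0 {
	unsigned int enable;	/* bit 31: entry enabled */
	unsigned int writable;	/* bit 30: base/offset writable */
	unsigned int sp;	/* bits 23:16: secondary properties */
	unsigned int pp;	/* bits 15:8: primary properties */
	unsigned int bei;	/* bits 7:4: BAR Equivalent Indicator */
	unsigned int es;	/* bits 2:0: entry size (dwords that follow) */
};

static void ea_decode_dw0(unsigned int v, struct ea_entry_dw0 *e)
{
	e->enable   = (v >> 31) & 0x1;
	e->writable = (v >> 30) & 0x1;
	e->sp       = (v >> 16) & 0xff;
	e->pp       = (v >> 8) & 0xff;
	e->bei      = (v >> 4) & 0xf;
	e->es       = v & 0x7;
}

/* Example: 0x80ff0564 -> enable=1, writable=0, sp=0xff, pp=0x05, bei=6, es=4 */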

Comments

Rob Herring Feb. 8, 2016, 7:56 p.m. UTC | #1
On Fri, Feb 05, 2016 at 03:41:15PM -0800, David Daney wrote:
> From: David Daney <david.daney@cavium.com>
> 
> The cavium,pci-thunder-ecam devices are exactly ECAM-based PCI root
> complexes.  These root complexes (loosely referred to as ECAM units in
> the hardware manuals) are used to access the Thunder on-chip devices.
> They are special in that all the BARs on devices behind these root
> complexes are at fixed addresses.  To handle this in a manner
> compatible with the core PCI code, we have the config access functions
> synthesize Enhanced Allocation (EA) capability entries for each BAR.
> 
> Since this EA synthesis is needed for exactly one chip model, we can
> hard code some assumptions about the device topology and the
> properties of specific DEVFNs in the driver.
> 
> Signed-off-by: David Daney <david.daney@cavium.com>
> ---
>  .../devicetree/bindings/pci/pci-thunder-ecam.txt   |  30 ++
>  drivers/pci/host/Kconfig                           |   7 +
>  drivers/pci/host/Makefile                          |   1 +
>  drivers/pci/host/pci-thunder-ecam.c                | 358 +++++++++++++++++++++
>  4 files changed, 396 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
>  create mode 100644 drivers/pci/host/pci-thunder-ecam.c
> 
> diff --git a/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt b/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
> new file mode 100644
> index 0000000..34658f2
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
> @@ -0,0 +1,30 @@
> +* ThunderX PCI host controller for pass-1.x silicon
> +
> +Firmware-initialized PCI host controller to on-chip devices found on
> +some Cavium ThunderX processors.  These devices have ECAM based config
> +access, but the BARs are all at fixed addresses.  We handle the fixed
> +addresses by synthesizing Enhanced Allocation (EA) capabilities for
> +these devices.
> +
> +The properties and their meanings are identical to those described in
> +host-generic-pci.txt except as listed below.
> +
> +Properties of the host controller node that differ from
> +host-generic-pci.txt:
> +
> +- compatible     : Must be "cavium,pci-host-thunder-ecam"
> +
> +Example:
> +
> +	pci@84b0,00000000 {

Drop the comma, and the node name should be "pcie".

Otherwise,

Acked-by: Rob Herring <robh@kernel.org>

> +		compatible = "cavium,pci-host-thunder-ecam";
> +		device_type = "pci";
> +		msi-parent = <&its>;
> +		msi-map = <0 &its 0x30000 0x10000>;
> +		bus-range = <0 31>;
> +		#size-cells = <2>;
> +		#address-cells = <3>;
> +		#stream-id-cells = <1>;
> +		reg = <0x84b0 0x00000000 0 0x02000000>;  /* Configuration space */
> +		ranges = <0x03000000 0x8180 0x00000000 0x8180 0x00000000 0x80 0x00000000>; /* mem ranges */
> +	};
David Daney Feb. 8, 2016, 8:47 p.m. UTC | #2
On 02/08/2016 11:56 AM, Rob Herring wrote:
> On Fri, Feb 05, 2016 at 03:41:15PM -0800, David Daney wrote:
>> From: David Daney <david.daney@cavium.com>
>>
>> The cavium,pci-thunder-ecam devices are exactly ECAM-based PCI root
>> complexes.  These root complexes (loosely referred to as ECAM units in
>> the hardware manuals) are used to access the Thunder on-chip devices.
>> They are special in that all the BARs on devices behind these root
>> complexes are at fixed addresses.  To handle this in a manner
>> compatible with the core PCI code, we have the config access functions
>> synthesize Enhanced Allocation (EA) capability entries for each BAR.
>>
>> Since this EA synthesis is needed for exactly one chip model, we can
>> hard code some assumptions about the device topology and the
>> properties of specific DEVFNs in the driver.
>>
>> Signed-off-by: David Daney <david.daney@cavium.com>
>> ---
>>   .../devicetree/bindings/pci/pci-thunder-ecam.txt   |  30 ++
>>   drivers/pci/host/Kconfig                           |   7 +
>>   drivers/pci/host/Makefile                          |   1 +
>>   drivers/pci/host/pci-thunder-ecam.c                | 358 +++++++++++++++++++++
>>   4 files changed, 396 insertions(+)
>>   create mode 100644 Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
>>   create mode 100644 drivers/pci/host/pci-thunder-ecam.c
>>
>> diff --git a/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt b/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
>> new file mode 100644
>> index 0000000..34658f2
>> --- /dev/null
>> +++ b/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
>> @@ -0,0 +1,30 @@
>> +* ThunderX PCI host controller for pass-1.x silicon
>> +
>> +Firmware-initialized PCI host controller to on-chip devices found on
>> +some Cavium ThunderX processors.  These devices have ECAM based config
>> +access, but the BARs are all at fixed addresses.  We handle the fixed
>> +addresses by synthesizing Enhanced Allocation (EA) capabilities for
>> +these devices.
>> +
>> +The properties and their meanings are identical to those described in
>> +host-generic-pci.txt except as listed below.
>> +
>> +Properties of the host controller node that differ from
>> +host-generic-pci.txt:
>> +
>> +- compatible     : Must be "cavium,pci-host-thunder-ecam"
>> +
>> +Example:
>> +
>> +	pci@84b0,00000000 {
>
> Drop the comma,

OK...

> and the node name should be "pcie".
>

Why pcie?

There are no PCIe devices or buses reachable from this type of root 
complex.  There are however many PCI devices.

> Otherwise,
>
> Acked-by: Rob Herring <robh@kernel.org>
>
>> +		compatible = "cavium,pci-host-thunder-ecam";
>> +		device_type = "pci";
>> +		msi-parent = <&its>;
>> +		msi-map = <0 &its 0x30000 0x10000>;
>> +		bus-range = <0 31>;
>> +		#size-cells = <2>;
>> +		#address-cells = <3>;
>> +		#stream-id-cells = <1>;
>> +		reg = <0x84b0 0x00000000 0 0x02000000>;  /* Configuration space */
>> +		ranges = <0x03000000 0x8180 0x00000000 0x8180 0x00000000 0x80 0x00000000>; /* mem ranges */
>> +	};
Rob Herring Feb. 8, 2016, 9:12 p.m. UTC | #3
On Mon, Feb 8, 2016 at 2:47 PM, David Daney <ddaney@caviumnetworks.com> wrote:
> On 02/08/2016 11:56 AM, Rob Herring wrote:
>>
>> On Fri, Feb 05, 2016 at 03:41:15PM -0800, David Daney wrote:
>>>
>>> From: David Daney <david.daney@cavium.com>

[...]

>>> +Properties of the host controller node that differ from
>>> +host-generic-pci.txt:
>>> +
>>> +- compatible     : Must be "cavium,pci-host-thunder-ecam"
>>> +
>>> +Example:
>>> +
>>> +       pci@84b0,00000000 {
>>
>>
>> Drop the comma,
>
>
> OK...
>
>> and the node name should be "pcie".
>>
>
> Why pcie?
>
> There are no PCIe devices or buses reachable from this type of root complex.
> There are however many PCI devices.

I thought ECAM is a PCIe thing. If not, then nevermind.

Rob
David Daney Feb. 8, 2016, 9:39 p.m. UTC | #4
On 02/08/2016 01:12 PM, Rob Herring wrote:
> On Mon, Feb 8, 2016 at 2:47 PM, David Daney <ddaney@caviumnetworks.com> wrote:
>> On 02/08/2016 11:56 AM, Rob Herring wrote:
>>>
>>> On Fri, Feb 05, 2016 at 03:41:15PM -0800, David Daney wrote:
>>>>
>>>> From: David Daney <david.daney@cavium.com>
>
> [...]
>
>>>> +Properties of the host controller node that differ from
>>>> +host-generic-pci.txt:
>>>> +
>>>> +- compatible     : Must be "cavium,pci-host-thunder-ecam"
>>>> +
>>>> +Example:
>>>> +
>>>> +       pci@84b0,00000000 {
>>>
>>>
>>> Drop the comma,
>>
>>
>> OK...
>>
>>> and the node name should be "pcie".
>>>
>>
>> Why pcie?
>>
>> There are no PCIe devices or buses reachable from this type of root complex.
>> There are however many PCI devices.
>
> I thought ECAM is a PCIe thing. If not, then nevermind.


Well, the Enhanced Configuration Access Mechanism (ECAM) is defined in the
PCI Express(R) Base Specification, but it just defines a standard layout
of address bits used to memory-map config space operations.  Since the PCI
config space is a subset of the PCIe config space, ECAM can also be
used in PCI systems.
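
For reference, the standard ECAM layout maps each (bus, devfn, register)
tuple to a fixed offset from the window base.  A minimal sketch
(illustrative only), matching the bus_shift of 20 and the
(devfn << 12) | where used by the driver's map_bus() in the patch below:

/* Illustrative only: the standard ECAM offset for a config access. */
static inline unsigned long ecam_offset(unsigned int bus, unsigned int devfn,
					int where)
{
	return ((unsigned long)bus << 20) | (devfn << 12) | where;
}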

Really, it is a bit of a gray area here as we don't have any bridges to 
PCIe buses and there are multiple devices residing on each bus, so from 
that point of view it cannot be PCIe.  There are, however, devices that 
implement the PCI Express Capability structure, so does that make it 
PCIe?  It is not clear what the specifications demand here.

I choose to call it "pci" as it lacks much of what it means to be PCIe, 
and also you cannot plug things into it.

David Daney



>
> Rob
>
Bjorn Helgaas Feb. 8, 2016, 10:12 p.m. UTC | #5
On Mon, Feb 08, 2016 at 01:39:21PM -0800, David Daney wrote:
> On 02/08/2016 01:12 PM, Rob Herring wrote:
> >On Mon, Feb 8, 2016 at 2:47 PM, David Daney <ddaney@caviumnetworks.com> wrote:
> >>On 02/08/2016 11:56 AM, Rob Herring wrote:
> >>>On Fri, Feb 05, 2016 at 03:41:15PM -0800, David Daney wrote:
> >>>>From: David Daney <david.daney@cavium.com>
> >
> >[...]
> >
> >>>>+Properties of the host controller node that differ from
> >>>>+host-generic-pci.txt:
> >>>>+
> >>>>+- compatible     : Must be "cavium,pci-host-thunder-ecam"
> >>>>+
> >>>>+Example:
> >>>>+
> >>>>+       pci@84b0,00000000 {
> >>>
> >>>
> >>>Drop the comma,
> >>
> >>
> >>OK...
> >>
> >>>and the node name should be "pcie".
> >>>
> >>
> >>Why pcie?
> >>
> >>There are no PCIe devices or buses reachable from this type of root complex.
> >>There are however many PCI devices.
> >
> >I thought ECAM is a PCIe thing. If not, then nevermind.

The "ECAM" confusion bites again :)

> Well, the Enhanced Configuration Access Mechanism (ECAM) is defined in
> the PCI Express(R) Base Specification, but it just defines a
> standard layout of address bits used to memory-map config space
> operations.  Since the PCI config space is a subset of the PCIe
> config space, ECAM can also be used in PCI systems.
> 
> Really, it is a bit of a gray area here as we don't have any bridges
> to PCIe buses and there are multiple devices residing on each bus,
> so from that point of view it cannot be PCIe.  There are, however,
> devices that implement the PCI Express Capability structure, so does
> that make it PCIe?  It is not clear what the specifications demand
> here.

The PCI core doesn't care about the node name in the device tree.  But
it *does* care about some details of PCI/PCIe topology.  We consider
anything with a PCIe capability to be PCIe.  For example,

  - pci_cfg_space_size() thinks PCIe devices have 4K of config space

  - only_one_child() thinks a PCIe bus, i.e., a link, only has a
    single device on it

  - a PCIe device should have a PCIe Root Port or PCIe Downstream Port
    upstream from it (we did remove some of these restrictions with
    b35b1df5e6c2 ("PCI: Tolerate hierarchies with no Root Port"), but 
    it's possible we didn't get them all)

I assume your system conforms to expectations like these; I'm just
pointing them out because you mentioned buses with multiple devices on
them, which is definitely something one doesn't expect in PCIe.
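
As a rough illustration of the first point, a paraphrased sketch (not
the actual kernel code; the real pci_cfg_space_size() also verifies that
extended config space reads actually work):

#include <linux/pci.h>

/* Paraphrased sketch: a device with a PCIe capability is assumed to
 * have the 4K extended config space, otherwise only the classic 256
 * bytes.
 */
static int cfg_space_size_sketch(struct pci_dev *dev)
{
	if (pci_is_pcie(dev))
		return PCI_CFG_SPACE_EXP_SIZE;	/* 4096 */
	return PCI_CFG_SPACE_SIZE;		/* 256 */
}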

Bjorn
David Daney Feb. 8, 2016, 10:41 p.m. UTC | #6
On 02/08/2016 02:12 PM, Bjorn Helgaas wrote:
> On Mon, Feb 08, 2016 at 01:39:21PM -0800, David Daney wrote:
>> On 02/08/2016 01:12 PM, Rob Herring wrote:
>>> On Mon, Feb 8, 2016 at 2:47 PM, David Daney <ddaney@caviumnetworks.com> wrote:
>>>> On 02/08/2016 11:56 AM, Rob Herring wrote:
>>>>> On Fri, Feb 05, 2016 at 03:41:15PM -0800, David Daney wrote:
>>>>>> From: David Daney <david.daney@cavium.com>
>>>
>>> [...]
>>>
>>>>>> +Properties of the host controller node that differ from
>>>>>> +host-generic-pci.txt:
>>>>>> +
>>>>>> +- compatible     : Must be "cavium,pci-host-thunder-ecam"
>>>>>> +
>>>>>> +Example:
>>>>>> +
>>>>>> +       pci@84b0,00000000 {
>>>>>
>>>>>
>>>>> Drop the comma,
>>>>
>>>>
>>>> OK...
>>>>
>>>>> and the node name should be "pcie".
>>>>>
>>>>
>>>> Why pcie?
>>>>
>>>> There are no PCIe devices or buses reachable from this type of root complex.
>>>> There are however many PCI devices.
>>>
>>> I thought ECAM is a PCIe thing. If not, then nevermind.
>
> The "ECAM" confusion bites again :)
>
>> Well, the Enhanced Configuration Access Mechanism (ECAM) is defined in
>> the PCI Express(R) Base Specification, but it just defines a
>> standard layout of address bits used to memory-map config space
>> operations.  Since the PCI config space is a subset of the PCIe
>> config space, ECAM can also be used in PCI systems.
>>
>> Really, it is a bit of a gray area here as we don't have any bridges
>> to PCIe buses and there are multiple devices residing on each bus,
>> so from that point of view it cannot be PCIe.  There are, however,
>> devices that implement the PCI Express Capability structure, so does
>> that make it PCIe?  It is not clear what the specifications demand
>> here.
>
> The PCI core doesn't care about the node name in the device tree.  But
> it *does* care about some details of PCI/PCIe topology.  We consider
> anything with a PCIe capability to be PCIe.  For example,
>
>    - pci_cfg_space_size() thinks PCIe devices have 4K of config space
>
>    - only_one_child() thinks a PCIe bus, i.e., a link, only has a
>      single device on it
>
>    - a PCIe device should have a PCIe Root Port or PCIe Downstream Port
>      upstream from it (we did remove some of these restrictions with
>      b35b1df5e6c2 ("PCI: Tolerate hierarchies with no Root Port"), but
>      it's possible we didn't get them all)
>
> I assume your system conforms to expectations like these; I'm just
> pointing them out because you mentioned buses with multiple devices on
> them, which is definitely something one doesn't expect in PCIe.
>

The topology we have is currently working with the kernel's core PCI 
code.  I don't really want to get into discussing what the definition of 
PCIe is.  We have multiple devices (more than 32) on a single bus, and 
they have PCI Express and ARI Capabilities.  Is that PCIe?  I don't know.

For the purpose of naming the device tree node, I would like to stick 
with the name "pci@..." as it is somewhat accurate, a value contemplated 
by the device tree specifications, ignored by the kernel code, and 
already implemented.

David Daney


> Bjorn
>
Bjorn Helgaas Feb. 8, 2016, 11:24 p.m. UTC | #7
On Mon, Feb 08, 2016 at 02:41:41PM -0800, David Daney wrote:
> On 02/08/2016 02:12 PM, Bjorn Helgaas wrote:
> >On Mon, Feb 08, 2016 at 01:39:21PM -0800, David Daney wrote:
> >>On 02/08/2016 01:12 PM, Rob Herring wrote:
> >>>On Mon, Feb 8, 2016 at 2:47 PM, David Daney <ddaney@caviumnetworks.com> wrote:
> >>>>On 02/08/2016 11:56 AM, Rob Herring wrote:
> >>>>>On Fri, Feb 05, 2016 at 03:41:15PM -0800, David Daney wrote:
> >>>>>>From: David Daney <david.daney@cavium.com>
> >>>>>>+Properties of the host controller node that differ from
> >>>>>>+host-generic-pci.txt:
> >>>>>>+
> >>>>>>+- compatible     : Must be "cavium,pci-host-thunder-ecam"
> >>>>>>+
> >>>>>>+Example:
> >>>>>>+
> >>>>>>+       pci@84b0,00000000 {
> ...
> >>>>>and the node name should be "pcie".
> >>>>
> >>>>Why pcie?
> >>>>
> >>>>There are no PCIe devices or buses reachable from this type of root complex.
> >>>>There are however many PCI devices.
> ...

> >>Really, it is a bit of a gray area here as we don't have any bridges
> >>to PCIe buses and there are multiple devices residing on each bus,
> >>so from that point of view it cannot be PCIe.  There are, however,
> >>devices that implement the PCI Express Capability structure, so does
> >>that make it PCIe?  It is not clear what the specifications demand
> >>here.
> >
> >The PCI core doesn't care about the node name in the device tree.  But
> >it *does* care about some details of PCI/PCIe topology.  We consider
> >anything with a PCIe capability to be PCIe.  For example,
> >
> >   - pci_cfg_space_size() thinks PCIe devices have 4K of config space
> >
> >   - only_one_child() thinks a PCIe bus, i.e., a link, only has a
> >     single device on it
> >
> >   - a PCIe device should have a PCIe Root Port or PCIe Downstream Port
> >     upstream from it (we did remove some of these restrictions with
> >     b35b1df5e6c2 ("PCI: Tolerate hierarchies with no Root Port"), but
> >     it's possible we didn't get them all)
> >
> >I assume your system conforms to expectations like these; I'm just
> >pointing them out because you mentioned buses with multiple devices on
> >them, which is definitely something one doesn't expect in PCIe.
> 
> The topology we have is currently working with the kernel's core PCI
> code.  I don't really want to get into discussing what the
> definition of PCIe is.  We have multiple devices (more than 32) on a
> single bus, and they have PCI Express and ARI Capabilities.  Is that
> PCIe?  I don't know.

I don't need to know the details of your topology.  As long as it
conforms to the PCIe spec, it should be fine.  If it *doesn't* conform
to the spec, but things currently seem to work, that's less fine,
because a future Linux change is liable to break something for you.

I was a little concerned about your statement that "there are multiple
devices residing on each bus, so from that point of view it cannot be
PCIe."  That made it sound like you're doing something outside the
spec.  If you're just using regular multi-function devices or ARI,
then I don't see any issue (or any reason to say it can't be PCIe).

Bjorn
David Daney Feb. 8, 2016, 11:31 p.m. UTC | #8
On 02/08/2016 03:24 PM, Bjorn Helgaas wrote:
> On Mon, Feb 08, 2016 at 02:41:41PM -0800, David Daney wrote:
>> On 02/08/2016 02:12 PM, Bjorn Helgaas wrote:
>>> On Mon, Feb 08, 2016 at 01:39:21PM -0800, David Daney wrote:
>>>> On 02/08/2016 01:12 PM, Rob Herring wrote:
>>>>> On Mon, Feb 8, 2016 at 2:47 PM, David Daney <ddaney@caviumnetworks.com> wrote:
>>>>>> On 02/08/2016 11:56 AM, Rob Herring wrote:
>>>>>>> On Fri, Feb 05, 2016 at 03:41:15PM -0800, David Daney wrote:
>>>>>>>> From: David Daney <david.daney@cavium.com>
>>>>>>>> +Properties of the host controller node that differ from
>>>>>>>> +host-generic-pci.txt:
>>>>>>>> +
>>>>>>>> +- compatible     : Must be "cavium,pci-host-thunder-ecam"
>>>>>>>> +
>>>>>>>> +Example:
>>>>>>>> +
>>>>>>>> +       pci@84b0,00000000 {
>> ...
>>>>>>> and the node name should be "pcie".
>>>>>>
>>>>>> Why pcie?
>>>>>>
>>>>>> There are no PCIe devices or buses reachable from this type of root complex.
>>>>>> There are however many PCI devices.
>> ...
>
>>>> Really, it is a bit of a gray area here as we don't have any bridges
>>>> to PCIe buses and there are multiple devices residing on each bus,
>>>> so from that point of view it cannot be PCIe.  There are, however,
>>>> devices that implement the PCI Express Capability structure, so does
>>>> that make it PCIe?  It is not clear what the specifications demand
>>>> here.
>>>
>>> The PCI core doesn't care about the node name in the device tree.  But
>>> it *does* care about some details of PCI/PCIe topology.  We consider
>>> anything with a PCIe capability to be PCIe.  For example,
>>>
>>>    - pci_cfg_space_size() thinks PCIe devices have 4K of config space
>>>
>>>    - only_one_child() thinks a PCIe bus, i.e., a link, only has a
>>>      single device on it
>>>
>>>    - a PCIe device should have a PCIe Root Port or PCIe Downstream Port
>>>      upstream from it (we did remove some of these restrictions with
>>>      b35b1df5e6c2 ("PCI: Tolerate hierarchies with no Root Port"), but
>>>      it's possible we didn't get them all)
>>>
>>> I assume your system conforms to expectations like these; I'm just
>>> pointing them out because you mentioned buses with multiple devices on
>>> them, which is definitely something one doesn't expect in PCIe.
>>
>> The topology we have is currently working with the kernel's core PCI
>> code.  I don't really want to get into discussing what the
>> definition of PCIe is.  We have multiple devices (more than 32) on a
>> single bus, and they have PCI Express and ARI Capabilities.  Is that
>> PCIe?  I don't know.
>
> I don't need to know the details of your topology.  As long as it
> conforms to the PCIe spec, it should be fine.  If it *doesn't* conform
> to the spec, but things currently seem to work, that's less fine,
> because a future Linux change is liable to break something for you.
>
> I was a little concerned about your statement that "there are multiple
> devices residing on each bus, so from that point of view it cannot be
> PCIe."  That made it sound like you're doing something outside the
> spec.  If you're just using regular multi-function devices or ARI,
> then I don't see any issue (or any reason to say it can't be PCIe).

OK, I will make it "pcie@...."

Really, ARI is the only reason.  But since ARI is defined in the PCI 
Express specification, pcie it is.
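
(For context, illustrative only: ARI reinterprets the classic 5-bit
device / 3-bit function split of the routing ID as a single 8-bit
function number, which is what allows far more than 32 functions on one
bus.)

/* Illustrative only: devfn interpretation without and with ARI.  The
 * classic decode matches the kernel's PCI_SLOT()/PCI_FUNC() macros.
 */
#define CLASSIC_DEV(devfn)	(((devfn) >> 3) & 0x1f)	/* up to 32 devices */
#define CLASSIC_FN(devfn)	((devfn) & 0x07)	/* 8 functions each */
#define ARI_FN(devfn)		((devfn) & 0xff)	/* up to 256 functions */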

I will send revised patches today.


David Daney

>
> Bjorn
>
Arnd Bergmann Feb. 9, 2016, 9:25 a.m. UTC | #9
On Monday 08 February 2016 17:24:30 Bjorn Helgaas wrote:
> > >
> > >I assume your system conforms to expectations like these; I'm just
> > >pointing them out because you mentioned buses with multiple devices on
> > >them, which is definitely something one doesn't expect in PCIe.
> > 
> > The topology we have is currently working with the kernel's core PCI
> > code.  I don't really want to get into discussing what the
> > definition of PCIe is.  We have multiple devices (more than 32) on a
> > single bus, and they have PCI Express and ARI Capabilities.  Is that
> > PCIe?  I don't know.
> 
> I don't need to know the details of your topology.  As long as it
> conforms to the PCIe spec, it should be fine.  If it *doesn't* conform
> to the spec, but things currently seem to work, that's less fine,
> because a future Linux change is liable to break something for you.
> 
> I was a little concerned about your statement that "there are multiple
> devices residing on each bus, so from that point of view it cannot be
> PCIe."  That made it sound like you're doing something outside the
> spec.  If you're just using regular multi-function devices or ARI,
> then I don't see any issue (or any reason to say it can't be PCIe).

It doesn't conform to the PCIe port spec, because there are no external
ports but just integrated devices in the host bridge. For this special
case, I don't think it matters at all from the point of view of the DT
binding whether we call the node name "pci" or "pcie".

IIRC, even on real Open Firmware, the three companies that shipped
PCIe (or Hypertransport, which doesn't even have a formal binding)
based machines (Sun, IBM, Apple) were using slightly different
bindings in practice, so I wouldn't read too much into it. Any OS
that wants to run on real OF already has to support it either way.

	Arnd
Bjorn Helgaas Feb. 9, 2016, 4:26 p.m. UTC | #10
On Tue, Feb 09, 2016 at 10:25:33AM +0100, Arnd Bergmann wrote:
> On Monday 08 February 2016 17:24:30 Bjorn Helgaas wrote:
> > > >
> > > >I assume your system conforms to expectations like these; I'm just
> > > >pointing them out because you mentioned buses with multiple devices on
> > > >them, which is definitely something one doesn't expect in PCIe.
> > > 
> > > The topology we have is currently working with the kernel's core PCI
> > > code.  I don't really want to get into discussing what the
> > > definition of PCIe is.  We have multiple devices (more than 32) on a
> > > single bus, and they have PCI Express and ARI Capabilities.  Is that
> > > PCIe?  I don't know.
> > 
> > I don't need to know the details of your topology.  As long as it
> > conforms to the PCIe spec, it should be fine.  If it *doesn't* conform
> > to the spec, but things currently seem to work, that's less fine,
> > because a future Linux change is liable to break something for you.
> > 
> > I was a little concerned about your statement that "there are multiple
> > devices residing on each bus, so from that point of view it cannot be
> > PCIe."  That made it sound like you're doing something outside the
> > spec.  If you're just using regular multi-function devices or ARI,
> > then I don't see any issue (or any reason to say it can't be PCIe).
> 
> It doesn't conform to the PCIe port spec, because there are no external
> ports but just integrated devices in the host bridge. 

Is there a spec section you have in mind?  Based on sec 1.3.1, I don't
think there's a requirement to have PCI Express Ports (is that what
you mean by "external ports"?)

Root Complex Integrated Endpoints (sec 1.3.2.3) are clearly supported
and they would not be behind a Root Port.  If you're using those, I
hope they're correctly identified via the PCIe capability Device/Port
Type (sec 7.8.2) because we rely on that type to figure out whether
the link-related registers are implemented.
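
A minimal sketch of that kind of check, assuming the usual kernel
helpers (illustrative only):

#include <linux/pci.h>

/* Illustrative only: the Device/Port Type field of the PCIe capability
 * is what the core uses to tell, e.g., a Root Complex Integrated
 * Endpoint from a device sitting behind a link.
 */
static bool is_rc_integrated_ep(struct pci_dev *dev)
{
	return pci_is_pcie(dev) &&
	       pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END;
}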

The spec does include rules related to peer-to-peer transactions, MPS,
ASPM, error reporting, etc., and Linux relies on those, so I think it
would be important to get those right.

> For this special
> case, I don't think it matters at all from the point of view of the DT
> binding whether we call the node name "pci" or "pcie".

And the PCI core doesn't even know the node name, it doesn't matter
there either.

Bjorn
Arnd Bergmann Feb. 9, 2016, 4:31 p.m. UTC | #11
On Tuesday 09 February 2016 10:26:28 Bjorn Helgaas wrote:
> On Tue, Feb 09, 2016 at 10:25:33AM +0100, Arnd Bergmann wrote:
> > On Monday 08 February 2016 17:24:30 Bjorn Helgaas wrote:
> > > > >
> > > > >I assume your system conforms to expectations like these; I'm just
> > > > >pointing them out because you mentioned buses with multiple devices on
> > > > >them, which is definitely something one doesn't expect in PCIe.
> > > > 
> > > > The topology we have is currently working with the kernel's core PCI
> > > > code.  I don't really want to get into discussing what the
> > > > definition of PCIe is.  We have multiple devices (more than 32) on a
> > > > single bus, and they have PCI Express and ARI Capabilities.  Is that
> > > > PCIe?  I don't know.
> > > 
> > > I don't need to know the details of your topology.  As long as it
> > > conforms to the PCIe spec, it should be fine.  If it *doesn't* conform
> > > to the spec, but things currently seem to work, that's less fine,
> > > because a future Linux change is liable to break something for you.
> > > 
> > > I was a little concerned about your statement that "there are multiple
> > > devices residing on each bus, so from that point of view it cannot be
> > > PCIe."  That made it sound like you're doing something outside the
> > > spec.  If you're just using regular multi-function devices or ARI,
> > > then I don't see any issue (or any reason to say it can't be PCIe).
> > 
> > It doesn't conform to the PCIe port spec, because there are no external
> > ports but just integrated devices in the host bridge. 
> 
> Is there a spec section you have in mind?  Based on sec 1.3.1, I don't
> think there's a requirement to have PCI Express Ports (is that what
> you mean by "external ports"?)

No, I was just assuming that ports are specified in their own document,
which would not be followed here if there are none. There is nothing in
here that leads me to believe that the hardware is actually noncompliant
with any relevant standard.

> Root Complex Integrated Endpoints (sec 1.3.2.3) are clearly supported
> and they would not be behind a Root Port.  If you're using those, I
> hope they're correctly identified via the PCIe capability Device/Port
> Type (sec 7.8.2) because we rely on that type to figure out whether
> the link-related registers are implemented.
> 
> The spec does include rules related to peer-to-peer transactions, MPS,
> ASPM, error reporting, etc., and Linux relies on those, so I think it
> would be important to get those right.

David can probably explain more if the registers are compliant with
those parts of the spec.

	Arnd
David Daney Feb. 9, 2016, 4:58 p.m. UTC | #12
On 02/09/2016 08:31 AM, Arnd Bergmann wrote:
> On Tuesday 09 February 2016 10:26:28 Bjorn Helgaas wrote:
>> On Tue, Feb 09, 2016 at 10:25:33AM +0100, Arnd Bergmann wrote:
>>> On Monday 08 February 2016 17:24:30 Bjorn Helgaas wrote:
>>>>>>
>>>>>> I assume your system conforms to expectations like these; I'm just
>>>>>> pointing them out because you mentioned buses with multiple devices on
>>>>>> them, which is definitely something one doesn't expect in PCIe.
>>>>>
>>>>> The topology we have is currently working with the kernel's core PCI
>>>>> code.  I don't really want to get into discussing what the
>>>>> definition of PCIe is.  We have multiple devices (more than 32) on a
>>>>> single bus, and they have PCI Express and ARI Capabilities.  Is that
>>>>> PCIe?  I don't know.
>>>>
>>>> I don't need to know the details of your topology.  As long as it
>>>> conforms to the PCIe spec, it should be fine.  If it *doesn't* conform
>>>> to the spec, but things currently seem to work, that's less fine,
>>>> because a future Linux change is liable to break something for you.
>>>>
>>>> I was a little concerned about your statement that "there are multiple
>>>> devices residing on each bus, so from that point of view it cannot be
>>>> PCIe."  That made it sound like you're doing something outside the
>>>> spec.  If you're just using regular multi-function devices or ARI,
>>>> then I don't see any issue (or any reason to say it can't be PCIe).
>>>
>>> It doesn't conform to the PCIe port spec, because there are no external
>>> ports but just integrated devices in the host bridge.
>>
>> Is there a spec section you have in mind?  Based on sec 1.3.1, I don't
>> think there's a requirement to have PCI Express Ports (is that what
>> you mean by "external ports"?)
>
> No, I was just assuming that ports are specified in their own document,
> which would not be followed here if there are none. There is nothing in
> here that leads me to believe that the hardware is actually noncompliant
> with any relevant standard.
>
>> Root Complex Integrated Endpoints (sec 1.3.2.3) are clearly supported
>> and they would not be behind a Root Port.  If you're using those, I
>> hope they're correctly identified via the PCIe capability Device/Port
>> Type (sec 7.8.2) because we rely on that type to figure out whether
>> the link-related registers are implemented.
>>
>> The spec does include rules related to peer-to-peer transactions, MPS,
>> ASPM, error reporting, etc., and Linux relies on those, so I think it
>> would be important to get those right.
>
> David can probably explain more if the registers are compliant with
> those parts of the spec.

It is somewhat moot, but in the interest of keeping this thread alive:

None of the "on-chip" devices behind these root complexes implements 
ASPM, AER, etc.  The capability structures for all the features you 
mention are not present.

All that is there beyond standard PCI capabilities are:

   - PCI Express capability, to indicate presence of PCI Express 
Extended Capabilities.

   - ARI capability so we can fit more than 16 devices on a bus.

   - SRIOV capability on devices that are virtualizable.

   - That's it!

The reality is that they are not really PCI/PCIe devices at all.  All 
the device registers are at fixed addresses and are connected to 
proprietary internal buses with various weird properties.  Witness the 
need for Enhanced Allocation capabilities to describe the fixed addressing.

The PCI config space is a veneer laid on top of it all to aid in device 
discovery and interrupt routing.

So is it PCI or PCIe?  It is not really important to say.  All we want
is to be able to get the pci-host-generic root complex driver to bind to 
our ECAM/ECAM-like configuration space accessors.

David Daney



>
> 	Arnd
>

Patch

diff --git a/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt b/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
new file mode 100644
index 0000000..34658f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
@@ -0,0 +1,30 @@ 
+* ThunderX PCI host controller for pass-1.x silicon
+
+Firmware-initialized PCI host controller to on-chip devices found on
+some Cavium ThunderX processors.  These devices have ECAM based config
+access, but the BARs are all at fixed addresses.  We handle the fixed
+addresses by synthesizing Enhanced Allocation (EA) capabilities for
+these devices.
+
+The properties and their meanings are identical to those described in
+host-generic-pci.txt except as listed below.
+
+Properties of the host controller node that differ from
+host-generic-pci.txt:
+
+- compatible     : Must be "cavium,pci-host-thunder-ecam"
+
+Example:
+
+	pci@84b0,00000000 {
+		compatible = "cavium,pci-host-thunder-ecam";
+		device_type = "pci";
+		msi-parent = <&its>;
+		msi-map = <0 &its 0x30000 0x10000>;
+		bus-range = <0 31>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		#stream-id-cells = <1>;
+		reg = <0x84b0 0x00000000 0 0x02000000>;  /* Configuration space */
+		ranges = <0x03000000 0x8180 0x00000000 0x8180 0x00000000 0x80 0x00000000>; /* mem ranges */
+	};
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 184df22..f8912c6 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -202,4 +202,11 @@  config PCI_HOST_THUNDER_PEM
 	help
 	  Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
 
+config PCI_HOST_THUNDER_ECAM
+	bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
+	depends on OF && ARM64
+	select PCI_HOST_COMMON
+	help
+	  Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
+
 endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 8903172..d6af3ba 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -23,4 +23,5 @@  obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
 obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
 obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
 obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
 obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
new file mode 100644
index 0000000..83ee590
--- /dev/null
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -0,0 +1,358 @@ 
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/of_pci.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pci-host-common.h"
+
+/* Mapping is standard ECAM */
+static void __iomem *thunder_ecam_map_bus(struct pci_bus *bus,
+					  unsigned int devfn,
+					  int where)
+{
+	struct gen_pci *pci = bus->sysdata;
+	resource_size_t idx = bus->number - pci->cfg.bus_range->start;
+
+	return pci->cfg.win[idx] + ((devfn << 12) | where);
+}
+
+static void set_val(u32 v, int where, int size, u32 *val)
+{
+	int shift = (where & 3) * 8;
+
+	pr_debug("set_val %04x: %08x\n", (unsigned)(where & ~3), v);
+	v >>= shift;
+	if (size == 1)
+		v &= 0xff;
+	else if (size == 2)
+		v &= 0xffff;
+	*val = v;
+}
+
+static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
+			 unsigned int devfn, int where, int size, u32 *val)
+{
+	void __iomem *addr;
+	u32 v;
+	/*
+	 * Each entry is 16-byte aligned; bits [3:2] select which word
+	 * within the entry.
+	 */
+	int where_a = where & 0xc;
+
+	if (where_a == 0) {
+		set_val(e0, where, size, val);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	if (where_a == 0x4) {
+		addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
+		if (!addr) {
+			*val = ~0;
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		}
+		v = readl(addr);
+		v &= ~0xf;
+		v |= 2; /* EA entry-1. Base-L */
+		set_val(v, where, size, val);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	if (where_a == 0x8) {
+		u32 barl_orig;
+		u32 barl_rb;
+
+		addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
+		if (!addr) {
+			*val = ~0;
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		}
+		barl_orig = readl(addr + 0);
+		writel(0xffffffff, addr + 0);
+		barl_rb = readl(addr + 0);
+		writel(barl_orig, addr + 0);
+		/* zeros in unsettable bits. */
+		v = ~barl_rb & ~3;
+		v |= 0xc; /* EA entry-2. Offset-L */
+		set_val(v, where, size, val);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	if (where_a == 0xc) {
+		addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */
+		if (!addr) {
+			*val = ~0;
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		}
+		v = readl(addr); /* EA entry-3. Base-H */
+		set_val(v, where, size, val);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 *val)
+{
+	u32 v;
+	u32 vendor_device;
+	void __iomem *addr;
+	int cfg_type;
+	int where_a = where & ~3;
+
+
+	addr = bus->ops->map_bus(bus, devfn, 0xc);
+	if (!addr) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	v = readl(addr);
+
+	/* Check for non type-00 header. */
+	cfg_type = (v >> 16) & 0x7f;
+
+	/*
+	 * All BARs have fixed addresses specified by the EA
+	 * capability; they must return zero on read.
+	 */
+	if (cfg_type == 0 &&
+	    ((where >= 0x10 && where < 0x2c) ||
+	     (where >= 0x1a4 && where < 0x1bc))) {
+		/* BAR or SRIOV BAR */
+		*val = 0;
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	addr = bus->ops->map_bus(bus, devfn, 0);
+	if (!addr) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	vendor_device = readl(addr);
+	if (vendor_device == 0xffffffff)
+		goto no_emulation;
+
+	addr = bus->ops->map_bus(bus, devfn, 8);
+	if (!addr) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	v = readl(addr);
+	if (v == 0xffffffff)
+		goto no_emulation;
+
+	pr_debug("%04x:%04x - Fix pass#: %08x, where: %03x, devfn: %03x\n",
+		 vendor_device & 0xffff, vendor_device >> 16, v,
+		 (unsigned) where, devfn);
+
+	/* Check for non type-00 header. */
+	if (cfg_type == 0) {
+		bool has_msix;
+		bool is_nic = (vendor_device == 0xa01e177d);
+		bool is_tns = (vendor_device == 0xa01f177d);
+
+		addr = bus->ops->map_bus(bus, devfn, 0x70);
+		if (!addr) {
+			*val = ~0;
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		}
+		/* E_CAP */
+		v = readl(addr);
+		has_msix = (v & 0xff00) != 0;
+
+		if (!has_msix && where_a == 0x70) {
+			v |= 0xbc00; /* next capability is EA at 0xbc */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xb0) {
+			addr = bus->ops->map_bus(bus, devfn, where_a);
+			if (!addr) {
+				*val = ~0;
+				return PCIBIOS_DEVICE_NOT_FOUND;
+			}
+			v = readl(addr);
+			if (v & 0xff00)
+				pr_err("Bad MSIX cap header: %08x\n", v);
+			v |= 0xbc00; /* next capability is EA at 0xbc */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xbc) {
+			if (is_nic)
+				v = 0x40014; /* EA last in chain, 4 entries. */
+			else if (is_tns)
+				v = 0x30014; /* EA last in chain, 3 entries. */
+			else if (has_msix)
+				v = 0x20014; /* EA last in chain, 2 entries. */
+			else
+				v = 0x10014; /* EA last in chain, 1 entry. */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a >= 0xc0 && where_a < 0xd0)
+			/* EA entry-0. PP=0, BAR0 Size:3 */
+			return handle_ea_bar(0x80ff0003,
+					     0x10, bus, devfn, where,
+					     size, val);
+		if (where_a >= 0xd0 && where_a < 0xe0 && has_msix)
+			 /* EA entry-1. PP=0, BAR4 Size:3 */
+			return handle_ea_bar(0x80ff0043,
+					     0x20, bus, devfn, where,
+					     size, val);
+		if (where_a >= 0xe0 && where_a < 0xf0 && is_tns)
+			/* EA entry-2. PP=0, BAR2, Size:3 */
+			return handle_ea_bar(0x80ff0023,
+					     0x18, bus, devfn, where,
+					     size, val);
+		if (where_a >= 0xe0 && where_a < 0xf0 && is_nic)
+			/* EA entry-2. PP=4, VF_BAR0 (9), Size:3 */
+			return handle_ea_bar(0x80ff0493,
+					     0x1a4, bus, devfn, where,
+					     size, val);
+		if (where_a >= 0xf0 && where_a < 0x100 && is_nic)
+			/* EA entry-3. PP=4, VF_BAR4 (d), Size:3 */
+			return handle_ea_bar(0x80ff04d3,
+					     0x1b4, bus, devfn, where,
+					     size, val);
+	} else if (cfg_type == 1) {
+		bool is_rsl_bridge = devfn == 0x08;
+		bool is_rad_bridge = devfn == 0xa0;
+		bool is_zip_bridge = devfn == 0xa8;
+		bool is_dfa_bridge = devfn == 0xb0;
+		bool is_nic_bridge = devfn == 0x10;
+
+		if (where_a == 0x70) {
+			addr = bus->ops->map_bus(bus, devfn, where_a);
+			if (!addr) {
+				*val = ~0;
+				return PCIBIOS_DEVICE_NOT_FOUND;
+			}
+			v = readl(addr);
+			if (v & 0xff00)
+				pr_err("Bad PCIe cap header: %08x\n", v);
+			v |= 0xbc00; /* next capability is EA at 0xbc */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xbc) {
+			if (is_nic_bridge)
+				v = 0x10014; /* EA last in chain, 1 entry */
+			else
+				v = 0x00014; /* EA last in chain, no entries */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xc0) {
+			if (is_rsl_bridge || is_nic_bridge)
+				v = 0x0101; /* subordinate:secondary = 1:1 */
+			else if (is_rad_bridge)
+				v = 0x0202; /* subordinate:secondary = 2:2 */
+			else if (is_zip_bridge)
+				v = 0x0303; /* subordinate:secondary = 3:3 */
+			else if (is_dfa_bridge)
+				v = 0x0404; /* subordinate:secondary = 4:4 */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xc4 && is_nic_bridge) {
+			/* Enabled, not-Write, SP=ff, PP=05, BEI=6, ES=4 */
+			v = 0x80ff0564;
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xc8 && is_nic_bridge) {
+			v = 0x00000002; /* Base-L 64-bit */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xcc && is_nic_bridge) {
+			v = 0xfffffffe; /* MaxOffset-L 64-bit */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xd0 && is_nic_bridge) {
+			v = 0x00008430; /* NIC Base-H */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xd4 && is_nic_bridge) {
+			v = 0x0000000f; /* MaxOffset-H */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+	}
+no_emulation:
+	return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
+				     int where, int size, u32 val)
+{
+	/*
+	 * All BARs have fixed addresses; ignore BAR writes so they
+	 * don't get corrupted.
+	 */
+	if ((where >= 0x10 && where < 0x2c) ||
+	    (where >= 0x1a4 && where < 0x1bc))
+		/* BAR or SRIOV BAR */
+		return PCIBIOS_SUCCESSFUL;
+
+	return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static struct gen_pci_cfg_bus_ops thunder_ecam_bus_ops = {
+	.bus_shift	= 20,
+	.ops		= {
+		.map_bus        = thunder_ecam_map_bus,
+		.read           = thunder_ecam_config_read,
+		.write          = thunder_ecam_config_write,
+	}
+};
+
+static const struct of_device_id thunder_ecam_of_match[] = {
+	{ .compatible = "cavium,pci-host-thunder-ecam",
+	  .data = &thunder_ecam_bus_ops },
+
+	{ },
+};
+MODULE_DEVICE_TABLE(of, thunder_ecam_of_match);
+
+static int thunder_ecam_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *of_id;
+	struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+
+	if (!pci)
+		return -ENOMEM;
+
+	of_id = of_match_node(thunder_ecam_of_match, dev->of_node);
+	pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
+
+	return pci_host_common_probe(pdev, pci);
+}
+
+static struct platform_driver thunder_ecam_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = thunder_ecam_of_match,
+	},
+	.probe = thunder_ecam_probe,
+};
+module_platform_driver(thunder_ecam_driver);
+
+MODULE_DESCRIPTION("Thunder ECAM PCI host driver");
+MODULE_LICENSE("GPL v2");