Message ID | 20210903083347.131786-10-andr2000@gmail.com (mailing list archive)
---|---
State | Superseded
Series | PCI devices passthrough on Arm, part 2
Hi Oleksandr,

On 03/09/2021 09:33, Oleksandr Andrushchenko wrote:
> From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
>
> In order vPCI to work it needs all access to PCI configuration space
> (ECAM) to be synchronized among all entities, e.g. hardware domain and
> guests.

I am not entirely sure what you mean by "synchronized" here. Are you
referring to the content of the configuration space?

> For that implement PCI host bridge specific callbacks to
> properly setup those ranges depending on particular host bridge
> implementation.
>
> Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
> ---
>  xen/arch/arm/pci/ecam.c            | 11 +++++++++++
>  xen/arch/arm/pci/pci-host-common.c | 16 ++++++++++++++++
>  xen/arch/arm/vpci.c                | 13 +++++++++++++
>  xen/include/asm-arm/pci.h          |  8 ++++++++
>  4 files changed, 48 insertions(+)
>
> diff --git a/xen/arch/arm/pci/ecam.c b/xen/arch/arm/pci/ecam.c
> index 91c691b41fdf..92ecb2e0762b 100644
> --- a/xen/arch/arm/pci/ecam.c
> +++ b/xen/arch/arm/pci/ecam.c
> @@ -42,6 +42,16 @@ void __iomem *pci_ecam_map_bus(struct pci_host_bridge *bridge,
>      return base + (PCI_DEVFN(sbdf_t.dev, sbdf_t.fn) << devfn_shift) + where;
>  }
>
> +static int pci_ecam_register_mmio_handler(struct domain *d,
> +                                          struct pci_host_bridge *bridge,
> +                                          const struct mmio_handler_ops *ops)
> +{
> +    struct pci_config_window *cfg = bridge->sysdata;
> +
> +    register_mmio_handler(d, ops, cfg->phys_addr, cfg->size, NULL);

We have a fixed array for handling the MMIO handlers. So you need to
make sure we have enough space in it to store one handler per bridge.

This is quite similar to the problem we had with the re-distributor on
GICv3. Have a look there to see how we dealt with it.

> +    return 0;
> +}
> +
>  /* ECAM ops */
>  const struct pci_ecam_ops pci_generic_ecam_ops = {
>      .bus_shift  = 20,
> @@ -49,6 +59,7 @@ const struct pci_ecam_ops pci_generic_ecam_ops = {
>          .map_bus                = pci_ecam_map_bus,
>          .read                   = pci_generic_config_read,
>          .write                  = pci_generic_config_write,
> +        .register_mmio_handler  = pci_ecam_register_mmio_handler,
>      }
>  };
>
> diff --git a/xen/arch/arm/pci/pci-host-common.c b/xen/arch/arm/pci/pci-host-common.c
> index d2fef5476b8e..a89112bfbb7c 100644
> --- a/xen/arch/arm/pci/pci-host-common.c
> +++ b/xen/arch/arm/pci/pci-host-common.c
> @@ -318,6 +318,22 @@ struct dt_device_node *pci_find_host_bridge_node(struct device *dev)
>      }
>      return bridge->dt_node;
>  }
> +
> +int pci_host_iterate_bridges(struct domain *d,
> +                             int (*clb)(struct domain *d,

NIT: We tend to name callback variable 'cb'.

Cheers,
Hi, Julien!

On 09.09.21 20:43, Julien Grall wrote:
> Hi Oleksandr,
>
> On 03/09/2021 09:33, Oleksandr Andrushchenko wrote:
>> From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
>>
>> In order vPCI to work it needs all access to PCI configuration space
>> (ECAM) to be synchronized among all entities, e.g. hardware domain and
>> guests.
>
> I am not entirely sure what you mean by "synchronized" here. Are you
> referring to the content of the configuration space?

We maintain hwdom's and guest's view of the registers we are interested in.

So, to have hwdom's view we also need to trap its access to the
configuration space.

Probably "synchronized" is not the right wording here.

[...]

>> +static int pci_ecam_register_mmio_handler(struct domain *d,
>> +                                          struct pci_host_bridge *bridge,
>> +                                          const struct mmio_handler_ops *ops)
>> +{
>> +    struct pci_config_window *cfg = bridge->sysdata;
>> +
>> +    register_mmio_handler(d, ops, cfg->phys_addr, cfg->size, NULL);
>
> We have a fixed array for handling the MMIO handlers.

Didn't know that:

xen/include/asm-arm/mmio.h:27:#define MAX_IO_HANDLER 16

> So you need to make sure we have enough space in it to store one
> handler per bridge.
>
> This is quite similar to the problem we had with the re-distributor on
> GICv3. Have a look there to see how we dealt with it.

Could you please point me to that solution? I can only see

    /* Register mmio handle for the Distributor */
    register_mmio_handler(d, &vgic_distr_mmio_handler, d->arch.vgic.dbase,
                          SZ_64K, NULL);

    /*
     * Register mmio handler per contiguous region occupied by the
     * redistributors. The handler will take care to choose which
     * redistributor is targeted.
     */
    for ( i = 0; i < d->arch.vgic.nr_regions; i++ )
    {
        struct vgic_rdist_region *region = &d->arch.vgic.rdist_regions[i];

        register_mmio_handler(d, &vgic_rdistr_mmio_handler,
                              region->base, region->size, region);
    }

which IMO doesn't care about the number of MMIOs we can handle

[...]

>> +int pci_host_iterate_bridges(struct domain *d,
>> +                             int (*clb)(struct domain *d,
>
> NIT: We tend to name callback variable 'cb'.

Sure

> Cheers,

Thank you,
Oleksandr
On 10/09/2021 12:43, Oleksandr Andrushchenko wrote:
> Hi, Julien!

Hi Oleksandr,

> On 09.09.21 20:43, Julien Grall wrote:
>> I am not entirely sure what you mean by "synchronized" here. Are you
>> referring to the content of the configuration space?
>
> We maintain hwdom's and guest's view of the registers we are interested in.
>
> So, to have hwdom's view we also need to trap its access to the
> configuration space.
>
> Probably "synchronized" is not the right wording here.

I would simply say that we want to expose an emulated hostbridge to the
dom0 so we need to unmap the configuration space.

[...]

>> So you need to make sure we have enough space in it to store one
>> handler per bridge.
>>
>> This is quite similar to the problem we had with the re-distributor on
>> GICv3. Have a look there to see how we dealt with it.
>
> Could you please point me to that solution? I can only see
>
> [...]
>
> which IMO doesn't care about the number of MMIOs we can handle

Please see vgic_v3_init(). We update mmio_count that is then used as
the second argument for domain_io_init().

Cheers,
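[For reference, the GICv3 pattern being pointed at boils down to the
following shape. This is a condensed sketch, not the actual Xen code;
in particular the way the re-distributor count is derived here is an
assumption.]

    /*
     * Sketch: the vGIC init code reports how many MMIO handlers it will
     * need via mmio_count, and the domain-creation path then sizes the
     * otherwise-fixed handler array with that number.
     */
    static int vgic_v3_init(struct domain *d, int *mmio_count)
    {
        /* One handler for the distributor + one per re-distributor region. */
        *mmio_count = d->arch.vgic.nr_regions + 1;

        return 0;
    }

The same idea applies to vPCI: report the number of host bridges before
domain_io_init() allocates the handler array.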
Hi, Julien!

On 10.09.21 16:04, Julien Grall wrote:
[...]
> I would simply say that we want to expose an emulated hostbridge to the
> dom0 so we need to unmap the configuration space.

Sounds good

[...]

> Please see vgic_v3_init(). We update mmio_count that is then used as
> the second argument for domain_io_init().

Ah, so

1) This needs to be done before the array for the handlers is allocated

2) How do I know at the time of 1) how many bridges we have?

> Cheers,

Thank you,
Oleksandr
On 10/09/2021 14:15, Oleksandr Andrushchenko wrote:
> Hi, Julien!

Hi,

[...]

>> Please see vgic_v3_init(). We update mmio_count that is then used as
>> the second argument for domain_io_init().
>
> Ah, so
>
> 1) This needs to be done before the array for the handlers is allocated
>
> 2) How do I know at the time of 1) how many bridges we have?

By counting the number of bridges you want to expose to dom0? I am not
entirely sure what else you expect me to say.

Cheers,
Hi,

On 10.09.21 16:20, Julien Grall wrote:
[...]
>> 2) How do I know at the time of 1) how many bridges we have?
>
> By counting the number of bridges you want to expose to dom0? I am not
> entirely sure what else you expect me to say.

Ok, so I'll go over the device tree and find out all the bridges, e.g.
devices with the DEVICE_PCI type.

Then I'll also need to exclude those being passed through
(xen,passthrough) and the rest are the bridges for Domain-0?

Is this what you mean?

> Cheers,

Thank you,
Oleksandr
On 10/09/2021 14:27, Oleksandr Andrushchenko wrote:
[...]
> Ok, so I'll go over the device tree and find out all the bridges, e.g.
> devices with the DEVICE_PCI type.
>
> Then I'll also need to exclude those being passed through
> (xen,passthrough) and the rest are the bridges for Domain-0?

What you want to know is how many times register_mmio_handler() will be
called from domain_vpci_init().

You introduced a function pci_host_iterate_bridges() that will walk over
the bridges and then call the callback vpci_setup_mmio_handler(). So you
could introduce a new callback that will return 1 if
bridge->ops->register_mmio_handler is not NULL or 0.

Cheers,
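[A minimal sketch of the counting helper being suggested here; the
helper names are illustrative, not part of the posted series. Note that
pci_host_iterate_bridges() as posted stops on the first non-zero return,
so a counting variant has to accumulate rather than reuse the iterator
as-is.]

    /* Returns 1 if this bridge will register an MMIO handler, 0 otherwise. */
    static int vpci_get_num_handlers_cb(struct domain *d,
                                        struct pci_host_bridge *bridge)
    {
        return bridge->ops->register_mmio_handler ? 1 : 0;
    }

    /* Hypothetical helper: count the handlers domain_vpci_init() will add. */
    static unsigned int pci_host_get_num_bridges(struct domain *d)
    {
        struct pci_host_bridge *bridge;
        unsigned int count = 0;

        list_for_each_entry( bridge, &pci_host_bridges, node )
            count += vpci_get_num_handlers_cb(d, bridge);

        return count;
    }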
On 10.09.21 16:33, Julien Grall wrote:
[...]
> What you want to know is how many times register_mmio_handler() will be
> called from domain_vpci_init().
>
> You introduced a function pci_host_iterate_bridges() that will walk over
> the bridges and then call the callback vpci_setup_mmio_handler(). So you
> could introduce a new callback that will return 1 if
> bridge->ops->register_mmio_handler is not NULL or 0.

Ok, clear. Something like:

    if ( (rc = domain_vgic_register(d, &count)) != 0 )
        goto fail;

    *find out how many bridges and update count*

    if ( (rc = domain_io_init(d, count + MAX_IO_HANDLER)) != 0 )
        goto fail;

> Cheers,

Thank you,
Oleksandr
On Fri, 3 Sep 2021, Oleksandr Andrushchenko wrote:
> From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
>
> In order vPCI to work it needs all access to PCI configuration space
> (ECAM) to be synchronized among all entities, e.g. hardware domain and
> guests. For that implement PCI host bridge specific callbacks to
> properly setup those ranges depending on particular host bridge
> implementation.
>
> Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
> ---
[...]
> +static int pci_ecam_register_mmio_handler(struct domain *d,
> +                                          struct pci_host_bridge *bridge,
> +                                          const struct mmio_handler_ops *ops)
> +{
> +    struct pci_config_window *cfg = bridge->sysdata;
> +
> +    register_mmio_handler(d, ops, cfg->phys_addr, cfg->size, NULL);
> +    return 0;
> +}

Given that struct pci_config_window is generic (it is not specific to
one bridge), I wonder if we even need the .register_mmio_handler
callback here.

In fact, pci_host_bridge->sysdata doesn't even need to be a void*, it
could be a struct pci_config_window*, right?

We could simply call:

    register_mmio_handler(d, ops, cfg->phys_addr, cfg->size, NULL);

for each bridge directly from domain_vpci_init ?

[...]
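[As a rough illustration of the "call it directly" idea, a sketch under
the assumption that every bridge's sysdata points at the generic
struct pci_config_window, which holds for the ECAM bridges in this
series but is exactly the point debated below.]

    static int vpci_setup_mmio_handler(struct domain *d,
                                       struct pci_host_bridge *bridge)
    {
        struct pci_config_window *cfg = bridge->sysdata;

        /* Trap the bridge's whole configuration space for this domain. */
        register_mmio_handler(d, &vpci_mmio_handler,
                              cfg->phys_addr, cfg->size, NULL);

        return 0;
    }

    /* For the hardware domain, domain_vpci_init() would then reduce to: */
    /*     return pci_host_iterate_bridges(d, vpci_setup_mmio_handler);  */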
>> What you want to know is how many times register_mmio_handler() will be
>> called from domain_vpci_init().
>>
>> You introduced a function pci_host_iterate_bridges() that will walk over
>> the bridges and then call the callback vpci_setup_mmio_handler(). So you
>> could introduce a new callback that will return 1 if
>> bridge->ops->register_mmio_handler is not NULL or 0.
>
> Ok, clear. Something like:
>
>     if ( (rc = domain_vgic_register(d, &count)) != 0 )
>         goto fail;
>
>     *find out how many bridges and update count*
>
>     if ( (rc = domain_io_init(d, count + MAX_IO_HANDLER)) != 0 )
>         goto fail;

I have the following code now:

    int domain_vpci_get_num_mmio_handlers(struct domain *d)
    {
        int count;

        if ( is_hardware_domain(d) )
            /* For each PCI host bridge's configuration space. */
            count += pci_host_get_num_bridges();
        else
            /*
             * VPCI_MSIX_MEM_NUM handlers for MSI-X tables per each PCI device
             * being passed through. Maximum number of supported devices
             * is 32 as virtual bus topology emulates the devices as embedded
             * endpoints.
             * +1 for a single emulated host bridge's configuration space.
             */
            count = VPCI_MSIX_MEM_NUM * 32 + 1;

        return count;
    }

Please note that we cannot tell how many PCIe devices are going to be
passed through.

So, worst case for DomU is going to be 65 in addition to what we already
have...

This sounds a bit scary as most probably we won't pass through 32
devices most of the time, but it will make d->arch.vmmio.handlers almost
4 times bigger than it is now.

This may have an influence on the MMIO handlers' performance...

Thanks,
Oleksandr
>> }
>>
>> +static int pci_ecam_register_mmio_handler(struct domain *d,
>> +                                          struct pci_host_bridge *bridge,
>> +                                          const struct mmio_handler_ops *ops)
>> +{
>> +    struct pci_config_window *cfg = bridge->sysdata;
>> +
>> +    register_mmio_handler(d, ops, cfg->phys_addr, cfg->size, NULL);
>> +    return 0;
>> +}
>
> Given that struct pci_config_window is generic (it is not specific to
> one bridge), I wonder if we even need the .register_mmio_handler
> callback here.
>
> In fact, pci_host_bridge->sysdata doesn't even need to be a void*, it
> could be a struct pci_config_window*, right?

Rahul, this actually may change your series.

Do you think we can do that?

> We could simply call:
>
>     register_mmio_handler(d, ops, cfg->phys_addr, cfg->size, NULL);
>
> for each bridge directly from domain_vpci_init ?

If Rahul changes the API then we can probably do that.
On Tue, 14 Sep 2021, Oleksandr Andrushchenko wrote:
[...]
> I have the following code now:
>
>     int domain_vpci_get_num_mmio_handlers(struct domain *d)
>     {
>         int count;

count is incremented but not initialized

>         if ( is_hardware_domain(d) )
>             /* For each PCI host bridge's configuration space. */
>             count += pci_host_get_num_bridges();
>         else
>             /*
>              * VPCI_MSIX_MEM_NUM handlers for MSI-X tables per each PCI device
>              * being passed through. Maximum number of supported devices
>              * is 32 as virtual bus topology emulates the devices as embedded
>              * endpoints.
>              * +1 for a single emulated host bridge's configuration space.
>              */
>             count = VPCI_MSIX_MEM_NUM * 32 + 1;
>
>         return count;
>     }
>
> Please note that we cannot tell how many PCIe devices are going to be
> passed through.
>
> So, worst case for DomU is going to be 65 in addition to what we already
> have...
>
> This sounds a bit scary as most probably we won't pass through 32
> devices most of the time, but it will make d->arch.vmmio.handlers almost
> 4 times bigger than it is now.
>
> This may have an influence on the MMIO handlers' performance...

I am OK with that given that it doesn't affect performance until you
actually start creating too many virtual devices for the DomU. In other
words, find_mmio_handler restricts the search to vmmio->num_entries, so
as long as most entries are allocated but unused, we should be fine.
On 15.09.21 03:25, Stefano Stabellini wrote:
> On Tue, 14 Sep 2021, Oleksandr Andrushchenko wrote:
[...]
>>     int domain_vpci_get_num_mmio_handlers(struct domain *d)
>>     {
>>         int count;
>
> count is incremented but not initialized

Excessive cleanup before sending ;)

[...]

> I am OK with that given that it doesn't affect performance until you
> actually start creating too many virtual devices for the DomU. In other
> words, find_mmio_handler restricts the search to vmmio->num_entries, so
> as long as most entries are allocated but unused, we should be fine.

Ok, fine, so I'll have this change as above in v2.

Thanks,
Oleksandr
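[For reference, the function with the initialization fix folded in would
look roughly like this; pci_host_get_num_bridges() and VPCI_MSIX_MEM_NUM
are names from the discussion above, not a settled API.]

    int domain_vpci_get_num_mmio_handlers(struct domain *d)
    {
        int count = 0;  /* Fix: initialize before accumulating. */

        if ( is_hardware_domain(d) )
            /* One handler per PCI host bridge's configuration space. */
            count += pci_host_get_num_bridges(d);
        else
            /*
             * VPCI_MSIX_MEM_NUM handlers for MSI-X tables per PCI device
             * passed through (up to 32 emulated endpoints), plus one for
             * the emulated host bridge's configuration space.
             */
            count = VPCI_MSIX_MEM_NUM * 32 + 1;

        return count;
    }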
Hi, Rahul!

On 14.09.21 17:24, Oleksandr Andrushchenko wrote:
[...]
>> Given that struct pci_config_window is generic (it is not specific to
>> one bridge), I wonder if we even need the .register_mmio_handler
>> callback here.
>>
>> In fact, pci_host_bridge->sysdata doesn't even need to be a void*, it
>> could be a struct pci_config_window*, right?
>
> Rahul, this actually may change your series.
>
> Do you think we can do that?

This is the only change requested that is left unanswered by now.

Will it be possible that you change the API accordingly, so I can
implement as Stefano suggests?

Thanks,
Oleksandr
Hi Oleksandr, Stefano,

> On 15 Sep 2021, at 6:30 am, Oleksandr Andrushchenko <Oleksandr_Andrushchenko@epam.com> wrote:
>
[...]
>
> This is the only change requested that is left unanswered by now.
>
> Will it be possible that you change the API accordingly, so I can
> implement as Stefano suggests?

We need pci_host_bridge->sysdata as void* in case we need to implement a
new non-ECAM PCI controller in Xen. Please have a look at the Linux
code[1] to see how bridge->sysdata is used there: struct pci_config_window
is used only for host controllers that support ECAM. Different PCI host
controllers will have different interfaces to access the PCI controller.

[1] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/pci/controller/pcie-rcar-host.c#n309

I think we need bridge->sysdata in future to implement new PCI
controllers.

Regards,
Rahul

> Thanks,
>
> Oleksandr
On 15.09.21 13:45, Rahul Singh wrote:
> Hi Oleksandr, Stefano,
>
[...]
>
> We need pci_host_bridge->sysdata as void* in case we need to implement a
> new non-ECAM PCI controller in Xen. Please have a look at the Linux
> code[1] to see how bridge->sysdata is used there: struct pci_config_window
> is used only for host controllers that support ECAM. Different PCI host
> controllers will have different interfaces to access the PCI controller.
>
> [1] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/pci/controller/pcie-rcar-host.c#n309
>
> I think we need bridge->sysdata in future to implement new PCI
> controllers.

Stefano, does it sound reasonable then to keep the above code as is?
On Wed, 15 Sep 2021, Rahul Singh wrote:
[...]
> We need pci_host_bridge->sysdata as void* in case we need to implement a
> new non-ECAM PCI controller in Xen. Please have a look at the Linux
> code[1] to see how bridge->sysdata is used there: struct pci_config_window
> is used only for host controllers that support ECAM. Different PCI host
> controllers will have different interfaces to access the PCI controller.
>
> [1] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/pci/controller/pcie-rcar-host.c#n309
>
> I think we need bridge->sysdata in future to implement new PCI
> controllers.

In my opinion the pci_config_window is too important a piece of
information to be left inside an opaque pointer, especially when the
info under pci_config_window is both critical and vendor-neutral.

My preference would be something like this:


diff --git a/xen/include/asm-arm/pci.h b/xen/include/asm-arm/pci.h
index 9c28a4bdc4..c80d846da3 100644
--- a/xen/include/asm-arm/pci.h
+++ b/xen/include/asm-arm/pci.h
@@ -55,7 +55,6 @@ struct pci_config_window {
     uint8_t busn_start;
     uint8_t busn_end;
     void __iomem *win;
-    const struct pci_ecam_ops *ops;
 };
 
 /*
@@ -68,7 +67,8 @@ struct pci_host_bridge {
     uint16_t segment;            /* Segment number */
     u8 bus_start;                /* Bus start of this bridge. */
     u8 bus_end;                  /* Bus end of this bridge. */
-    void *sysdata;               /* Pointer to the config space window*/
+    struct pci_config_window* cfg; /* Pointer to the bridge config window */
+    void *sysdata;               /* Pointer to bridge private data */
     const struct pci_ops *ops;
 };


As a reference the attached patch builds. However, I had to remove const
where struct pci_ecam_ops *ops is used.
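[With the cfg/sysdata split proposed above, the ECAM-specific callback
becomes unnecessary and generic code can register the trap itself. A
sketch assuming the proposed layout, not the final API.]

    static int vpci_setup_mmio_handler(struct domain *d,
                                       struct pci_host_bridge *bridge)
    {
        /* bridge->cfg is now typed, so no bridge-specific cast is needed. */
        register_mmio_handler(d, &vpci_mmio_handler,
                              bridge->cfg->phys_addr, bridge->cfg->size,
                              NULL);

        return 0;
    }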
Hi, Rahul!

On 15.09.21 23:33, Stefano Stabellini wrote:
> On Wed, 15 Sep 2021, Rahul Singh wrote:
[...]
> In my opinion the pci_config_window is too important a piece of
> information to be left inside an opaque pointer, especially when the
> info under pci_config_window is both critical and vendor-neutral.
>
> My preference would be something like this:
>
[...]
>
> As a reference the attached patch builds. However, I had to remove const
> where struct pci_ecam_ops *ops is used.

I'd like to know which route we go with this as this is now the last
thing which stops me from sending v2 of this series.

Thank you,
Oleksandr
Hi Oleksandr,

> On 17 Sep 2021, at 7:13 am, Oleksandr Andrushchenko <Oleksandr_Andrushchenko@epam.com> wrote:
>
[...]
>
> I'd like to know which route we go with this as this is now the last
> thing which stops me from sending v2 of this series.

I will modify the code as per Stefano's request and will send the next
version.

Regards,
Rahul

> Thank you,
>
> Oleksandr
diff --git a/xen/arch/arm/pci/ecam.c b/xen/arch/arm/pci/ecam.c
index 91c691b41fdf..92ecb2e0762b 100644
--- a/xen/arch/arm/pci/ecam.c
+++ b/xen/arch/arm/pci/ecam.c
@@ -42,6 +42,16 @@ void __iomem *pci_ecam_map_bus(struct pci_host_bridge *bridge,
     return base + (PCI_DEVFN(sbdf_t.dev, sbdf_t.fn) << devfn_shift) + where;
 }
 
+static int pci_ecam_register_mmio_handler(struct domain *d,
+                                          struct pci_host_bridge *bridge,
+                                          const struct mmio_handler_ops *ops)
+{
+    struct pci_config_window *cfg = bridge->sysdata;
+
+    register_mmio_handler(d, ops, cfg->phys_addr, cfg->size, NULL);
+    return 0;
+}
+
 /* ECAM ops */
 const struct pci_ecam_ops pci_generic_ecam_ops = {
     .bus_shift  = 20,
@@ -49,6 +59,7 @@ const struct pci_ecam_ops pci_generic_ecam_ops = {
         .map_bus                = pci_ecam_map_bus,
         .read                   = pci_generic_config_read,
         .write                  = pci_generic_config_write,
+        .register_mmio_handler  = pci_ecam_register_mmio_handler,
     }
 };
 
diff --git a/xen/arch/arm/pci/pci-host-common.c b/xen/arch/arm/pci/pci-host-common.c
index d2fef5476b8e..a89112bfbb7c 100644
--- a/xen/arch/arm/pci/pci-host-common.c
+++ b/xen/arch/arm/pci/pci-host-common.c
@@ -318,6 +318,22 @@ struct dt_device_node *pci_find_host_bridge_node(struct device *dev)
     }
     return bridge->dt_node;
 }
+
+int pci_host_iterate_bridges(struct domain *d,
+                             int (*clb)(struct domain *d,
+                                        struct pci_host_bridge *bridge))
+{
+    struct pci_host_bridge *bridge;
+    int err;
+
+    list_for_each_entry( bridge, &pci_host_bridges, node )
+    {
+        err = clb(d, bridge);
+        if ( err )
+            return err;
+    }
+    return 0;
+}
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/vpci.c b/xen/arch/arm/vpci.c
index da8b1ca13c07..258134292458 100644
--- a/xen/arch/arm/vpci.c
+++ b/xen/arch/arm/vpci.c
@@ -74,11 +74,24 @@ static const struct mmio_handler_ops vpci_mmio_handler = {
     .write = vpci_mmio_write,
 };
 
+static int vpci_setup_mmio_handler(struct domain *d,
+                                   struct pci_host_bridge *bridge)
+{
+    if ( bridge->ops->register_mmio_handler )
+        return bridge->ops->register_mmio_handler(d, bridge,
+                                                  &vpci_mmio_handler);
+    return 0;
+}
+
 int domain_vpci_init(struct domain *d)
 {
     if ( !has_vpci(d) )
         return 0;
 
+    if ( is_hardware_domain(d) )
+        return pci_host_iterate_bridges(d, vpci_setup_mmio_handler);
+
+    /* Guest domains use what is programmed in their device tree. */
     register_mmio_handler(d, &vpci_mmio_handler,
                           GUEST_VPCI_ECAM_BASE, GUEST_VPCI_ECAM_SIZE, NULL);
 
diff --git a/xen/include/asm-arm/pci.h b/xen/include/asm-arm/pci.h
index 7dc4c8dc9026..2c7c7649e00f 100644
--- a/xen/include/asm-arm/pci.h
+++ b/xen/include/asm-arm/pci.h
@@ -17,6 +17,8 @@
 #ifndef __ARM_PCI_H__
 #define __ARM_PCI_H__
 
+#include <asm/mmio.h>
+
 #ifdef CONFIG_HAS_PCI
 
 #define pci_to_dev(pcidev) (&(pcidev)->arch.dev)
@@ -77,6 +79,9 @@ struct pci_ops {
                 uint32_t reg, uint32_t len, uint32_t *value);
     int (*write)(struct pci_host_bridge *bridge, uint32_t sbdf,
                  uint32_t reg, uint32_t len, uint32_t value);
+    int (*register_mmio_handler)(struct domain *d,
+                                 struct pci_host_bridge *bridge,
+                                 const struct mmio_handler_ops *ops);
 };
 
 /*
@@ -107,6 +112,9 @@ int pci_get_host_bridge_segment(const struct dt_device_node *node,
                                 uint16_t *segment);
 struct dt_device_node *pci_find_host_bridge_node(struct device *dev);
 
+int pci_host_iterate_bridges(struct domain *d,
+                             int (*clb)(struct domain *d,
+                                        struct pci_host_bridge *bridge));
 #else   /*!CONFIG_HAS_PCI*/
 
 struct arch_pci_dev { };