@@ -2971,7 +2971,10 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
struct pci_bus_region region;
struct resource *res;
int i, index;
- int rc;
+ unsigned int segsize;
+ unsigned long *segmap, *pe_segmap;
+ uint16_t win;
+ int64_t rc;
/*
* NOTE: We only care PCI bus based PE for now. For PCI
@@ -2988,50 +2991,44 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
if (res->flags & IORESOURCE_IO) {
region.start = res->start - phb->ioda.io_pci_base;
region.end = res->end - phb->ioda.io_pci_base;
- index = region.start / phb->ioda.io_segsize;
-
- while (index < phb->ioda.total_pe &&
- region.start <= region.end) {
- set_bit(index, phb->ioda.io_segmap);
- set_bit(index, pe->io_segmap);
- rc = opal_pci_map_pe_mmio_window(phb->opal_id,
- pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
- if (rc != OPAL_SUCCESS) {
- pr_err("%s: OPAL error %d when mapping IO "
- "segment #%d to PE#%d\n",
- __func__, rc, index, pe->pe_number);
- break;
- }
-
- region.start += phb->ioda.io_segsize;
- index++;
- }
+ segsize = phb->ioda.io_segsize;
+ segmap = phb->ioda.io_segmap;
+ pe_segmap = pe->io_segmap;
+ win = OPAL_IO_WINDOW_TYPE;
} else if ((res->flags & IORESOURCE_MEM) &&
- !pnv_pci_is_mem_pref_64(res->flags)) {
+ !pnv_pci_is_mem_pref_64(res->flags)) {
region.start = res->start -
hose->mem_offset[0] -
phb->ioda.m32_pci_base;
region.end = res->end -
hose->mem_offset[0] -
phb->ioda.m32_pci_base;
- index = region.start / phb->ioda.m32_segsize;
+ segsize = phb->ioda.m32_segsize;
+ segmap = phb->ioda.m32_segmap;
+ pe_segmap = pe->m32_segmap;
+ win = OPAL_M32_WINDOW_TYPE;
+ } else {
+ continue;
+ }
- while (index < phb->ioda.total_pe &&
- region.start <= region.end) {
- set_bit(index, phb->ioda.m32_segmap);
- set_bit(index, pe->m32_segmap);
- rc = opal_pci_map_pe_mmio_window(phb->opal_id,
- pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
- if (rc != OPAL_SUCCESS) {
- pr_err("%s: OPAL error %d when mapping M32 "
- "segment#%d to PE#%d",
- __func__, rc, index, pe->pe_number);
- break;
- }
+ index = region.start / segsize;
+ while (index < phb->ioda.total_pe &&
+ region.start <= region.end) {
+ set_bit(index, segmap);
+ set_bit(index, pe_segmap);
- region.start += phb->ioda.m32_segsize;
- index++;
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ pe->pe_number, win, 0, index);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Error %lld mapping (%d) seg#%d to PHB#%d-PE#%d\n",
+ __func__, rc, win, index,
+ pe->phb->hose->global_number,
+ pe->pe_number);
+ break;
}
+
+ region.start += segsize;
+ index++;
}
}
}
The original implementation of pnv_ioda_setup_pe_seg() configures IO and M32 segments by separate logic, which can be merged by caching @segmap, @pe_segmap, @segsize and @win in advance. The patch shouldn't cause any behavioural changes. Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com> --- v5: * Split from PATCH[v4 04/21] * Fixed coding style complained by checkpatch.pl --- arch/powerpc/platforms/powernv/pci-ioda.c | 67 +++++++++++++++---------------- 1 file changed, 32 insertions(+), 35 deletions(-)