@@ -1345,6 +1345,7 @@ static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
vf_index < (vf_group + 1) * vf_per_group &&
vf_index < num_vfs;
vf_index++)
+
for (vf_index1 = vf_group * vf_per_group;
vf_index1 < (vf_group + 1) * vf_per_group &&
vf_index1 < num_vfs;
@@ -1363,9 +1364,20 @@ static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
}
list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
+ struct pnv_ioda_pe *s, *sn;
if (pe->parent_dev != pdev)
continue;
+ if ((pe->flags & PNV_IODA_PE_MASTER) &&
+ (pe->flags & PNV_IODA_PE_VF)) {
+ list_for_each_entry_safe(s, sn, &pe->slaves, list) {
+ pnv_pci_ioda2_release_dma_pe(pdev, s);
+ list_del(&s->list);
+ pnv_ioda_deconfigure_pe(phb, s);
+ pnv_ioda_free_pe(phb, s->pe_number);
+ }
+ }
+
pnv_pci_ioda2_release_dma_pe(pdev, pe);
/* Remove from list */
@@ -1418,7 +1430,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
struct pci_bus *bus;
struct pci_controller *hose;
struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
+ struct pnv_ioda_pe *pe, *master_pe;
int pe_num;
u16 vf_index;
struct pci_dn *pdn;
@@ -1464,10 +1476,16 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
GFP_KERNEL, hose->node);
pe->tce32_table->data = pe;
- /* Put PE to the list */
- mutex_lock(&phb->ioda.pe_list_mutex);
- list_add_tail(&pe->list, &phb->ioda.pe_list);
- mutex_unlock(&phb->ioda.pe_list_mutex);
+ /*
+ * Put PE to the list,
+ * or postpone this if we have Compound PE
+ */
+ if ((pdn->m64_per_iov != M64_PER_IOV) ||
+ (num_vfs <= M64_PER_IOV)) {
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_add_tail(&pe->list, &phb->ioda.pe_list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+ }
pnv_pci_ioda2_setup_dma_pe(phb, pe);
}
@@ -1480,10 +1498,32 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++) {
+ master_pe = NULL;
+
for (vf_index = vf_group * vf_per_group;
vf_index < (vf_group + 1) * vf_per_group &&
vf_index < num_vfs;
vf_index++) {
+
+ /*
+ * Figure out the master PE and put all slave
+ * PEs to master PE's list.
+ */
+ pe = &phb->ioda.pe_array[pdn->offset + vf_index];
+ if (!master_pe) {
+ pe->flags |= PNV_IODA_PE_MASTER;
+ INIT_LIST_HEAD(&pe->slaves);
+ master_pe = pe;
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_add_tail(&pe->list, &phb->ioda.pe_list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+ } else {
+ pe->flags |= PNV_IODA_PE_SLAVE;
+ pe->master = master_pe;
+ list_add_tail(&pe->list,
+ &master_pe->slaves);
+ }
+
for (vf_index1 = vf_group * vf_per_group;
vf_index1 < (vf_group + 1) * vf_per_group &&
vf_index1 < num_vfs;
@@ -667,7 +667,7 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
- struct pnv_ioda_pe *pe;
+ struct pnv_ioda_pe *pe, *slave;
struct pci_dn *pdn;
/* Fix the VF pdn PE number */
@@ -679,10 +679,23 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
(pdev->devfn & 0xff))) {
pdn->pe_number = pe->pe_number;
pe->pdev = pdev;
- break;
+ goto found;
+ }
+
+ if ((pe->flags & PNV_IODA_PE_MASTER) &&
+ (pe->flags & PNV_IODA_PE_VF)) {
+ list_for_each_entry(slave, &pe->slaves, list) {
+ if (slave->rid == ((pdev->bus->number << 8)
+ | (pdev->devfn & 0xff))) {
+ pdn->pe_number = slave->pe_number;
+ slave->pdev = pdev;
+ goto found;
+ }
+ }
}
}
}
+found:
#endif /* CONFIG_PCI_IOV */
if (phb && phb->dma_dev_setup)
When the VF BAR size is larger than 64MB, we group VFs in units of the M64 BAR, which means the VFs in a group should form a compound PE. This patch links those VF PEs into a compound PE in this case. Signed-off-by: Wei Yang <weiyang@linux.vnet.ibm.com> --- arch/powerpc/platforms/powernv/pci-ioda.c | 50 ++++++++++++++++++++++++++--- arch/powerpc/platforms/powernv/pci.c | 17 ++++++++-- 2 files changed, 60 insertions(+), 7 deletions(-)