--- a/xen/arch/arm/vpci.c
+++ b/xen/arch/arm/vpci.c
@@ -7,31 +7,56 @@
 
 #include <asm/mmio.h>
 
-static pci_sbdf_t vpci_sbdf_from_gpa(const struct pci_host_bridge *bridge,
-                                     paddr_t gpa)
+static bool vpci_sbdf_from_gpa(struct domain *d,
+                               const struct pci_host_bridge *bridge,
+                               paddr_t gpa, pci_sbdf_t *sbdf)
 {
-    pci_sbdf_t sbdf;
+    bool translated = true;
+
+    ASSERT(sbdf);
 
     if ( bridge )
     {
-        sbdf.sbdf = VPCI_ECAM_BDF(gpa - bridge->cfg->phys_addr);
-        sbdf.seg = bridge->segment;
-        sbdf.bus += bridge->cfg->busn_start;
+        sbdf->sbdf = VPCI_ECAM_BDF(gpa - bridge->cfg->phys_addr);
+        sbdf->seg = bridge->segment;
+        sbdf->bus += bridge->cfg->busn_start;
     }
     else
-        sbdf.sbdf = VPCI_ECAM_BDF(gpa - GUEST_VPCI_ECAM_BASE);
+    {
+        /*
+         * For passed-through devices we need to map the virtual SBDF
+         * to the physical PCI device being passed through.
+         */
+        sbdf->sbdf = VPCI_ECAM_BDF(gpa - GUEST_VPCI_ECAM_BASE);
+        read_lock(&d->pci_lock);
+        translated = vpci_translate_virtual_device(d, sbdf);
+        read_unlock(&d->pci_lock);
+    }
 
-    return sbdf;
+    return translated;
 }
 
 static int vpci_mmio_read(struct vcpu *v, mmio_info_t *info,
                           register_t *r, void *p)
 {
     struct pci_host_bridge *bridge = p;
-    pci_sbdf_t sbdf = vpci_sbdf_from_gpa(bridge, info->gpa);
+    pci_sbdf_t sbdf;
     /* data is needed to prevent a pointer cast on 32bit */
     unsigned long data;
 
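+    /*
+     * The hardware domain traps each physical host bridge's ECAM window
+     * (bridge set); guests access a single emulated ECAM region (no bridge).
+     */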
+    ASSERT(!bridge == !is_hardware_domain(v->domain));
+
+    if ( !vpci_sbdf_from_gpa(v->domain, bridge, info->gpa, &sbdf) )
+    {
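+        /* Reads of config space we failed to translate return all ones. */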
+        *r = ~0UL;
+        return 1;
+    }
+
     if ( vpci_ecam_read(sbdf, ECAM_REG_OFFSET(info->gpa),
                         1U << info->dabt.size, &data) )
     {
@@ -39,7 +64,7 @@ static int vpci_mmio_read(struct vcpu *v, mmio_info_t *info,
         return 1;
     }
 
-    *r = ~0ul;
+    *r = ~0UL;
 
     return 0;
 }
@@ -48,7 +73,13 @@ static int vpci_mmio_write(struct vcpu *v, mmio_info_t *info,
                            register_t r, void *p)
 {
     struct pci_host_bridge *bridge = p;
-    pci_sbdf_t sbdf = vpci_sbdf_from_gpa(bridge, info->gpa);
+    pci_sbdf_t sbdf;
+
+    ASSERT(!bridge == !is_hardware_domain(v->domain));
+
+    if ( !vpci_sbdf_from_gpa(v->domain, bridge, info->gpa, &sbdf) )
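+        /* Writes to config space we failed to translate are dropped. */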
+        return 1;
 
     return vpci_ecam_write(sbdf, ECAM_REG_OFFSET(info->gpa),
                            1U << info->dabt.size, r);
--- a/xen/drivers/vpci/vpci.c
+++ b/xen/drivers/vpci/vpci.c
@@ -81,6 +81,34 @@ static int add_virtual_device(struct pci_dev *pdev)
     return 0;
 }
 
+/*
+ * Find the physical device which is mapped to the virtual device
+ * and translate the virtual SBDF to the physical one.
+ */
+bool vpci_translate_virtual_device(const struct domain *d, pci_sbdf_t *sbdf)
+{
+    const struct pci_dev *pdev;
+
+    ASSERT(!is_hardware_domain(d));
+    ASSERT(rw_is_locked(&d->pci_lock));
+
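+    /*
+     * Scan the devices assigned to the domain for one whose virtual
+     * (guest) SBDF matches the one being accessed.
+     */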
+    for_each_pdev ( d, pdev )
+    {
+        if ( pdev->vpci && (pdev->vpci->guest_sbdf.sbdf == sbdf->sbdf) )
+        {
+            /* Replace the guest SBDF with the physical one. */
+            *sbdf = pdev->sbdf;
+            return true;
+        }
+    }
+
+    return false;
+}
+
 #endif /* CONFIG_HAS_VPCI_GUEST_SUPPORT */
 
 void vpci_deassign_device(struct pci_dev *pdev)
--- a/xen/include/xen/vpci.h
+++ b/xen/include/xen/vpci.h
@@ -302,6 +302,19 @@ static inline bool __must_check vpci_process_pending(struct vcpu *v)
 }
 #endif
 
+#ifdef CONFIG_HAS_VPCI_GUEST_SUPPORT
+bool vpci_translate_virtual_device(const struct domain *d, pci_sbdf_t *sbdf);
+#else
+static inline bool vpci_translate_virtual_device(const struct domain *d,
+                                                 pci_sbdf_t *sbdf)
+{
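+    /* No virtual devices exist without guest vPCI support. */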
+    ASSERT_UNREACHABLE();
+
+    return false;
+}
+#endif
+
 #endif
 
 /*