--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -741,7 +741,7 @@ void hvm_domain_destroy(struct domain *d)
xfree(ioport);
}
- destroy_vpci_mmcfg(d);
+ hvm_ioreq_free_mmcfg(d);
}
static int hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -279,6 +279,18 @@ unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,
return CF8_ADDR_LO(cf8) | (addr & 3);
}
+unsigned int hvm_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
+ paddr_t addr, pci_sbdf_t *sbdf)
+{
+ addr -= mmcfg->addr;
+ sbdf->bdf = MMCFG_BDF(addr);
+ sbdf->bus += mmcfg->start_bus;
+ sbdf->seg = mmcfg->segment;
+
+ return addr & (PCI_CFG_SPACE_EXP_SIZE - 1);
+}
+
+
/* Do some sanity checks. */
static bool vpci_access_allowed(unsigned int reg, unsigned int len)
{
@@ -383,14 +395,6 @@ void register_vpci_portio_handler(struct domain *d)
handler->ops = &vpci_portio_ops;
}
-struct hvm_mmcfg {
- struct list_head next;
- paddr_t addr;
- unsigned int size;
- uint16_t segment;
- uint8_t start_bus;
-};
-
/* Handlers to trap PCI MMCFG config accesses. */
static const struct hvm_mmcfg *vpci_mmcfg_find(const struct domain *d,
paddr_t addr)
@@ -558,22 +562,6 @@ int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
return 0;
}
-void destroy_vpci_mmcfg(struct domain *d)
-{
- struct list_head *mmcfg_regions = &d->arch.hvm.mmcfg_regions;
-
- write_lock(&d->arch.hvm.mmcfg_lock);
- while ( !list_empty(mmcfg_regions) )
- {
- struct hvm_mmcfg *mmcfg = list_first_entry(mmcfg_regions,
- struct hvm_mmcfg, next);
-
- list_del(&mmcfg->next);
- xfree(mmcfg);
- }
- write_unlock(&d->arch.hvm.mmcfg_lock);
-}
-
/*
* Local variables:
* mode: C
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -690,6 +690,22 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
rangeset_destroy(s->range[i]);
}
+void hvm_ioreq_free_mmcfg(struct domain *d)
+{
+ struct list_head *mmcfg_regions = &d->arch.hvm.mmcfg_regions;
+
+ write_lock(&d->arch.hvm.mmcfg_lock);
+ while ( !list_empty(mmcfg_regions) )
+ {
+ struct hvm_mmcfg *mmcfg = list_first_entry(mmcfg_regions,
+ struct hvm_mmcfg, next);
+
+ list_del(&mmcfg->next);
+ xfree(mmcfg);
+ }
+ write_unlock(&d->arch.hvm.mmcfg_lock);
+}
+
static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
ioservid_t id)
{
@@ -1329,6 +1345,19 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
}
+static const struct hvm_mmcfg *mmcfg_find(const struct domain *d,
+ paddr_t addr)
+{
+ const struct hvm_mmcfg *mmcfg;
+
+ list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
+ if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size )
+ return mmcfg;
+
+ return NULL;
+}
+
+
struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
ioreq_t *p)
{
@@ -1338,27 +1367,34 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
uint64_t addr;
unsigned int id;
bool internal = true;
+ const struct hvm_mmcfg *mmcfg;
if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
return NULL;
cf8 = d->arch.hvm.pci_cf8;
- if ( p->type == IOREQ_TYPE_PIO &&
- (p->addr & ~3) == 0xcfc &&
- CF8_ENABLED(cf8) )
+ read_lock(&d->arch.hvm.mmcfg_lock);
+ if ( (p->type == IOREQ_TYPE_PIO &&
+ (p->addr & ~3) == 0xcfc &&
+ CF8_ENABLED(cf8)) ||
+ (p->type == IOREQ_TYPE_COPY &&
+ (mmcfg = mmcfg_find(d, p->addr)) != NULL) )
{
uint32_t x86_fam;
pci_sbdf_t sbdf;
unsigned int reg;
- reg = hvm_pci_decode_addr(cf8, p->addr, &sbdf);
+ reg = p->type == IOREQ_TYPE_PIO ? hvm_pci_decode_addr(cf8, p->addr,
+ &sbdf)
+ : hvm_mmcfg_decode_addr(mmcfg, p->addr,
+ &sbdf);
/* PCI config data cycle */
type = XEN_DMOP_IO_RANGE_PCI;
addr = ((uint64_t)sbdf.sbdf << 32) | reg;
/* AMD extended configuration space access? */
- if ( CF8_ADDR_HI(cf8) &&
+ if ( p->type == IOREQ_TYPE_PIO && CF8_ADDR_HI(cf8) &&
d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
(x86_fam = get_cpu_family(
d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 &&
@@ -1377,6 +1413,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
addr = p->addr;
}
+ read_unlock(&d->arch.hvm.mmcfg_lock);
retry:
FOR_EACH_IOREQ_SERVER(d, id, s)
@@ -1629,6 +1666,47 @@ void hvm_ioreq_init(struct domain *d)
register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
}
+int hvm_ioreq_register_mmcfg(struct domain *d, paddr_t addr,
+ unsigned int start_bus, unsigned int end_bus,
+ unsigned int seg)
+{
+ struct hvm_mmcfg *mmcfg, *new;
+
+ if ( start_bus > end_bus )
+ return -EINVAL;
+
+ new = xmalloc(struct hvm_mmcfg);
+ if ( !new )
+ return -ENOMEM;
+
+ new->addr = addr + (start_bus << 20);
+ new->start_bus = start_bus;
+ new->segment = seg;
+ new->size = (end_bus - start_bus + 1) << 20;
+
+ write_lock(&d->arch.hvm.mmcfg_lock);
+ list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
+ if ( new->addr < mmcfg->addr + mmcfg->size &&
+ mmcfg->addr < new->addr + new->size )
+ {
+ int ret = -EEXIST;
+
+ if ( new->addr == mmcfg->addr &&
+ new->start_bus == mmcfg->start_bus &&
+ new->segment == mmcfg->segment &&
+ new->size == mmcfg->size )
+ ret = 0;
+ write_unlock(&d->arch.hvm.mmcfg_lock);
+ xfree(new);
+ return ret;
+ }
+
+ list_add(&new->next, &d->arch.hvm.mmcfg_regions);
+ write_unlock(&d->arch.hvm.mmcfg_lock);
+
+ return 0;
+}
+
/*
* Local variables:
* mode: C
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -165,9 +165,19 @@ void stdvga_deinit(struct domain *d);
extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
-/* Decode a PCI port IO access into a bus/slot/func/reg. */
+struct hvm_mmcfg {
+ struct list_head next;
+ paddr_t addr;
+ unsigned int size;
+ uint16_t segment;
+ uint8_t start_bus;
+};
+
+/* Decode a PCI port IO or MMCFG access into a bus/slot/func/reg. */
unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,
pci_sbdf_t *sbdf);
+unsigned int hvm_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
+ paddr_t addr, pci_sbdf_t *sbdf);
/*
* HVM port IO handler that performs forwarding of guest IO ports into machine
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -58,6 +58,12 @@ void hvm_ioreq_init(struct domain *d);
int hvm_add_ioreq_handler(struct domain *d, ioservid_t id,
int (*handler)(struct vcpu *v, ioreq_t *));
+int hvm_ioreq_register_mmcfg(struct domain *d, paddr_t addr,
+ unsigned int start_bus, unsigned int end_bus,
+ unsigned int seg);
+
+void hvm_ioreq_free_mmcfg(struct domain *d);
+
#endif /* __ASM_X86_HVM_IOREQ_H__ */
/*
Pick up on the infrastructure already added for vPCI and allow ioreq
to decode accesses to MMCFG regions registered for a domain. This
infrastructure is still only accessible from internal callers, so
MMCFG regions can only be registered from the internal domain builder
used by PVH dom0.

Note that the vPCI infrastructure to decode and handle accesses to
MMCFG regions will be removed in following patches when vPCI is
switched to become an internal ioreq server.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/hvm/hvm.c          |  2 +-
 xen/arch/x86/hvm/io.c           | 36 +++++---------
 xen/arch/x86/hvm/ioreq.c        | 88 +++++++++++++++++++++++++++++++--
 xen/include/asm-x86/hvm/io.h    | 12 ++++-
 xen/include/asm-x86/hvm/ioreq.h |  6 +++
 5 files changed, 113 insertions(+), 31 deletions(-)
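As a worked illustration of the decode performed by hvm_mmcfg_decode_addr():
in an MMCFG (ECAM) region each function gets a 4KiB extended configuration
space, so bits 0-11 of the offset into the region select the register,
bits 12-14 the function, bits 15-19 the device, and bits 20 and up the bus.
The standalone sketch below assumes MMCFG_BDF() matches the layout the patch
relies on; the base and access addresses are made up:

/*
 * Standalone ECAM decode sketch.  PCI_CFG_SPACE_EXP_SIZE and MMCFG_BDF()
 * are assumed to match the definitions used by the patch; the addresses
 * are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define PCI_CFG_SPACE_EXP_SIZE 4096
#define MMCFG_BDF(addr) (((addr) & 0x0ffff000U) >> 12)

int main(void)
{
    const uint64_t base = 0xe0000000;   /* hypothetical MMCFG base, bus 0 */
    /* Access to bus 3, device 2, function 1, register 0x44. */
    const uint64_t addr = base + (3ULL << 20) + (2ULL << 15) +
                          (1ULL << 12) + 0x44;
    uint64_t offset = addr - base;
    unsigned int bdf = MMCFG_BDF(offset);
    unsigned int reg = offset & (PCI_CFG_SPACE_EXP_SIZE - 1);

    /* Prints: bus 3 dev 2 fn 1 reg 0x44 */
    printf("bus %u dev %u fn %u reg %#x\n",
           bdf >> 8, (bdf >> 3) & 0x1f, bdf & 0x7, reg);

    return 0;
}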
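And a sketch of how the internal domain builder used for PVH dom0 might
consume the new registration interface. pvh_setup_mmcfg() is a hypothetical
caller, and pci_mmcfg_config[] / pci_mmcfg_config_num are assumed to hold
the MMCFG allocations Xen parsed from the ACPI MCFG table:

/*
 * Hypothetical PVH dom0 builder hook: register every MMCFG region
 * reported by the hardware MCFG table so that ioreq can decode guest
 * accesses to it.  The iteration over pci_mmcfg_config[] is an
 * assumption about the caller, not part of this patch.
 */
static int __init pvh_setup_mmcfg(struct domain *d)
{
    unsigned int i;

    for ( i = 0; i < pci_mmcfg_config_num; i++ )
    {
        int rc = hvm_ioreq_register_mmcfg(d, pci_mmcfg_config[i].address,
                                          pci_mmcfg_config[i].start_bus_number,
                                          pci_mmcfg_config[i].end_bus_number,
                                          pci_mmcfg_config[i].pci_segment);

        if ( rc )
        {
            printk("Unable to register MMCFG region #%u: %d\n", i, rc);
            return rc;
        }
    }

    return 0;
}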
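Note also the registration semantics implied by the overlap check in
hvm_ioreq_register_mmcfg(): registering a region identical to an existing
one is treated as a no-op and returns 0, while any other overlap is
rejected. With hypothetical addresses:

hvm_ioreq_register_mmcfg(d, 0xe0000000, 0, 255, 0); /* new region: 0 */
hvm_ioreq_register_mmcfg(d, 0xe0000000, 0, 255, 0); /* identical: 0 */
hvm_ioreq_register_mmcfg(d, 0xe0000000, 0, 63, 0);  /* overlaps: -EEXIST */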