@@ -247,12 +247,79 @@ static int dpci_portio_write(const struct hvm_io_handler *handler,
return X86EMUL_OKAY;
}
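+/*
+ * PCI config space port handler for the hardware domain: accept dword
+ * accesses to 0xcf8 and any access to the 0xcfc-0xcff data window.
+ */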
+static bool_t hw_dpci_portio_accept(const struct hvm_io_handler *handler,
+                                    const ioreq_t *p)
+{
+    if ( (p->addr == 0xcf8 && p->size == 4) || (p->addr & 0xfffc) == 0xcfc )
+    {
+        return 1;
+    }
+
+    return 0;
+}
+
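+/*
+ * Reads of 0xcf8 return the stored config address; reads of 0xcfc-0xcff go
+ * to the physical config space, subject to pci_cfg_ok().
+ */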
+static int hw_dpci_portio_read(const struct hvm_io_handler *handler,
+                               uint64_t addr,
+                               uint32_t size,
+                               uint64_t *data)
+{
+    struct domain *currd = current->domain;
+
+    if ( addr == 0xcf8 )
+    {
+        ASSERT(size == 4);
+        *data = currd->arch.pci_cf8;
+        return X86EMUL_OKAY;
+    }
+
+    ASSERT((addr & 0xfffc) == 0xcfc);
+    /* Clamp to the CFC window; config cycles are 1, 2 or 4 bytes wide. */
+    size = min(size, 4 - ((uint32_t)addr & 3));
+    if ( size == 3 )
+        size = 2;
+    /* Refused accesses read as all ones, matching the PV Dom0 path. */
+    *data = ~(uint64_t)0;
+    if ( pci_cfg_ok(currd, addr & 3, size, NULL) )
+        *data = pci_conf_read(currd->arch.pci_cf8, addr & 3, size);
+
+    return X86EMUL_OKAY;
+}
+
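+/*
+ * Writes to 0xcf8 update the stored config address; writes to 0xcfc-0xcff
+ * are forwarded to the physical config space if pci_cfg_ok() allows them.
+ */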
+static int hw_dpci_portio_write(const struct hvm_io_handler *handler,
+                                uint64_t addr,
+                                uint32_t size,
+                                uint64_t data)
+{
+    struct domain *currd = current->domain;
+    uint32_t data32;
+
+    if ( addr == 0xcf8 )
+    {
+        ASSERT(size == 4);
+        currd->arch.pci_cf8 = data;
+        return X86EMUL_OKAY;
+    }
+
+    ASSERT((addr & 0xfffc) == 0xcfc);
+    size = min(size, 4 - ((uint32_t)addr & 3));
+    if ( size == 3 )
+        size = 2;
+    /* pci_cfg_ok() may adjust the value to be written, so write data32. */
+    data32 = data;
+    if ( pci_cfg_ok(currd, addr & 3, size, &data32) )
+        pci_conf_write(currd->arch.pci_cf8, addr & 3, size, data32);
+
+    return X86EMUL_OKAY;
+}
+
static const struct hvm_io_ops dpci_portio_ops = {
.accept = dpci_portio_accept,
.read = dpci_portio_read,
.write = dpci_portio_write
};
+static const struct hvm_io_ops hw_dpci_portio_ops = {
+    .accept = hw_dpci_portio_accept,
+    .read = hw_dpci_portio_read,
+    .write = hw_dpci_portio_write
+};
+
void register_dpci_portio_handler(struct domain *d)
{
struct hvm_io_handler *handler = hvm_next_io_handler(d);
@@ -261,7 +328,10 @@ void register_dpci_portio_handler(struct domain *d)
     if ( handler == NULL )
         return;
handler->type = IOREQ_TYPE_PIO;
-    handler->ops = &dpci_portio_ops;
+    /* The hardware domain is handed the physical config space handlers. */
+    if ( is_hardware_domain(d) )
+        handler->ops = &hw_dpci_portio_ops;
+    else
+        handler->ops = &dpci_portio_ops;
}
/*
@@ -2031,45 +2031,6 @@ static bool_t admin_io_okay(unsigned int port, unsigned int bytes,
return ioports_access_permitted(d, port, port + bytes - 1);
}
-static bool_t pci_cfg_ok(struct domain *currd, unsigned int start,
-                         unsigned int size, uint32_t *write)
-{
-    uint32_t machine_bdf;
-
-    if ( !is_hardware_domain(currd) )
-        return 0;
-
-    if ( !CF8_ENABLED(currd->arch.pci_cf8) )
-        return 1;
-
-    machine_bdf = CF8_BDF(currd->arch.pci_cf8);
-    if ( write )
-    {
-        const unsigned long *ro_map = pci_get_ro_map(0);
-
-        if ( ro_map && test_bit(machine_bdf, ro_map) )
-            return 0;
-    }
-    start |= CF8_ADDR_LO(currd->arch.pci_cf8);
-    /* AMD extended configuration space access? */
-    if ( CF8_ADDR_HI(currd->arch.pci_cf8) &&
-         boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-         boot_cpu_data.x86 >= 0x10 && boot_cpu_data.x86 <= 0x17 )
-    {
-        uint64_t msr_val;
-
-        if ( rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) )
-            return 0;
-        if ( msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT) )
-            start |= CF8_ADDR_HI(currd->arch.pci_cf8);
-    }
-
-    return !write ?
-           xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf,
-                                     start, start + size - 1, 0) == 0 :
-           pci_conf_write_intercept(0, machine_bdf, start, size, write) >= 0;
-}
-
uint32_t guest_io_read(unsigned int port, unsigned int bytes,
struct domain *currd)
{
@@ -966,6 +966,45 @@ void pci_check_disable_device(u16 seg, u8 bus, u8 devfn)
PCI_COMMAND, cword & ~PCI_COMMAND_MASTER);
}
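+/*
+ * Check whether the hardware domain may access the config space range
+ * selected by its CF8 value.  For writes, the value may be adjusted via
+ * *write by pci_conf_write_intercept().
+ */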
+bool_t pci_cfg_ok(struct domain *currd, unsigned int start,
+                  unsigned int size, uint32_t *write)
+{
+    uint32_t machine_bdf;
+
+    if ( !is_hardware_domain(currd) )
+        return 0;
+
+    if ( !CF8_ENABLED(currd->arch.pci_cf8) )
+        return 1;
+
+    machine_bdf = CF8_BDF(currd->arch.pci_cf8);
+    if ( write )
+    {
+        const unsigned long *ro_map = pci_get_ro_map(0);
+
+        if ( ro_map && test_bit(machine_bdf, ro_map) )
+            return 0;
+    }
+    start |= CF8_ADDR_LO(currd->arch.pci_cf8);
+    /* AMD extended configuration space access? */
+    if ( CF8_ADDR_HI(currd->arch.pci_cf8) &&
+         boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+         boot_cpu_data.x86 >= 0x10 && boot_cpu_data.x86 <= 0x17 )
+    {
+        uint64_t msr_val;
+
+        if ( rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) )
+            return 0;
+        if ( msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT) )
+            start |= CF8_ADDR_HI(currd->arch.pci_cf8);
+    }
+
+    return !write ?
+           xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf,
+                                     start, start + size - 1, 0) == 0 :
+           pci_conf_write_intercept(0, machine_bdf, start, size, write) >= 0;
+}
+
/*
* scan pci devices to add all existed PCI devices to alldevs_list,
* and setup pci hierarchy in array bus2bridge.
@@ -162,6 +162,8 @@ const char *parse_pci(const char *, unsigned int *seg, unsigned int *bus,
bool_t pcie_aer_get_firmware_first(const struct pci_dev *);
+bool_t pci_cfg_ok(struct domain *, unsigned int, unsigned int, uint32_t *);
+
struct pirq;
int msixtbl_pt_register(struct domain *, struct pirq *, uint64_t gtable);
void msixtbl_pt_unregister(struct domain *, struct pirq *);
This is very similar to the PCI trap used for the traditional PV(H) Dom0.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Paul Durrant <paul.durrant@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/io.c         | 72 ++++++++++++++++++++++++++++++++++++++++++-
 xen/arch/x86/traps.c          | 39 -----------------------
 xen/drivers/passthrough/pci.c | 39 +++++++++++++++++++++++
 xen/include/xen/pci.h         |  2 ++
 4 files changed, 112 insertions(+), 40 deletions(-)
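
For reviewers less familiar with the legacy config mechanism, here is a short,
self-contained sketch (illustration only, not part of the patch) of how a CF8
address dword plus a data-port offset select a config space location.  The
SKETCH_* macros mirror the layout of the CF8_* helpers the handlers rely on,
redefined locally so the example stands alone:

/* Illustration: decode a CF8 dword and a 0xcfc-relative offset. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_CF8_ENABLED(cf8)  (!!((cf8) & 0x80000000u))      /* bit 31 */
#define SKETCH_CF8_BDF(cf8)      (((cf8) & 0x00ffff00u) >> 8)   /* bus:dev.fn */
#define SKETCH_CF8_ADDR_LO(cf8)  ((cf8) & 0x000000fcu)          /* dword-aligned reg */
#define SKETCH_CF8_ADDR_HI(cf8)  (((cf8) & 0x0f000000u) >> 16)  /* AMD extended reg bits */

int main(void)
{
    /* Dom0 writes 0x8000a008 to 0xcf8, then reads two bytes from 0xcfe. */
    uint32_t cf8 = 0x8000a008u;
    unsigned int offset = 0xcfe & 3;   /* "addr & 3" in the handlers above */

    if ( SKETCH_CF8_ENABLED(cf8) )
        printf("BDF %04x, register %#x, size 2\n",
               (unsigned int)SKETCH_CF8_BDF(cf8),
               (unsigned int)SKETCH_CF8_ADDR_LO(cf8) + offset);

    return 0;
}

The handlers above pass exactly this "addr & 3" offset (plus the clamped size)
to pci_conf_read()/pci_conf_write(), which combine it with the stored CF8
value.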