@@ -864,6 +864,7 @@ static int hvm_pt_add_register(struct hvm_pt_device *dev,
static struct hvm_pt_handler_init *hwdom_pt_handlers[] = {
&hvm_pt_bar_init,
&hvm_pt_vf_bar_init,
+ &hvm_pt_msi_init,
};
int hwdom_add_device(struct pci_dev *pdev)
@@ -931,6 +932,64 @@ int hwdom_add_device(struct pci_dev *pdev)
return 0;
}
+/* Generic handlers for HVM PCI pass-through. */
+int hvm_pt_common_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset, uint32_t *data)
+{
+ *data = handler->init_val;
+ return 0;
+}
+
+int hvm_pt_word_reg_read(struct hvm_pt_device *s, struct hvm_pt_reg *reg,
+ uint16_t *value, uint16_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint16_t valid_emu_mask = 0;
+ uint16_t *data = &reg->val.word;
+
+ /* emulate word register */
+ valid_emu_mask = handler->emu_mask & valid_mask;
+ *value = HVM_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
+
+ return 0;
+}
+
+int hvm_pt_long_reg_read(struct hvm_pt_device *s, struct hvm_pt_reg *reg,
+ uint32_t *value, uint32_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint32_t valid_emu_mask = 0;
+ uint32_t *data = &reg->val.dword;
+
+ /* emulate long register */
+ valid_emu_mask = handler->emu_mask & valid_mask;
+ *value = HVM_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
+
+ return 0;
+}
+
+int hvm_pt_long_reg_write(struct hvm_pt_device *s, struct hvm_pt_reg *reg,
+ uint32_t *val, uint32_t dev_value,
+ uint32_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint32_t writable_mask = 0;
+ uint32_t throughable_mask = hvm_pt_get_throughable_mask(s, handler,
+ valid_mask);
+ uint32_t *data = &reg->val.dword;
+
+ /* modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+
+ /* create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value & ~handler->rw1c_mask,
+ throughable_mask);
+
+ return 0;
+}
+
static const struct hvm_io_ops dpci_portio_ops = {
.accept = dpci_portio_accept,
.read = dpci_portio_read,
@@ -624,3 +624,541 @@ void msix_write_completion(struct vcpu *v)
if ( msixtbl_write(v, ctrl_address, 4, 0) != X86EMUL_OKAY )
gdprintk(XENLOG_WARNING, "MSI-X write completion failure\n");
}
+
+/* MSI emulation. */
+
+/* Helper to check supported MSI features. */
+#define vmsi_check_type(offset, flags, what) \
+ ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
+ PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
+
+static inline uint64_t msi_addr64(struct hvm_pt_msi *msi)
+{
+ return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
+}
+
+/* Helper for updating a PIRQ-vMSI bind. */
+static int vmsi_update_bind(struct hvm_pt_msi *msi)
+{
+ xen_domctl_bind_pt_irq_t bind;
+ struct hvm_pt_device *s = container_of(msi, struct hvm_pt_device, msi);
+ int rc;
+
+ ASSERT(msi->pirq != -1);
+
+ bind.hvm_domid = DOMID_SELF;
+ bind.machine_irq = msi->pirq;
+ bind.irq_type = PT_IRQ_TYPE_MSI;
+ bind.u.msi.gvec = msi_vector(msi->data);
+ bind.u.msi.gflags = msi_gflags(msi->data, msi_addr64(msi));
+ bind.u.msi.gtable = 0;
+
+ pcidevs_lock();
+ rc = pt_irq_create_bind(current->domain, &bind);
+ pcidevs_unlock();
+ if ( rc )
+ {
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "updating of MSI failed. (err: %d)\n", rc);
+ rc = physdev_unmap_pirq(DOMID_SELF, msi->pirq);
+ if ( rc )
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "unmapping of MSI pirq %d failed. (err: %i)\n",
+ msi->pirq, rc);
+ msi->pirq = -1;
+ msi->mapped = false;
+ msi->initialized = false;
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Handlers. */
+
+/* Message Control register */
+static int vmsi_msgctrl_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset, uint32_t *data)
+{
+ struct hvm_pt_msi *msi = &s->msi;
+ struct pci_dev *pdev = s->pdev;
+ uint16_t reg_field;
+ uint8_t seg, bus, slot, func;
+
+ seg = pdev->seg;
+ bus = pdev->bus;
+ slot = PCI_SLOT(pdev->devfn);
+ func = PCI_FUNC(pdev->devfn);
+
+ /* Use I/O device register's value as initial value */
+ reg_field = pci_conf_read16(seg, bus, slot, func, real_offset);
+ if ( reg_field & PCI_MSI_FLAGS_ENABLE )
+ {
+ printk_pdev(pdev, XENLOG_INFO,
+ "MSI already enabled, disabling it first\n");
+ reg_field &= ~PCI_MSI_FLAGS_ENABLE;
+ pci_conf_write16(seg, bus, slot, func, real_offset, reg_field);
+ }
+ msi->flags |= reg_field;
+ msi->ctrl_offset = real_offset;
+ msi->initialized = false;
+ msi->mapped = false;
+
+ *data = handler->init_val | (reg_field & ~PCI_MSI_FLAGS_QMASK);
+ return 0;
+}
+
+static int vmsi_msgctrl_reg_write(struct hvm_pt_device *s,
+ struct hvm_pt_reg *reg, uint16_t *val,
+ uint16_t dev_value, uint16_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ struct hvm_pt_msi *msi = &s->msi;
+ uint16_t writable_mask = 0;
+ uint16_t throughable_mask = hvm_pt_get_throughable_mask(s, handler,
+ valid_mask);
+ uint16_t *data = &reg->val.word;
+ int rc;
+
+ /* Currently no support for multi-vector */
+ if ( *val & PCI_MSI_FLAGS_QSIZE )
+ printk_pdev(s->pdev, XENLOG_WARNING,
+ "tries to set more than 1 vector ctrl %x\n", *val);
+
+ /* Modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+ msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;
+
+ /* Create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ /* update MSI */
+ if ( *val & PCI_MSI_FLAGS_ENABLE )
+ {
+ /* Setup MSI pirq for the first time */
+ if ( !msi->initialized )
+ {
+ struct msi_info msi_info;
+ int index = -1;
+
+ /* Init physical one */
+ printk_pdev(s->pdev, XENLOG_DEBUG, "setup MSI (register: %x).\n",
+ *val);
+
+ memset(&msi_info, 0, sizeof(msi_info));
+ msi_info.seg = s->pdev->seg;
+ msi_info.bus = s->pdev->bus;
+ msi_info.devfn = s->pdev->devfn;
+
+ rc = physdev_map_pirq(DOMID_SELF, MAP_PIRQ_TYPE_MSI, &index,
+ &msi->pirq, &msi_info);
+ if ( rc )
+ {
+ /*
+ * Do not broadcast this error, since there's nothing else
+ * that can be done (MSI setup should have been successful).
+ * Guest MSI would be actually not working.
+ */
+ *val &= ~PCI_MSI_FLAGS_ENABLE;
+
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "can not map MSI (register: %x)!\n", *val);
+ return 0;
+ }
+
+ rc = vmsi_update_bind(msi);
+ if ( rc )
+ {
+ *val &= ~PCI_MSI_FLAGS_ENABLE;
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "can not bind MSI (register: %x)!\n", *val);
+ return 0;
+ }
+ msi->initialized = true;
+ msi->mapped = true;
+ }
+ msi->flags |= PCI_MSI_FLAGS_ENABLE;
+ }
+ else if ( msi->mapped )
+ {
+ uint8_t seg, bus, slot, func;
+ uint8_t gvec = msi_vector(msi->data);
+ uint32_t gflags = msi_gflags(msi->data, msi_addr64(msi));
+ uint16_t flags;
+
+ seg = s->pdev->seg;
+ bus = s->pdev->bus;
+ slot = PCI_SLOT(s->pdev->devfn);
+ func = PCI_FUNC(s->pdev->devfn);
+
+ flags = pci_conf_read16(seg, bus, slot, func, s->msi.ctrl_offset);
+ pci_conf_write16(seg, bus, slot, func, s->msi.ctrl_offset,
+ flags & ~PCI_MSI_FLAGS_ENABLE);
+
+ if ( msi->pirq == -1 )
+ return 0;
+
+ if ( msi->initialized )
+ {
+ xen_domctl_bind_pt_irq_t bind;
+
+ printk_pdev(s->pdev, XENLOG_DEBUG,
+ "Unbind MSI with pirq %d, gvec %#x\n", msi->pirq,
+ gvec);
+
+ bind.hvm_domid = DOMID_SELF;
+ bind.irq_type = PT_IRQ_TYPE_MSI;
+ bind.machine_irq = msi->pirq;
+ bind.u.msi.gvec = gvec;
+ bind.u.msi.gflags = gflags;
+ bind.u.msi.gtable = 0;
+
+ pcidevs_lock();
+ rc = pt_irq_destroy_bind(current->domain, &bind);
+ pcidevs_unlock();
+ if ( rc )
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "can not unbind MSI (register: %x)!\n", *val);
+
+ rc = physdev_unmap_pirq(DOMID_SELF, msi->pirq);
+ if ( rc )
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "unmapping of MSI pirq %d failed. (err: %i)\n",
+ msi->pirq, rc);
+ msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
+ msi->initialized = false;
+ msi->mapped = false;
+ msi->pirq = -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize Message Upper Address register */
+static int vmsi_msgaddr64_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset,
+ uint32_t *data)
+{
+ /* No need to initialize in case of 32 bit type */
+ if ( !(s->msi.flags & PCI_MSI_FLAGS_64BIT) )
+ *data = HVM_PT_INVALID_REG;
+ else
+ *data = handler->init_val;
+
+ return 0;
+}
+
+/* Write Message Address register */
+static int vmsi_msgaddr32_reg_write(struct hvm_pt_device *s,
+ struct hvm_pt_reg *reg, uint32_t *val,
+ uint32_t dev_value, uint32_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint32_t writable_mask = 0;
+ uint32_t old_addr = reg->val.dword;
+ uint32_t *data = &reg->val.dword;
+
+ /* Modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+ s->msi.addr_lo = *data;
+
+ /* Create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value, 0);
+
+ /* Update MSI */
+ if ( *data != old_addr && s->msi.mapped )
+ vmsi_update_bind(&s->msi);
+
+ return 0;
+}
+
+/* Write Message Upper Address register */
+static int vmsi_msgaddr64_reg_write(struct hvm_pt_device *s,
+ struct hvm_pt_reg *reg, uint32_t *val,
+ uint32_t dev_value, uint32_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint32_t writable_mask = 0;
+ uint32_t old_addr = reg->val.dword;
+ uint32_t *data = &reg->val.dword;
+
+ /* Check whether the type is 64 bit or not */
+ if ( !(s->msi.flags & PCI_MSI_FLAGS_64BIT) )
+ {
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "Can't write to the upper address without 64 bit support\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+ /* update the msi_info too */
+ s->msi.addr_hi = *data;
+
+ /* Create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value, 0);
+
+ /* Update MSI */
+ if ( *data != old_addr && s->msi.mapped )
+ vmsi_update_bind(&s->msi);
+
+ return 0;
+}
+
+/*
+ * This function is shared between 32 and 64 bits MSI implementations
+ * Initialize Message Data register
+ */
+static int vmsi_msgdata_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset,
+ uint32_t *data)
+{
+ uint32_t flags = s->msi.flags;
+ uint32_t offset = handler->offset;
+
+ /* Check the offset whether matches the type or not */
+ if ( vmsi_check_type(offset, flags, DATA) )
+ *data = handler->init_val;
+ else
+ *data = HVM_PT_INVALID_REG;
+
+ return 0;
+}
+
+/*
+ * This function is shared between 32 and 64 bits MSI implementations
+ * Write Message Data register
+ */
+static int vmsi_msgdata_reg_write(struct hvm_pt_device *s,
+ struct hvm_pt_reg *reg, uint16_t *val,
+ uint16_t dev_value, uint16_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ struct hvm_pt_msi *msi = &s->msi;
+ uint16_t writable_mask = 0;
+ uint16_t old_data = reg->val.word;
+ uint32_t offset = handler->offset;
+ uint16_t *data = &reg->val.word;
+
+ /* Check the offset whether matches the type or not */
+ if ( !vmsi_check_type(offset, msi->flags, DATA) )
+ {
+ /* Exit I/O emulator */
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "the offset does not match the 32/64 bit type!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+ /* Update the msi_info too */
+ msi->data = *data;
+
+ /* Create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value, 0);
+
+ /* Update MSI */
+ if ( *data != old_data && msi->mapped )
+ vmsi_update_bind(msi);
+
+ return 0;
+}
+
+/*
+ * This function is shared between 32 and 64 bits MSI implementations
+ * Initialize Mask register
+ */
+static int vmsi_mask_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset,
+ uint32_t *data)
+{
+ uint32_t flags = s->msi.flags;
+
+ /* Check the offset whether matches the type or not */
+ if ( !(flags & PCI_MSI_FLAGS_MASKBIT) )
+ *data = HVM_PT_INVALID_REG;
+ else if ( vmsi_check_type(handler->offset, flags, MASK) )
+ *data = handler->init_val;
+ else
+ *data = HVM_PT_INVALID_REG;
+
+ return 0;
+}
+
+/*
+ * This function is shared between 32 and 64 bits MSI implementations
+ * Initialize Pending register
+ */
+static int vmsi_pending_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset,
+ uint32_t *data)
+{
+ uint32_t flags = s->msi.flags;
+
+ /* check the offset whether matches the type or not */
+ if ( !(flags & PCI_MSI_FLAGS_MASKBIT) )
+ *data = HVM_PT_INVALID_REG;
+ else if ( vmsi_check_type(handler->offset, flags, PENDING) )
+ *data = handler->init_val;
+ else
+ *data = HVM_PT_INVALID_REG;
+
+ return 0;
+}
+
+/* MSI Capability Structure reg static information table */
+static struct hvm_pt_reg_handler vmsi_handler[] = {
+ /* Message Control reg */
+ {
+ .offset = PCI_MSI_FLAGS,
+ .size = 2,
+ .init_val = 0x0000,
+ .res_mask = 0xFE00,
+ .ro_mask = 0x018E,
+ .emu_mask = 0x017E,
+ .init = vmsi_msgctrl_reg_init,
+ .u.w.read = hvm_pt_word_reg_read,
+ .u.w.write = vmsi_msgctrl_reg_write,
+ },
+ /* Message Address reg */
+ {
+ .offset = PCI_MSI_ADDRESS_LO,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0x00000003,
+ .emu_mask = 0xFFFFFFFF,
+ .init = hvm_pt_common_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = vmsi_msgaddr32_reg_write,
+ },
+ /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
+ {
+ .offset = PCI_MSI_ADDRESS_HI,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0x00000000,
+ .emu_mask = 0xFFFFFFFF,
+ .init = vmsi_msgaddr64_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = vmsi_msgaddr64_reg_write,
+ },
+ /* Message Data reg (16 bits of data for 32-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_32,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0x0000,
+ .emu_mask = 0xFFFF,
+ .init = vmsi_msgdata_reg_init,
+ .u.w.read = hvm_pt_word_reg_read,
+ .u.w.write = vmsi_msgdata_reg_write,
+ },
+ /* Message Data reg (16 bits of data for 64-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_64,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0x0000,
+ .emu_mask = 0xFFFF,
+ .init = vmsi_msgdata_reg_init,
+ .u.w.read = hvm_pt_word_reg_read,
+ .u.w.write = vmsi_msgdata_reg_write,
+ },
+ /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_64, /* PCI_MSI_MASK_32 */
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0xFFFFFFFF,
+ .init = vmsi_mask_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = hvm_pt_long_reg_write,
+ },
+ /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
+ {
+ .offset = PCI_MSI_MASK_BIT, /* PCI_MSI_MASK_64 */
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0xFFFFFFFF,
+ .init = vmsi_mask_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = hvm_pt_long_reg_write,
+ },
+ /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_64 + 4, /* PCI_MSI_PENDING_32 */
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0x00000000,
+ .init = vmsi_pending_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = hvm_pt_long_reg_write,
+ },
+ /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
+ {
+ .offset = PCI_MSI_MASK_BIT + 4, /* PCI_MSI_PENDING_64 */
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0x00000000,
+ .init = vmsi_pending_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = hvm_pt_long_reg_write,
+ },
+ /* End */
+ {
+ .size = 0,
+ },
+};
+
+static int vmsi_group_init(struct hvm_pt_device *dev,
+ struct hvm_pt_reg_group *group)
+{
+ uint8_t seg, bus, slot, func;
+ struct pci_dev *pdev = dev->pdev;
+ int msi_offset;
+ uint8_t msi_size = 0xa;
+ uint16_t flags;
+
+ dev->msi.pirq = -1;
+ seg = pdev->seg;
+ bus = pdev->bus;
+ slot = PCI_SLOT(pdev->devfn);
+ func = PCI_FUNC(pdev->devfn);
+
+ msi_offset = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
+ if ( msi_offset == 0 )
+ return -ENODEV;
+
+ group->base_offset = msi_offset;
+ flags = pci_conf_read16(seg, bus, slot, func,
+ msi_offset + PCI_MSI_FLAGS);
+
+ if ( flags & PCI_MSI_FLAGS_64BIT )
+ msi_size += 4;
+ if ( flags & PCI_MSI_FLAGS_MASKBIT )
+ msi_size += 10;
+
+ dev->msi.flags = flags;
+ group->size = msi_size;
+
+ return 0;
+}
+
+struct hvm_pt_handler_init hvm_pt_msi_init = {
+ .handlers = vmsi_handler,
+ .init = vmsi_group_init,
+};
@@ -165,6 +165,9 @@ struct hvm_pt_reg_group;
/* Return code when register should be ignored. */
#define HVM_PT_INVALID_REG 0xFFFFFFFF
+#define HVM_PT_MERGE_VALUE(value, data, val_mask) \
+ (((value) & (val_mask)) | ((data) & ~(val_mask)))
+
/* function type for config reg */
typedef int (*hvm_pt_conf_reg_init)
(struct hvm_pt_device *, struct hvm_pt_reg_handler *, uint32_t real_offset,
@@ -350,6 +353,31 @@ struct hvm_pt_device {
/* Helper to add passed-through devices to the hardware domain. */
int hwdom_add_device(struct pci_dev *pdev);
+/* Generic handlers for HVM PCI pass-through. */
+int hvm_pt_long_reg_read(struct hvm_pt_device *, struct hvm_pt_reg *,
+ uint32_t *, uint32_t);
+int hvm_pt_long_reg_write(struct hvm_pt_device *, struct hvm_pt_reg *,
+ uint32_t *, uint32_t, uint32_t);
+int hvm_pt_word_reg_read(struct hvm_pt_device *, struct hvm_pt_reg *,
+ uint16_t *, uint16_t);
+
+int hvm_pt_common_reg_init(struct hvm_pt_device *, struct hvm_pt_reg_handler *,
+ uint32_t real_offset, uint32_t *data);
+
+static inline uint32_t hvm_pt_get_throughable_mask(
+ struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t valid_mask)
+{
+ uint32_t throughable_mask = ~(handler->emu_mask | handler->ro_mask);
+
+ if ( !s->permissive )
+ throughable_mask &= ~handler->res_mask;
+
+ return throughable_mask & valid_mask;
+}
+
+
#endif /* __ASM_X86_HVM_IO_H__ */
@@ -246,4 +246,36 @@ void ack_nonmaskable_msi_irq(struct irq_desc *);
void end_nonmaskable_msi_irq(struct irq_desc *, u8 vector);
void set_msi_affinity(struct irq_desc *, const cpumask_t *);
+static inline uint8_t msi_vector(uint32_t data)
+{
+ return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+}
+
+static inline uint8_t msi_dest_id(uint32_t addr)
+{
+ return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+}
+
+static inline uint32_t msi_gflags(uint32_t data, uint64_t addr)
+{
+ uint32_t result = 0;
+ int rh, dm, dest_id, deliv_mode, trig_mode;
+
+ rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
+ dm = (addr >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
+ dest_id = msi_dest_id(addr);
+ deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
+ trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+
+ result = dest_id | (rh << GFLAGS_SHIFT_RH)
+ | (dm << GFLAGS_SHIFT_DM)
+ | (deliv_mode << GFLAGS_SHIFT_DELIV_MODE)
+ | (trig_mode << GFLAGS_SHIFT_TRG_MODE);
+
+ return result;
+}
+
+/* MSI HVM pass-through handlers. */
+extern struct hvm_pt_handler_init hvm_pt_msi_init;
+
#endif /* __ASM_MSI_H */
@@ -56,6 +56,7 @@ struct dev_intx_gsi_link {
#define VMSI_TRIG_MODE 0x8000
#define GFLAGS_SHIFT_RH 8
+#define GFLAGS_SHIFT_DM 9
#define GFLAGS_SHIFT_DELIV_MODE 12
#define GFLAGS_SHIFT_TRG_MODE 15
@@ -296,6 +296,10 @@
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
+#define PCI_MSI_MASK_64 PCI_MSI_MASK_BIT
+#define PCI_MSI_MASK_32 PCI_MSI_DATA_64
+#define PCI_MSI_PENDING_32 PCI_MSI_MASK_BIT
+#define PCI_MSI_PENDING_64 20
/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
#define PCI_MSIX_FLAGS 2
Import the MSI handlers from QEMU into Xen. This allows Xen to detect accesses to the MSI registers and correctly setup PIRQs for physical devices that are then bound to the hardware domain. The current logic only allows the usage of a single MSI interrupt per device, so the maximum queue size announced by the device is unconditionally set to 0 (1 vector only). Signed-off-by: Roger Pau Monné <roger.pau@citrix.com> --- Cc: Paul Durrant <paul.durrant@citrix.com> Cc: Jan Beulich <jbeulich@suse.com> Cc: Andrew Cooper <andrew.cooper3@citrix.com> --- xen/arch/x86/hvm/io.c | 59 +++++ xen/arch/x86/hvm/vmsi.c | 538 +++++++++++++++++++++++++++++++++++++++++++ xen/include/asm-x86/hvm/io.h | 28 +++ xen/include/asm-x86/msi.h | 32 +++ xen/include/xen/hvm/irq.h | 1 + xen/include/xen/pci_regs.h | 4 + 6 files changed, 662 insertions(+)