@@ -798,4 +798,11 @@ struct kvm_assigned_msix_entry {
__u16 padding[2];
};
+struct kvm_assigned_msix_mmio {
+ __u32 assigned_dev_id;
+ __u32 flags;
+ __u64 base_addr;
+ __u32 reserved[2];
+};
+
#endif /* __LINUX_KVM_H */
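The new structure is the userspace-facing half of the interface: it names the assigned device and tells the kernel where the guest's MSI-X table lives, so the handlers below can claim that range on the MMIO bus. A minimal caller-side sketch, assuming the companion ioctl introduced elsewhere in this series is called KVM_ASSIGNED_SET_MSIX_MMIO and that dev_id, msix_table_gpa and vm_fd are placeholders supplied by the VMM (none of these names are defined in this hunk):

	/* Hypothetical userspace usage; ioctl name and flag values assumed. */
	struct kvm_assigned_msix_mmio mmio = {
		.assigned_dev_id = dev_id,       /* same id used when assigning the device */
		.base_addr = msix_table_gpa,     /* guest-physical base of the MSI-X table */
		.flags = 0,
	};

	if (ioctl(vm_fd, KVM_ASSIGNED_SET_MSIX_MMIO, &mmio) < 0)
		perror("KVM_ASSIGNED_SET_MSIX_MMIO");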
@@ -465,6 +465,8 @@ struct kvm_assigned_dev_kernel {
struct pci_dev *dev;
struct kvm *kvm;
spinlock_t assigned_dev_lock;
+ u64 msix_mmio_base;
+ struct kvm_io_device msix_mmio_dev;
};
struct kvm_irq_mask_notifier {
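On the kernel side, msix_mmio_base records that guest-physical base address and msix_mmio_dev is the kvm_io_device registered for the range. A rough sketch of how the ioctl handler elsewhere in this series presumably wires the two fields up, using the standard in-kernel MMIO-bus helpers (msix_mmio stands for the ioctl argument; the exact handler code is not part of this hunk):

	/* Sketch only: register the trap handlers for the MSI-X table range. */
	adev->msix_mmio_base = msix_mmio->base_addr;
	kvm_iodevice_init(&adev->msix_mmio_dev, &msix_mmio_ops);

	mutex_lock(&kvm->slots_lock);
	r = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &adev->msix_mmio_dev);
	mutex_unlock(&kvm->slots_lock);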
@@ -739,6 +739,137 @@ msix_entry_out:
return r;
}
+
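+/*
+ * Check whether an MMIO access hits one of the MSI-X table entries that
+ * belong to this assigned device; on a match, return the index into
+ * guest_msix_entries[] through @idx.
+ */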
+static bool msix_mmio_in_range(struct kvm_assigned_dev_kernel *adev,
+ gpa_t addr, int len, int *idx)
+{
+ int i;
+
+ if (!(adev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX))
+ return false;
+ BUG_ON(adev->msix_mmio_base == 0);
+ for (i = 0; i < adev->entries_nr; i++) {
+ u64 start, end;
+ start = adev->msix_mmio_base +
+ adev->guest_msix_entries[i].entry * PCI_MSIX_ENTRY_SIZE;
+ end = start + PCI_MSIX_ENTRY_SIZE;
+ if (addr >= start && addr + len <= end) {
+ *idx = i;
+ return true;
+ }
+ }
+ return false;
+}
+
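+/*
+ * Read handler for the trapped MSI-X table. The 16-byte entry image
+ * (address low/high, data, vector control) is rebuilt from the guest's
+ * MSI routing entry and the requested dword is copied out. Anything the
+ * kernel cannot handle returns -EOPNOTSUPP so the access falls back to
+ * userspace.
+ */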
+static int msix_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
+ void *val)
+{
+ struct kvm_assigned_dev_kernel *adev =
+ container_of(this, struct kvm_assigned_dev_kernel,
+ msix_mmio_dev);
+ int idx, r = 0;
+ u32 entry[4];
+ struct kvm_kernel_irq_routing_entry *e;
+
+ mutex_lock(&adev->kvm->lock);
+ if (!msix_mmio_in_range(adev, addr, len, &idx)) {
+ r = -EOPNOTSUPP;
+ goto out;
+ }
+ if ((addr & 0x3) || len != 4) {
+ printk(KERN_WARNING
+ "KVM: unaligned or non-dword read of device MSI-X MMIO! "
+ "addr 0x%llx, len %d\n", addr, len);
+ r = -EOPNOTSUPP;
+ goto out;
+ }
+
+ e = kvm_get_irq_routing_entry(adev->kvm,
+ adev->guest_msix_entries[idx].vector);
+ if (!e || e->type != KVM_IRQ_ROUTING_MSI) {
+ printk(KERN_WARNING "KVM: Wrong MSI-X routing entry! "
+ "addr 0x%llx, len %d\n", addr, len);
+ r = -EOPNOTSUPP;
+ goto out;
+ }
+ entry[0] = e->msi.address_lo;
+ entry[1] = e->msi.address_hi;
+ entry[2] = e->msi.data;
+ entry[3] = !!(adev->guest_msix_entries[idx].flags &
+ KVM_ASSIGNED_MSIX_MASK);
+ memcpy(val, &entry[addr % PCI_MSIX_ENTRY_SIZE / 4], len);
+
+out:
+ mutex_unlock(&adev->kvm->lock);
+ return r;
+}
+
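+/*
+ * Write handler for the trapped MSI-X table. Only the mask bit of the
+ * vector control dword is handled here: address/data writes to a masked
+ * entry are punted to userspace with -EOPNOTSUPP, and writes to an
+ * unmasked entry are dropped with a warning.
+ */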
+static int msix_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
+ const void *val)
+{
+ struct kvm_assigned_dev_kernel *adev =
+ container_of(this, struct kvm_assigned_dev_kernel,
+ msix_mmio_dev);
+ int idx, r = 0;
+ u32 new_val;
+ bool entry_masked;
+
+ mutex_lock(&adev->kvm->lock);
+ if (!msix_mmio_in_range(adev, addr, len, &idx)) {
+ r = -EOPNOTSUPP;
+ goto out;
+ }
+ if ((addr & 0x3) || len != 4) {
+ printk(KERN_WARNING
+ "KVM: Unaligned writing for device MSI-X MMIO! "
+ "addr 0x%llx, len %d, val 0x%lx\n",
+ addr, len, new_val);
+ r = -EOPNOTSUPP;
+ goto out;
+ }
+ entry_masked = adev->guest_msix_entries[idx].flags &
+ KVM_ASSIGNED_MSIX_MASK;
+ if (addr % PCI_MSIX_ENTRY_SIZE != PCI_MSIX_ENTRY_VECTOR_CTRL) {
+ /* Only allow address/data modification while the entry is masked */
+ if (!entry_masked) {
+ printk(KERN_WARNING
+ "KVM: guest tried to write an unmasked MSI-X entry. "
+ "addr 0x%llx, len %d, val 0x%x\n",
+ addr, len, new_val);
+ r = 0;
+ } else {
+ /* Leave the update to QEMU */
+ r = -EOPNOTSUPP;
+ }
+ goto out;
+ }
+ if (new_val & ~1ul) {
+ printk(KERN_WARNING
+ "KVM: Bad writing for device MSI-X MMIO! "
+ "addr 0x%llx, len %d, val 0x%lx\n",
+ addr, len, new_val);
+ r = -EOPNOTSUPP;
+ goto out;
+ }
+ if (new_val == 1 && !entry_masked) {
+ adev->guest_msix_entries[idx].flags |=
+ KVM_ASSIGNED_MSIX_MASK;
+ update_msix_mask(adev, idx);
+ } else if (new_val == 0 && entry_masked) {
+ adev->guest_msix_entries[idx].flags &=
+ ~KVM_ASSIGNED_MSIX_MASK;
+ update_msix_mask(adev, idx);
+ }
+out:
+ mutex_unlock(&adev->kvm->lock);
+
+ return r;
+}
+
+static const struct kvm_io_device_ops msix_mmio_ops = {
+ .read = msix_mmio_read,
+ .write = msix_mmio_write,
+};
+
#endif
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,