@@ -643,6 +643,7 @@ struct kvm_arch {
u64 disabled_quirks;
bool irqchip_split;
+ u8 nr_reserved_ioapic_pins;
};
struct kvm_vm_stat {
@@ -672,3 +672,19 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
spin_unlock(&ioapic->lock);
return 0;
}
+
+void kvm_arch_irq_routing_update(struct kvm *kvm)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+ if (ioapic)
+ return;
+ if (!lapic_in_kernel(kvm))
+ return;
+ kvm_make_scan_ioapic_request(kvm);
+}
+
+u8 kvm_arch_nr_userspace_ioapic_pins(struct kvm *kvm)
+{
+ return kvm->arch.nr_reserved_ioapic_pins;
+}
@@ -9,6 +9,7 @@ struct kvm;
struct kvm_vcpu;
#define IOAPIC_NUM_PINS KVM_IOAPIC_NUM_PINS
+#define MAX_NR_RESERVED_IOAPIC_PINS 48
#define IOAPIC_VERSION_ID 0x11 /* IOAPIC version */
#define IOAPIC_EDGE_TRIG 0
#define IOAPIC_LEVEL_TRIG 1
@@ -123,4 +124,5 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
u32 *tmr);
+void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
#endif
@@ -209,8 +209,7 @@ out:
if (old)
kfree_rcu(old, rcu);
- if (!irqchip_split(kvm))
- kvm_vcpu_request_scan_ioapic(kvm);
+ kvm_make_scan_ioapic_request(kvm);
}
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
@@ -3864,15 +3864,20 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
case KVM_CAP_SPLIT_IRQCHIP: {
mutex_lock(&kvm->lock);
r = -EEXIST;
- if (lapic_in_kernel(kvm))
+ if (irqchip_in_kernel(kvm))
goto split_irqchip_unlock;
r = -EINVAL;
- if (atomic_read(&kvm->online_vcpus))
- goto split_irqchip_unlock;
- r = kvm_setup_empty_irq_routing(kvm);
- if (r)
+ if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
goto split_irqchip_unlock;
- kvm->arch.irqchip_split = true;
+ if (!irqchip_split(kvm)) {
+ if (atomic_read(&kvm->online_vcpus))
+ goto split_irqchip_unlock;
+ r = kvm_setup_empty_irq_routing(kvm);
+ if (r)
+ goto split_irqchip_unlock;
+ kvm->arch.irqchip_split = true;
+ }
+ kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
r = 0;
split_irqchip_unlock:
mutex_unlock(&kvm->lock);
@@ -6335,8 +6340,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
}
- if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
- vcpu_scan_ioapic(vcpu);
+ if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) {
+ if (irqchip_split(vcpu->kvm)) {
+ memset(vcpu->arch.eoi_exit_bitmaps, 0, 32);
+ kvm_scan_ioapic_routes(
+ vcpu, vcpu->arch.eoi_exit_bitmaps);
+ kvm_x86_ops->load_eoi_exitmap(
+ vcpu, vcpu->arch.eoi_exit_bitmaps);
+
+ } else
+ vcpu_scan_ioapic(vcpu);
+ }
if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
kvm_vcpu_reload_apic_access_page(vcpu);
}
@@ -438,10 +438,19 @@ void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_arch_irq_routing_update(struct kvm *kvm);
+u8 kvm_arch_nr_userspace_ioapic_pins(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
+static inline void kvm_arch_irq_routing_update(struct kvm *kvm)
+{
+}
+static inline u8 kvm_arch_nr_userspace_ioapic_pins(struct kvm *kvm)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
@@ -203,6 +203,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
kvm_irq_routing_update(kvm);
mutex_unlock(&kvm->irq_lock);
+ kvm_arch_irq_routing_update(kvm);
+
synchronize_srcu_expedited(&kvm->irq_srcu);
new = old;
@@ -212,3 +214,38 @@ out:
kfree(new);
return r;
}
+
+void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_kernel_irq_routing_entry *entry;
+ struct kvm_irq_routing_table *table;
+ u32 i, nr_ioapic_pins;
+ int idx;
+
+ /* kvm->irq_routing must be read after clearing
+ * KVM_SCAN_IOAPIC. */
+ smp_mb();
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ table = kvm->irq_routing;
+ nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
+ kvm_arch_nr_userspace_ioapic_pins(kvm));
+ for (i = 0; i < nr_ioapic_pins; ++i) {
+ hlist_for_each_entry(entry, &table->map[i], link) {
+ u32 dest_id, dest_mode;
+
+ if (entry->type != KVM_IRQ_ROUTING_MSI)
+ continue;
+ dest_id = (entry->msi.address_lo >> 12) & 0xff;
+ dest_mode = (entry->msi.address_lo >> 2) & 0x1;
+ if (kvm_apic_match_dest(vcpu, NULL, 0, dest_id,
+ dest_mode)) {
+ u32 vector = entry->msi.data & 0xff;
+
+ __set_bit(vector,
+ (unsigned long *) eoi_exit_bitmap);
+ }
+ }
+ }
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+}
In order to support a userspace IOAPIC interacting with an in-kernel APIC, the EOI exit bitmaps need to be configurable. If the IOAPIC is in userspace (i.e. the irqchip has been split), the EOI exit bitmaps will be set whenever the GSI Routes are configured. In particular, the low MSI routes are reservable for userspace IOAPICs. For these MSI routes, the EOI Exit bit corresponding to the destination vector of the route will be set for the destination VCPU. The intention is for the userspace IOAPICs to use the reservable MSI routes to inject interrupts into the guest. This is a slight abuse of the notion of an MSI Route, given that MSIs classically bypass the IOAPIC. It might be worthwhile to add an additional route type to improve clarity. Compile tested for Intel x86. Signed-off-by: Steve Rutherford <srutherford@google.com> --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/ioapic.c | 16 ++++++++++++++++ arch/x86/kvm/ioapic.h | 2 ++ arch/x86/kvm/lapic.c | 3 +-- arch/x86/kvm/x86.c | 30 ++++++++++++++++++++++-------- include/linux/kvm_host.h | 9 +++++++++ virt/kvm/irqchip.c | 37 +++++++++++++++++++++++++++++++++++++ 7 files changed, 88 insertions(+), 10 deletions(-)