@@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_MP_STATE:
-
+ case KVM_CAP_IRQ_INJECT_STATUS:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -925,6 +925,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;
}
break;
+ case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
struct kvm_irq_level irq_event;
@@ -932,10 +933,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
if (irqchip_in_kernel(kvm)) {
+ __s32 status;
mutex_lock(&kvm->lock);
- kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+ status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
mutex_unlock(&kvm->lock);
+ if (ioctl == KVM_IRQ_LINE_STATUS) {
+ irq_event.status = status;
+ if (copy_to_user(argp, &irq_event,
+ sizeof irq_event))
+ goto out;
+ }
r = 0;
}
break;
@@ -616,7 +616,7 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
u32 error_code);
-void kvm_pic_set_irq(void *opaque, int irq, int level);
+int kvm_pic_set_irq(void *opaque, int irq, int level);
void kvm_inject_nmi(struct kvm_vcpu *vcpu);
@@ -76,12 +76,13 @@ void kvm_pic_clear_isr_ack(struct kvm *kvm)
/*
* set irq level. If an edge is detected, then the IRR is set to 1
*/
-static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
+static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
- int mask;
+ int mask, ret = 1;
mask = 1 << irq;
if (s->elcr & mask) /* level triggered */
if (level) {
+ ret = !(s->irr & mask);
s->irr |= mask;
s->last_irr |= mask;
} else {
@@ -90,11 +91,15 @@ static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
}
else /* edge triggered */
if (level) {
- if ((s->last_irr & mask) == 0)
+ if ((s->last_irr & mask) == 0) {
+ ret = !(s->irr & mask);
s->irr |= mask;
+ }
s->last_irr |= mask;
} else
s->last_irr &= ~mask;
+
+ return (s->imr & mask) ? -1 : ret;
}
/*
@@ -171,16 +176,19 @@ void kvm_pic_update_irq(struct kvm_pic *s)
pic_unlock(s);
}
-void kvm_pic_set_irq(void *opaque, int irq, int level)
+int kvm_pic_set_irq(void *opaque, int irq, int level)
{
struct kvm_pic *s = opaque;
+ int ret = -1;
pic_lock(s);
if (irq >= 0 && irq < PIC_NUM_PINS) {
- pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
+ ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
pic_update_irq(s);
}
pic_unlock(s);
+
+ return ret;
}
/*
@@ -995,6 +995,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_MP_STATE:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_REINJECT_CONTROL:
+ case KVM_CAP_IRQ_INJECT_STATUS:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -1852,6 +1853,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
create_pit_unlock:
mutex_unlock(&kvm->lock);
break;
+ case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
struct kvm_irq_level irq_event;
@@ -1859,10 +1861,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
if (irqchip_in_kernel(kvm)) {
+ __s32 status;
mutex_lock(&kvm->lock);
- kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
- irq_event.irq, irq_event.level);
+ status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+ irq_event.irq, irq_event.level);
mutex_unlock(&kvm->lock);
+ if (ioctl == KVM_IRQ_LINE_STATUS) {
+ irq_event.status = status;
+ if (copy_to_user(argp, &irq_event,
+ sizeof irq_event))
+ goto out;
+ }
r = 0;
}
break;
@@ -48,7 +48,10 @@ struct kvm_irq_level {
* For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
* For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
*/
- __u32 irq;
+ union {
+ __u32 irq;
+ __s32 status;
+ };
__u32 level;
};
@@ -402,6 +405,7 @@ struct kvm_trace_rec {
#ifdef __KVM_HAVE_IOAPIC
#define KVM_CAP_IRQ_ROUTING 25
#endif
+#define KVM_CAP_IRQ_INJECT_STATUS 26
#ifdef KVM_CAP_IRQ_ROUTING
@@ -456,6 +460,7 @@ struct kvm_irq_routing {
#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
+#define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level)
#define KVM_REGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
@@ -109,7 +109,7 @@ struct kvm_memory_slot {
struct kvm_kernel_irq_routing_entry {
u32 gsi;
- void (*set)(struct kvm_kernel_irq_routing_entry *e,
+ int (*set)(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int level);
union {
struct {
@@ -351,7 +351,7 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
-void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
@@ -83,19 +83,22 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
return result;
}
-static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
+static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
{
union ioapic_redir_entry *pent;
+ int injected = -1;
pent = &ioapic->redirtbl[idx];
if (!pent->fields.mask) {
- int injected = ioapic_deliver(ioapic, idx);
+ injected = ioapic_deliver(ioapic, idx);
if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
pent->fields.remote_irr = 1;
}
if (!pent->fields.trig_mode)
ioapic->irr &= ~(1 << idx);
+
+ return injected;
}
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
@@ -273,11 +276,12 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
return r;
}
-void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
+int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
{
u32 old_irr = ioapic->irr;
u32 mask = 1 << irq;
union ioapic_redir_entry entry;
+ int ret = 1;
if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
entry = ioapic->redirtbl[irq];
@@ -288,9 +292,10 @@ void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
ioapic->irr |= mask;
if ((!entry.fields.trig_mode && old_irr != ioapic->irr)
|| !entry.fields.remote_irr)
- ioapic_service(ioapic, irq);
+ ret = ioapic_service(ioapic, irq);
}
}
+ return ret;
}
static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int gsi,
@@ -83,7 +83,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
unsigned long bitmap);
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
int kvm_ioapic_init(struct kvm *kvm);
-void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
+int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
u8 dest_mode);
@@ -24,25 +24,28 @@
#include "ioapic.h"
-static void kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
+static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int level)
{
#ifdef CONFIG_X86
- kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin, level);
+ return kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin, level);
+#else
+ return -1;
#endif
}
-static void kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
+static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int level)
{
- kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
+ return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
}
/* This should be called with the kvm->lock mutex held */
-void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
{
struct kvm_kernel_irq_routing_entry *e;
unsigned long *irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
+ int ret = -1;
/* Logical OR for level trig interrupt */
if (level)
@@ -55,8 +58,11 @@ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
* writes to the unused one.
*/
list_for_each_entry(e, &kvm->irq_routing, link)
- if (e->gsi == irq)
- e->set(e, kvm, !!(*irq_state));
+ if (e->gsi == irq) {
+ int r = e->set(e, kvm, !!(*irq_state));
+ ret = max(ret, r);
+ }
+ return ret;
}
void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi)
@@ -165,7 +171,7 @@ int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
e->set = kvm_set_pic_irq;
break;
case KVM_IRQCHIP_PIC_SLAVE:
e->set = kvm_set_pic_irq;
delta = 8;
break;
case KVM_IRQCHIP_IOAPIC:
Use this one instead. It adds the capability checks.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
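
For reference, here is a minimal sketch (not part of the patch) of how userspace might drive the new interface. It assumes a /dev/kvm fd (sys_fd), a VM fd (vm_fd) with an in-kernel irqchip already created, and a hypothetical helper name inject_gsi(); the status semantics (positive = injected, 0 = coalesced, negative = not delivered, e.g. masked) are as I read them out of pic_set_irq1()/kvm_set_irq() above.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>

static int inject_gsi(int sys_fd, int vm_fd, unsigned int gsi, int level)
{
	struct kvm_irq_level event;

	memset(&event, 0, sizeof(event));
	event.irq = gsi;
	event.level = level;

	/* Fall back to the fire-and-forget ioctl when the capability is absent. */
	if (ioctl(sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQ_INJECT_STATUS) <= 0)
		return ioctl(vm_fd, KVM_IRQ_LINE, &event);

	if (ioctl(vm_fd, KVM_IRQ_LINE_STATUS, &event) < 0)
		return -1;

	/*
	 * irq is a union with status, which the kernel fills in on return:
	 *   > 0  the interrupt was injected into the irqchip,
	 *   = 0  it was coalesced with one already pending,
	 *   < 0  it was not delivered (e.g. the line is masked).
	 */
	if (event.status == 0)
		printf("gsi %u: interrupt coalesced\n", gsi);
	return event.status;
}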