--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -907,6 +907,7 @@ struct msr_bitmap_range {
struct kvm_xen {
bool long_mode;
bool shinfo_set;
+ u8 upcall_vector;
struct gfn_to_hva_cache shinfo_cache;
};
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -14,6 +14,7 @@
#include "irq.h"
#include "i8254.h"
#include "x86.h"
+#include "xen.h"
/*
* check if there are pending timer events
@@ -56,6 +57,9 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v)
if (!lapic_in_kernel(v))
return v->arch.interrupt.injected;
+ if (kvm_xen_has_interrupt(v))
+ return 1;
+
if (!kvm_apic_accept_pic_intr(v))
return 0;
@@ -110,6 +114,9 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
if (!lapic_in_kernel(v))
return v->arch.interrupt.nr;
+ if (kvm_xen_has_interrupt(v))
+ return v->kvm->arch.xen.upcall_vector;
+
if (irqchip_split(v->kvm)) {
int vector = v->arch.pending_external_vector;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8935,7 +8935,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops.msr_filter_changed(vcpu);
}
- if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+ if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
+ kvm_xen_has_interrupt(vcpu)) {
++vcpu->stat.req_event;
kvm_apic_accept_events(vcpu);
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
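The extra check in vcpu_enter_guest() is needed because evtchn_upcall_pending lives in guest memory and can be set by the VMM or by another vCPU without any accompanying KVM_REQ_EVENT kick, so the event injection path has to re-evaluate it on every entry. For illustration, the injecting side could look like the sketch below; this is a hypothetical VMM-side helper (both the function and the choice of signal are assumptions, not part of this patch), assuming the vcpu_info page is already mapped into the VMM and a signal is used to force the vCPU thread out of KVM_RUN:

#include <pthread.h>
#include <signal.h>
#include <stdint.h>

/* Hypothetical VMM-side injection: mark the upcall pending in the shared
 * vcpu_info and kick the vCPU thread, so that vcpu_enter_guest() re-runs
 * its event checks and kvm_xen_has_interrupt() reports the new event.
 */
static void xen_post_upcall(volatile uint8_t *evtchn_upcall_pending,
			    pthread_t vcpu_thread)
{
	__atomic_store_n(evtchn_upcall_pending, 1, __ATOMIC_RELEASE);
	pthread_kill(vcpu_thread, SIGUSR1);	/* forces an exit from KVM_RUN */
}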
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -176,6 +176,45 @@ void kvm_xen_setup_runstate_page(struct kvm_vcpu *v)
kvm_xen_update_runstate(v, RUNSTATE_running, steal_time);
}
+int kvm_xen_has_interrupt(struct kvm_vcpu *v)
+{
+ u8 rc = 0;
+
+ /*
+ * If the global upcall vector (HVMIRQ_callback_vector) is set and
+ * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
+ */
+ if (v->arch.xen.vcpu_info_set && v->kvm->arch.xen.upcall_vector) {
+ struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache;
+ struct kvm_memslots *slots = kvm_memslots(v->kvm);
+ unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending);
+
+ /* No need for compat handling here */
+ BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
+ offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
+	BUILD_BUG_ON(sizeof(rc) !=
+		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
+	BUILD_BUG_ON(sizeof(rc) !=
+		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
+
+	/*
+	 * For efficiency, this mirrors the checks for using the valid cache
+	 * in kvm_read_guest_offset_cached(), but uses __get_user() directly
+	 * when the cache is valid, falling back to the slow path otherwise.
+	 */
+ if (likely(slots->generation == ghc->generation &&
+ !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
+ /* Fast path */
+ __get_user(rc, (u8 __user *)ghc->hva + offset);
+ } else {
+ /* Slow path */
+ kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
+ sizeof(rc));
+ }
+ }
+ return rc;
+}
+
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
struct kvm_vcpu *v;
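The "no need for compat handling" shortcut in kvm_xen_has_interrupt() holds because evtchn_upcall_pending is the first byte of vcpu_info in both the 64-bit and 32-bit Xen ABIs, which is exactly what the BUILD_BUG_ON()s assert at compile time. As an illustrative sketch modelled on the Xen public headers (trailing fields elided; the struct name here is invented for the example):

#include <stdint.h>

/* Leading fields of the guest-visible vcpu_info: the pending flag sits at
 * offset 0 regardless of guest word size, so a single-byte read needs no
 * 32-bit/64-bit switch.
 */
struct vcpu_info_head {
	uint8_t evtchn_upcall_pending;	/* set when an upcall is pending */
	uint8_t evtchn_upcall_mask;	/* guest masks upcalls via this */
	/* evtchn_pending_sel, arch and time info follow */
};

The fast/slow split mirrors the cache-validity test inside kvm_read_guest_offset_cached() so that the common case is a single __get_user() rather than a call into the generic cached-read path; this runs on every guest entry once interrupt delivery is being evaluated, so it is worth keeping cheap. Note that rc stays 0 if either read fails, so a faulting read is simply treated as "no interrupt pending".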
@@ -245,6 +284,16 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
v->arch.xen.last_state_ns = ktime_get_ns();
break;
+	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
+		/* Vector 0 disables the upcall; otherwise 0x10 is the minimum */
+		if (data->u.vector && data->u.vector < 0x10)
+			r = -EINVAL;
+		else {
+			kvm->arch.xen.upcall_vector = data->u.vector;
+			r = 0;
+		}
+		break;
+
default:
break;
}
@@ -303,6 +352,11 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
}
break;
+ case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
+ data->u.vector = kvm->arch.xen.upcall_vector;
+ r = 0;
+ break;
+
default:
break;
}
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -11,6 +11,7 @@
void kvm_xen_setup_runstate_page(struct kvm_vcpu *vcpu);
void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu);
+int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1593,6 +1593,7 @@ struct kvm_xen_hvm_attr {
union {
__u8 long_mode;
+ __u8 vector;
struct {
__u64 gfn;
} shared_info;
@@ -1610,6 +1611,7 @@ struct kvm_xen_hvm_attr {
#define KVM_XEN_ATTR_TYPE_VCPU_INFO 0x2
#define KVM_XEN_ATTR_TYPE_VCPU_TIME_INFO 0x3
#define KVM_XEN_ATTR_TYPE_VCPU_RUNSTATE 0x4
+#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x5
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
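At the UAPI level the VMM programs the vector through the attribute interface. A minimal usage sketch, assuming the KVM_XEN_HVM_SET_ATTR ioctl defined alongside these attributes elsewhere in this series, with vm_fd an open VM file descriptor and error handling elided:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: program the Xen upcall vector for a VM. Per the set_attr
 * handler above, the vector must be 0 (upcall disabled) or >= 0x10.
 */
static int set_xen_upcall_vector(int vm_fd, unsigned char vector)
{
	struct kvm_xen_hvm_attr attr = {
		.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
		.u.vector = vector,
	};

	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
}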