@@ -685,7 +685,7 @@ struct kvm_vcpu_events {
__u8 pad;
} nmi;
__u32 sipi_vector;
- __u32 flags; /* must be zero */
+ __u32 flags;
};
4.30 KVM_SET_VCPU_EVENTS
@@ -701,6 +701,14 @@ vcpu.
See KVM_GET_VCPU_EVENTS for the data structure.
+Fields that may be modified asynchronously by running VCPUs can be excluded
+from the update. These fields are nmi.pending and sipi_vector. Set the
+corresponding mask bits in the flags field to prevent the ioctl from
+overwriting their current in-kernel state:
+
+KVM_VCPUEVENT_MASK_NMI_PENDING - do not update nmi.pending
+KVM_VCPUEVENT_MASK_SIPI_VECTOR - do not update sipi_vector
+
5. The kvm_run structure
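For illustration, a minimal userspace sketch of a read-modify-write cycle that
sets both new mask bits so that neither asynchronously updated field is
clobbered. The vcpu_fd descriptor (obtained via KVM_CREATE_VCPU) is assumed and
error handling is trimmed; the ioctls and the structure are the existing
KVM_GET/SET_VCPU_EVENTS interface, the mask bits are the ones added by this
patch:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Update VCPU event state without touching nmi.pending and sipi_vector,
     * which a running VCPU may modify asynchronously. */
    static int set_events_preserving_async(int vcpu_fd)
    {
            struct kvm_vcpu_events events;

            if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
                    return -1;

            /* ... adjust exception/interrupt/NMI injection state here ... */

            /* Ask the kernel to leave the asynchronously updated fields alone. */
            events.flags = KVM_VCPUEVENT_MASK_NMI_PENDING |
                           KVM_VCPUEVENT_MASK_SIPI_VECTOR;

            return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
    }

Leaving either bit cleared transfers that field as before, so existing callers
that pass flags == 0 keep their old behaviour.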
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
__u8 reserved[31];
};
+/* When set in flags, skip the corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_MASK_NMI_PENDING 0x00000001
+#define KVM_VCPUEVENT_MASK_SIPI_VECTOR 0x00000002
+
/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
struct {
@@ -1953,7 +1953,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
- if (events->flags)
+ if (events->flags &
+ ~(KVM_VCPUEVENT_MASK_NMI_PENDING | KVM_VCPUEVENT_MASK_SIPI_VECTOR))
return -EINVAL;
vcpu_load(vcpu);
@@ -1970,10 +1971,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
kvm_pic_clear_isr_ack(vcpu->kvm);
vcpu->arch.nmi_injected = events->nmi.injected;
- vcpu->arch.nmi_pending = events->nmi.pending;
+ if (!(events->flags & KVM_VCPUEVENT_MASK_NMI_PENDING))
+ vcpu->arch.nmi_pending = events->nmi.pending;
kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
- vcpu->arch.sipi_vector = events->sipi_vector;
+ if (!(events->flags & KVM_VCPUEVENT_MASK_SIPI_VECTOR))
+ vcpu->arch.sipi_vector = events->sipi_vector;
vcpu_put(vcpu);