@@ -835,7 +835,7 @@ struct kvm_clock_data {
Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86, arm64
+Architectures: x86, arm, arm64
Type: vcpu ioctl
Parameters: struct kvm_vcpu_events (out)
Returns: 0 on success, -1 on error
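
For context (not part of the patch), a VMM reads this state through the
ioctl documented here. A minimal userspace sketch, assuming vcpu_fd was
obtained via KVM_CREATE_VCPU and KVM_CAP_VCPU_EVENTS has been checked:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int serror_is_pending(int vcpu_fd)
    {
            struct kvm_vcpu_events events;

            /* 0 on success, -1 with errno set on error, as documented */
            if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
                    return -1;

            return events.exception.serror_pending;
    }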
@@ -883,7 +883,7 @@ Only two fields are defined in the flags field:
- KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
smi contains a valid state.
-ARM64:
+ARM/ARM64:
If the guest accesses a device that is being emulated by the host kernel in
such a way that a real device would generate a physical SError, KVM may make
@@ -934,7 +934,7 @@ struct kvm_vcpu_events {
Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86, arm64
+Architectures: x86, arm, arm64
Type: vcpu ioctl
Parameters: struct kvm_vcpu_events (in)
Returns: 0 on success, -1 on error
@@ -961,7 +961,7 @@ shall be written into the VCPU.
KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
-ARM64:
+ARM/ARM64:
Set the pending SError exception state for this VCPU. It is not possible to
'cancel' an SError that has been made pending.
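
A corresponding sketch for the set side (illustrative only): mark an
SError pending with no ESR, the only combination the 32-bit
implementation below accepts:

    struct kvm_vcpu_events events = {};

    events.exception.serror_pending = 1;
    /* Leave serror_has_esr as 0: the arm code rejects the
     * pending+ESR combination with -EINVAL. */
    if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
            perror("KVM_SET_VCPU_EVENTS");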
@@ -216,6 +216,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events);
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
@@ -27,6 +27,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
struct kvm_arch_memory_slot {
};
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+};
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
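
The pad[] field in the struct above keeps serror_esr 8-byte aligned and
makes the structure identical to the existing arm64 definition (64
bytes), so the layout is stable userspace ABI. An illustrative
compile-time check of that size assumption (not part of the patch):

    _Static_assert(sizeof(struct kvm_vcpu_events) == 64,
                   "kvm_vcpu_events is userspace ABI; layout is fixed");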
@@ -261,6 +261,31 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ /* A pending SError is signalled to the guest via the HCR.VA bit */
+ events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
+
+ return 0;
+}
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ bool serror_pending = events->exception.serror_pending;
+ bool has_esr = events->exception.serror_has_esr;
+
+ /* 32-bit has no VSESR to hold a user-supplied ESR, so reject it */
+ if (serror_pending && has_esr)
+ return -EINVAL;
+ else if (serror_pending)
+ kvm_inject_vabt(vcpu);
+
+ return 0;
+}
+
int __attribute_const__ kvm_target_cpu(void)
{
switch (read_cpuid_part()) {
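
With both directions wired up, a VMM migration path can round-trip the
pending-SError state. An illustrative sketch, where src_vcpu_fd and
dst_vcpu_fd are placeholder names for the source and destination vcpu
file descriptors:

    struct kvm_vcpu_events ev;

    /* Save on the source vcpu... */
    if (ioctl(src_vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0)
            perror("KVM_GET_VCPU_EVENTS");

    /* ...and replay on the destination. kvm_inject_vabt() then sets
     * HCR.VA, the same bit the get path reports. */
    if (ioctl(dst_vcpu_fd, KVM_SET_VCPU_EVENTS, &ev) < 0)
            perror("KVM_SET_VCPU_EVENTS");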
@@ -1044,7 +1044,6 @@ static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
return ret;
}
-#ifdef __KVM_HAVE_VCPU_EVENTS /* temporary: until 32bit is wired up */
static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
@@ -1070,7 +1069,6 @@ static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
return __kvm_arm_vcpu_set_events(vcpu, events);
}
-#endif /* __KVM_HAVE_VCPU_EVENTS */
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
@@ -1152,7 +1150,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_arm_vcpu_has_attr(vcpu, &attr);
break;
}
-#ifdef __KVM_HAVE_VCPU_EVENTS
case KVM_GET_VCPU_EVENTS: {
struct kvm_vcpu_events events;
@@ -1172,7 +1169,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
return kvm_arm_vcpu_set_events(vcpu, &events);
}
-#endif
default:
r = -EINVAL;
}