@@ -47,6 +47,7 @@ void kvm_skip_instr32(struct kvm_vcpu *vcpu);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
@@ -171,6 +171,49 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
inject_abt64(vcpu, false, addr);
}
+/**
+ * kvm_inject_dabt_excl_atomic - inject a data abort for unsupported exclusive
+ * or atomic access
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
+	u64 esr = 0;
+
+	pend_sync_exception(vcpu);
+
+	if (kvm_vcpu_trap_il_is32bit(vcpu))
+		esr |= ESR_ELx_IL;
+
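+	/*
+	 * Report the abort as coming from a lower EL if the guest was
+	 * executing at EL0, or from the current EL otherwise.
+	 */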
+	if ((cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
+		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
+	else
+		esr |= ESR_ELx_EC_DABT_CUR << ESR_ELx_EC_SHIFT;
+
+	esr |= ESR_ELx_FSC_EXCL_ATOMIC;
+
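+	/*
+	 * Write the faulting address and syndrome at the EL the pended
+	 * exception targets (the guest's EL1, or its EL2 when routed there).
+	 */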
+	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
+		vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
+		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+	} else {
+		vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
+		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
+	}
+}
+
/**
* kvm_inject_pabt - inject a prefetch abort into the guest
* @vcpu: The VCPU to receive the prefetch abort
@@ -1658,6 +1658,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault && device)
return -ENOEXEC;
+	if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) {
+		/*
+		 * If the target address is normal memory on the host, we got
+		 * here because either 1) the guest mapped it as device memory
+		 * and issued LS64 operations on it, or 2) the VMM mistakenly
+		 * reported it as device memory. Warn the VMM and inject the
+		 * DABT back into the guest.
+		 */
+		if (!device)
+			kvm_err("memory attributes may be incorrect for hva 0x%lx\n", hva);
+
+		/*
+		 * Otherwise the target is a piece of device memory on the
+		 * host. Inject the DABT back into the guest, since such
+		 * accesses cannot be supported by this mapping.
+		 */
+		kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
+	}
+
/*
* Potentially reduce shadow S2 permissions to match the guest's own
* S2. For exec faults, we'd only reach this point if the guest
@@ -1836,7 +1855,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
/* Check the stage-2 fault is trans. fault or write fault */
if (!esr_fsc_is_translation_fault(esr) &&
!esr_fsc_is_permission_fault(esr) &&
- !esr_fsc_is_access_flag_fault(esr)) {
+ !esr_fsc_is_access_flag_fault(esr) &&
+ !esr_fsc_is_excl_atomic_fault(esr)) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
kvm_vcpu_trap_get_class(vcpu),
(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1919,6 +1939,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
goto out_unlock;
}
+		/*
+		 * If a FEAT_{LS64, LS64_V} instruction operates on an
+		 * unsupported memory region, a DABT for an unsupported
+		 * exclusive or atomic access is generated. It is
+		 * implementation defined whether this exception is taken
+		 * as a stage-1 DABT or at the final enabled stage of
+		 * translation (stage-2 in this case, which is how we got
+		 * here). If the implementation reports it as a stage-2
+		 * DABT, inject a DABT back into the guest to handle it.
+		 */
+		if (esr_fsc_is_excl_atomic_fault(esr)) {
+			kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
+			ret = 1;
+			goto out_unlock;
+		}
+
/*
* The IPA is reported as [MAX:12], so we need to
* complement it with the bottom 12 bits from the