@@ -165,7 +165,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
*/
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
- if (vcpu_el1_is_32bit(vcpu))
+ if (unlikely(vcpu_is_rec(vcpu)))
+ vcpu->arch.rec.run->enter.flags |= REC_ENTER_FLAG_INJECT_SEA;
+ else if (vcpu_el1_is_32bit(vcpu))
inject_abt32(vcpu, false, addr);
else
inject_abt64(vcpu, false, addr);
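
For a realm vCPU the host cannot forge the data abort itself: the RMM owns the REC's register and exception state, so all KVM can do is request the injection through the run structure it shares with the RMM, and the RMM performs the actual Synchronous External Abort injection on the next REC entry. The sketch below reduces that shared structure to the fields these hunks touch; the names and layout are illustrative assumptions, with the authoritative definitions coming from the RMI interface (asm/rmi_smc.h).

#include <linux/types.h>

/*
 * Illustrative sketch only, not the RMI definition: the host fills in
 * the "enter" half before asking the RMM to run the REC, and reads the
 * "exit" half back when the REC returns to the host.
 */
struct rec_run_sketch {
	struct {
		/* REC_ENTER_FLAG_INJECT_SEA, REC_ENTER_FLAG_EMULATED_MMIO, ... */
		u64 flags;
		/* gprs[0] carries the result of an emulated MMIO read */
		u64 gprs[31];
	} enter;
	struct {
		/* gprs[0] carries the data the realm tried to write */
		u64 gprs[31];
	} exit;
};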
@@ -6,6 +6,7 @@
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
+#include <asm/rmi_smc.h>
#include <trace/events/kvm.h>
#include "trace.h"
@@ -136,14 +137,21 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
&data);
data = vcpu_data_host_to_guest(vcpu, data, len);
- vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
+
+ if (vcpu_is_rec(vcpu))
+ vcpu->arch.rec.run->enter.gprs[0] = data;
+ else
+ vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
}
/*
* The MMIO instruction is emulated and should not be re-executed
* in the guest.
*/
- kvm_incr_pc(vcpu);
+ if (vcpu_is_rec(vcpu))
+ vcpu->arch.rec.run->enter.flags |= REC_ENTER_FLAG_EMULATED_MMIO;
+ else
+ kvm_incr_pc(vcpu);
return 1;
}
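
Both realm-specific branches in this hunk exist because KVM cannot touch a REC's register file or program counter. The hypothetical helpers below (names invented here, relying on vcpu_is_rec() and the rec.run structure introduced by this series) isolate the two decisions purely for illustration.

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/rmi_smc.h>

/*
 * Returning read data: KVM writes the destination GPR directly for an
 * ordinary vCPU, but for a realm only the RMM can, so the data goes
 * into gprs[0] of the shared run structure instead.
 */
static void mmio_return_data(struct kvm_vcpu *vcpu, unsigned long data)
{
	if (vcpu_is_rec(vcpu))
		vcpu->arch.rec.run->enter.gprs[0] = data;
	else
		vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
}

/*
 * Skipping the emulated instruction: KVM advances the PC itself for an
 * ordinary vCPU; for a realm it sets a flag asking the RMM to complete
 * the emulated access and step past the instruction on the next entry.
 */
static void mmio_return_skip(struct kvm_vcpu *vcpu)
{
	if (vcpu_is_rec(vcpu))
		vcpu->arch.rec.run->enter.flags |= REC_ENTER_FLAG_EMULATED_MMIO;
	else
		kvm_incr_pc(vcpu);
}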
@@ -162,14 +170,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
* No valid syndrome? Ask userspace for help if it has
* volunteered to do so, and bail out otherwise.
*
- * In the protected VM case, there isn't much userspace can do
+ * In the protected/realm VM case, there isn't much userspace can do
* though, so directly deliver an exception to the guest.
*/
if (!kvm_vcpu_dabt_isvalid(vcpu)) {
trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
kvm_vcpu_get_hfar(vcpu), fault_ipa);
- if (vcpu_is_protected(vcpu)) {
+ if (vcpu_is_protected(vcpu) || vcpu_is_rec(vcpu)) {
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
}
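
The "valid syndrome" test above is what decides whether the fault can be described to an emulator at all. The sketch below (an assumption drawn from the generic kvm_vcpu_dabt_isvalid() helper, shown only to make the condition concrete) spells out the check: ESR_EL2.ISV is set only when the hardware can describe the access as a single general-purpose register transfer of known size, which is what an external emulator needs; without it, a protected or realm guest gets an abort instead of a trip to userspace.

#include <linux/types.h>
#include <asm/esr.h>

/* Sketch of the "valid syndrome" condition checked above. */
static inline bool dabt_syndrome_is_valid(u64 esr)
{
	return !!(esr & ESR_ELx_ISV);
}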
@@ -25,6 +25,20 @@ static int rec_exit_reason_notimpl(struct kvm_vcpu *vcpu)
static int rec_exit_sync_dabt(struct kvm_vcpu *vcpu)
{
+ struct realm_rec *rec = &vcpu->arch.rec;
+
+ /*
+ * In the case of a write, copy gprs[0] over to the target GPR in
+ * preparation for handling an MMIO write fault. The content to be
+ * written has been saved to gprs[0] by the RMM (even if the guest
+ * used a different register). In the case of a normal memory access
+ * this is redundant (the guest will replay the instruction), but the
+ * overhead is minimal.
+ */
+ if (kvm_vcpu_dabt_iswrite(vcpu) && kvm_vcpu_dabt_isvalid(vcpu))
+ vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu),
+ rec->run->exit.gprs[0]);
+
return kvm_handle_guest_abort(vcpu);
}
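
The copy is what lets the rest of the path stay generic: once the GPR named by the syndrome holds the value from exit.gprs[0], kvm_handle_guest_abort() and the MMIO code can treat the realm's write like any other. The sketch below (not the exact kernel code) shows the consumer side this is preparing for, where the write payload is fetched back out of that register.

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

/*
 * Sketch: the generic MMIO write path reads the payload from the GPR
 * named in the syndrome, which for a realm has just been seeded from
 * rec->run->exit.gprs[0] in rec_exit_sync_dabt() above.
 */
static unsigned long mmio_write_payload(struct kvm_vcpu *vcpu)
{
	int rt = kvm_vcpu_dabt_get_rd(vcpu);

	return vcpu_get_reg(vcpu, rt);
}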