@@ -13,6 +13,7 @@
/* Hyp Configuration Register (HCR) bits */
#define HCR_FWB (UL(1) << 46)
+#define HCR_NV (UL(1) << 42)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
#define HCR_TEA (UL(1) << 37)
@@ -11,5 +11,7 @@ static inline bool nested_virt_in_use(const struct kvm_vcpu *vcpu)
}
int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
+extern bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit);
+extern bool forward_nv_traps(struct kvm_vcpu *vcpu);
#endif /* __ARM64_KVM_NESTED_H */
@@ -24,6 +24,27 @@
#include "trace.h"
+/*
+ * Generic trap-forwarding helper for nested virtualization.
+ *
+ * If @vcpu is running a nested guest (virtual EL2 in use) and the guest
+ * hypervisor has set @control_bit in its virtual HCR_EL2, re-inject the
+ * current synchronous exception into the virtual EL2 instead of handling
+ * it in the host.
+ *
+ * Returns true when the trap was forwarded to the virtual EL2 (the
+ * caller must not emulate it further), false when it should be handled
+ * locally.
+ */
+bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+	bool control_bit_set;
+
+	/* No virtual EL2: nothing to forward. */
+	if (!nested_virt_in_use(vcpu))
+		return false;
+
+	control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
+	/* Only traps taken from below (virtual) EL2 are forwarded. */
+	if (!vcpu_mode_el2(vcpu) && control_bit_set) {
+		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu));
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Forward the current trap to the virtual EL2 when the guest hypervisor
+ * has set HCR_EL2.NV. Returns true if the trap was forwarded.
+ */
+bool forward_nv_traps(struct kvm_vcpu *vcpu)
+{
+	return forward_traps(vcpu, HCR_NV);
+}
+
+
/* This is borrowed from get_except_vector in inject_fault.c */
static u64 get_el2_except_vector(struct kvm_vcpu *vcpu,
enum exception_type type)
@@ -55,6 +76,13 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
u64 spsr, elr, mode;
bool direct_eret;
+ /*
+ * Forward this trap to the virtual EL2 if the virtual
+ * HCR_EL2.NV bit is set and this is coming from !EL2.
+ */
+ if (forward_nv_traps(vcpu))
+ return;
+
/*
* Going through the whole put/load motions is a waste of time
* if this is a VHE guest hypervisor returning to its own
@@ -65,6 +65,13 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
+ /*
+ * Forward this trapped smc instruction to the virtual EL2 if
+ * the guest has asked for it.
+ */
+ if (forward_traps(vcpu, HCR_TSC))
+ return 1;
+
/*
* "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
@@ -392,10 +392,19 @@ static u32 get_ccsidr(u32 csselr)
return ccsidr;
}
+/*
+ * Return true if the trapped access targets one of the *_EL12 register
+ * aliases, identified by Op1 == 5 in the system register encoding.
+ */
+static bool el12_reg(struct sys_reg_params *p)
+{
+	/* All *_EL12 registers have Op1=5. */
+	return (p->Op1 == 5);
+}
+
static bool access_rw(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ if (el12_reg(p) && forward_nv_traps(vcpu))
+ return false;
+
if (p->is_write)
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
else
@@ -458,6 +467,9 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
u64 val;
int reg = r->reg;
+ if (el12_reg(p) && forward_nv_traps(vcpu))
+ return false;
+
BUG_ON(!vcpu_mode_el2(vcpu) && !p->is_write);
if (!p->is_write) {
@@ -1632,6 +1644,9 @@ static bool access_elr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ if (el12_reg(p) && forward_nv_traps(vcpu))
+ return false;
+
if (p->is_write)
vcpu->arch.ctxt.gp_regs.elr_el1 = p->regval;
else
@@ -1644,6 +1659,9 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ if (el12_reg(p) && forward_nv_traps(vcpu))
+ return false;
+
if (p->is_write)
vcpu->arch.ctxt.gp_regs.spsr[KVM_SPSR_EL1] = p->regval;
else
@@ -1656,6 +1674,9 @@ static bool access_spsr_el2(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ if (el12_reg(p) && forward_nv_traps(vcpu))
+ return false;
+
if (p->is_write)
vcpu_write_sys_reg(vcpu, p->regval, SPSR_EL2);
else