@@ -40,6 +40,13 @@ static inline unsigned char vcpu_mode(struct kvm_vcpu *vcpu)
return modes_table[vcpu->arch.regs.cpsr & 0xf];
}
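+/* Exit handlers, one per HSR exception class, dispatched from handle_exit(): */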
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
/*
* Return the SPSR for the specified mode of the virtual CPU.
*/
@@ -33,6 +33,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_emulate.h>
#include "trace.h"
@@ -297,16 +298,78 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
return 0;
}
+static inline int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ unsigned long hsr_ec;
+
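+ /* HSR[31:26] is the exception class (EC), describing why the guest trapped */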
+ hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
+ switch (hsr_ec) {
+ case HSR_EC_WFI:
+ return kvm_handle_wfi(vcpu, run);
+ case HSR_EC_CP15_32:
+ case HSR_EC_CP15_64:
+ return kvm_handle_cp15_access(vcpu, run);
+ case HSR_EC_CP14_MR:
+ case HSR_EC_CP14_64:
+ return kvm_handle_cp14_access(vcpu, run);
+ case HSR_EC_CP14_LS:
+ return kvm_handle_cp14_load_store(vcpu, run);
+ case HSR_EC_CP_0_13:
+ return kvm_handle_cp_0_13_access(vcpu, run);
+ case HSR_EC_CP10_ID:
+ return kvm_handle_cp10_id(vcpu, run);
+ case HSR_EC_SVC_HYP:
+ /* SVC called from Hyp mode should never get here */
+ kvm_msg("SVC called from Hyp mode shouldn't go here");
+ BUG();
+ case HSR_EC_HVC:
+ kvm_err(-EINVAL, "Guest called HVC, not supported");
+ return -EINVAL;
+ case HSR_EC_IABT:
+ case HSR_EC_DABT:
+ return kvm_handle_guest_abort(vcpu, run);
+ case HSR_EC_IABT_HYP:
+ case HSR_EC_DABT_HYP:
+ /* The hypervisor should never cause aborts */
+ kvm_msg("The hypervisor itself shouldn't cause aborts");
+ BUG();
+ default:
+ kvm_msg("Unkown exception class: %08x (%08x)", hsr_ec,
+ vcpu->arch.hsr);
+ BUG();
+ }
+
+ return 0;
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
unsigned long flags;
+ int ret;
- local_irq_save(flags);
- __kvm_vcpu_run(vcpu);
- local_irq_restore(flags);
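+ /*
+ * The world-switch loop: enter the guest with interrupts disabled and,
+ * as long as we only exited to serve a host interrupt, go straight back in.
+ */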
+ for (;;) {
+ local_irq_save(flags);
+ ret = __kvm_vcpu_run(vcpu);
+ local_irq_restore(flags);
- KVMARM_NOT_IMPLEMENTED();
- return -EINVAL;
+ if (ret == ARM_EXCEPTION_IRQ)
+ continue;
+
+ if (ret != ARM_EXCEPTION_HVC) {
+ ret = -EINVAL;
+ kvm_err(ret, "Unsupported exception type");
+ break;
+ }
+
+ ret = handle_exit(vcpu, run);
+ if (ret) {
+ kvm_err(ret, "Error in handle_exit");
+ break;
+ }
+ }
+
+ return ret;
}
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
@@ -14,8 +14,33 @@
*
*/
+#include <linux/mm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
+#include "trace.h"
+
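+/*
+ * A guest coprocessor access decoded from the HSR: the register specifiers
+ * (CRn/CRm/Op1/Op2), the guest GPR(s) involved (Rt1, plus Rt2 for 64-bit
+ * mcrr/mrrc accesses) and the access direction.
+ */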
+struct coproc_params {
+ unsigned long CRm;
+ unsigned long CRn;
+ unsigned long Op1;
+ unsigned long Op2;
+ unsigned long Rt1;
+ unsigned long Rt2;
+ bool is_64bit;
+ bool is_write;
+};
+
+#define CP15_OP(_vcpu, _params, _cp15_reg) \
+do { \
+ if (_params->is_write) \
+ _vcpu->arch.cp15._cp15_reg = vcpu_reg(_vcpu, _params->Rt1); \
+ else \
+ vcpu_reg(_vcpu, _params->Rt1) = _vcpu->arch.cp15._cp15_reg; \
+} while (0)
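+/*
+ * For example, CP15_OP(vcpu, p, c10_PRRR) copies the guest register Rt1 into
+ * the shadow vcpu->arch.cp15.c10_PRRR on a write, and the reverse on a read.
+ */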
+
+
/*
* Return a pointer to the register number valid in the specified mode of
* the virtual CPU.
@@ -68,3 +93,144 @@ u32* kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode)
BUG();
return NULL;
}
+
+static inline void print_cp_instr(struct coproc_params *p)
+{
+ if (p->is_64bit) {
+ kvm_msg(" %s\tp15, %u, r%u, r%u, c%u",
+ (p->is_write) ? "mcrr" : "mrrc",
+ p->Op1, p->Rt1, p->Rt2, p->CRm);
+ } else {
+ kvm_msg(" %s\tp15, %u, r%u, c%u, c%u, %u",
+ (p->is_write) ? "mcr" : "mrc",
+ p->Op1, p->Rt1, p->CRn, p->CRm, p->Op2);
+ }
+}
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ KVMARM_NOT_IMPLEMENTED();
+ return -EINVAL;
+}
+
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ KVMARM_NOT_IMPLEMENTED();
+ return -EINVAL;
+}
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ KVMARM_NOT_IMPLEMENTED();
+ return -EINVAL;
+}
+
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ KVMARM_NOT_IMPLEMENTED();
+ return -EINVAL;
+}
+
+/**
+ * emulate_cp15_cp10_access -- emulates cp15 accesses for CRn == 10
+ * @vcpu: The VCPU pointer
+ * @p: The coprocessor parameters struct pointer holding trap inst. details
+ *
+ * This function may not need to exist - if we can ignore guest attempts to
+ * tamper with TLB lockdowns then it should be enough to store/restore the
+ * host/guest PRRR and NMRR memory remap registers and allow guest direct access
+ * to these registers.
+ */
+static int emulate_cp15_cp10_access(struct kvm_vcpu *vcpu,
+ struct coproc_params *p)
+{
+ BUG_ON(p->CRn != 10);
+ BUG_ON(p->is_64bit);
+
+ if ((p->CRm == 0 || p->CRm == 1 || p->CRm == 4 || p->CRm == 8) &&
+ (p->Op2 <= 7)) {
+ /* TLB Lockdown operations - ignored */
+ return 0;
+ }
+
+ if (p->CRm == 2 && p->Op2 == 0) {
+ CP15_OP(vcpu, p, c10_PRRR);
+ return 0;
+ }
+
+ if (p->CRm == 2 && p->Op2 == 1) {
+ CP15_OP(vcpu, p, c10_NMRR);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * kvm_handle_cp15_access -- handles a trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ *
+ * Investigates the CRn/CRm and whether this was an mcr/mrc or mcrr/mrrc
+ * access, and either errors out if the operation is not supported (should
+ * we maybe raise undefined to the guest instead?) or emulates the access.
+ */
+int kvm_handle_cp15_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ unsigned long hsr_ec, instr_len;
+ struct coproc_params params;
+ int ret = 0;
+
+ hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
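+ /*
+ * Decode the ISS part of the HSR: CRm lives in bits [4:1], Rt in
+ * [8:5] and the direction in bit [0]; 32-bit accesses also encode
+ * CRn in [13:10], Op1 in [16:14] and Op2 in [19:17], while 64-bit
+ * accesses encode Op1 in [19:16] and Rt2 in [13:10].
+ */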
+ params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+ params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+ BUG_ON(params.Rt1 >= 15);
+ params.is_write = ((vcpu->arch.hsr & 1) == 0);
+ params.is_64bit = (hsr_ec == HSR_EC_CP15_64);
+
+ if (params.is_64bit) {
+ /* mrrc, mcrr operation */
+ params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+ params.Op2 = 0;
+ params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+ BUG_ON(params.Rt2 >= 15);
+ params.CRn = 0;
+ } else {
+ params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
+ params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
+ params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+ params.Rt2 = 0;
+ }
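+ /*
+ * Example: a guest "mrc p15, 0, r2, c10, c2, 0" (a PRRR read) traps
+ * with CRn == 10, CRm == 2, Op1 == Op2 == 0, Rt1 == 2 and
+ * is_write == false, and is routed to emulate_cp15_cp10_access().
+ */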
+
+ /* So far no mrrc/mcrr accesses are emulated */
+ if (params.is_64bit)
+ goto unsupp_err_out;
+
+ switch (params.CRn) {
+ case 10:
+ ret = emulate_cp15_cp10_access(vcpu, &params);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto unsupp_err_out;
+
+ /* Skip instruction, since it was emulated */
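+ /* HSR[25] is the IL bit: set for a 32-bit instruction, clear for 16-bit Thumb */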
+ instr_len = ((vcpu->arch.hsr >> 25) & 1) ? 4 : 2;
+ vcpu_reg(vcpu, 15) += instr_len;
+
+ return ret;
+unsupp_err_out:
+ kvm_msg("Unsupported guest CP15 access:");
+ print_cp_instr(&params);
+ return -EINVAL;
+}
+
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ KVMARM_NOT_IMPLEMENTED();
+ return -EINVAL;
+}