From patchwork Fri Jun  3 15:04:05 2011
X-Patchwork-Submitter: Christoffer Dall
X-Patchwork-Id: 847442
Subject: [PATCH v3 6/8] ARM: KVM: Emulation framework and CP15 emulation
From: Christoffer Dall
To: catalin.marinas@arm.com, android-virt@lists.cs.columbia.edu
Cc: s.raho@virtualopensystems.com, a.motakis@virtualopensystems.com,
    c.dall@virtualopensystems.com, kvm@vger.kernel.org,
    a.costa@virtualopensystems.com
Date: Fri, 03 Jun 2011 17:04:05 +0200
Message-ID: <20110603150405.17011.12946.stgit@ubuntu>
In-Reply-To: <20110603150318.17011.82777.stgit@ubuntu>
References: <20110603150318.17011.82777.stgit@ubuntu>
User-Agent: StGit/0.15

Adds a few stub functions and support for emulating accesses to the
memory remap registers PRRR and NMRR. Provides a general framework for
handling trapped CP15 accesses.
---
 arch/arm/include/asm/kvm_emulate.h |    7 ++
 arch/arm/kvm/arm.c                 |   73 +++++++++++++++-
 arch/arm/kvm/arm_emulate.c         |  166 ++++++++++++++++++++++++++++++++++++
 3 files changed, 241 insertions(+), 5 deletions(-)

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 8eed752..6483b2a 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -40,6 +40,13 @@ static inline unsigned char vcpu_mode(struct kvm_vcpu *vcpu)
 	return modes_table[vcpu->arch.regs.cpsr & 0xf];
 }
 
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
 /*
  * Return the SPSR for the specified mode of the virtual CPU.
  */
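
Note on the arm.c changes below: on a trap to Hyp mode, the HSR (Hyp
Syndrome Register) records why the guest exited, and handle_exit()
dispatches on its exception class field. A minimal sketch of that
extraction, assuming HSR.EC occupies bits [31:26] and matches the
HSR_EC/HSR_EC_SHIFT definitions in asm/kvm_arm.h (not part of this
patch):

	/* Sketch only: HSR_EC/HSR_EC_SHIFT assumed to match asm/kvm_arm.h. */
	#define HSR_EC_SHIFT	26
	#define HSR_EC		(0x3fUL << HSR_EC_SHIFT)

	static inline unsigned long hsr_exception_class(unsigned long hsr)
	{
		return (hsr & HSR_EC) >> HSR_EC_SHIFT;	/* e.g. HSR_EC_CP15_32 */
	}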
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 1a9f168..abed683 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -33,6 +33,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <...>
 
 #include "trace.h"
 
@@ -297,16 +298,78 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return 0;
 }
 
+static inline int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	unsigned long hsr_ec;
+
+	hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
+	switch (hsr_ec) {
+	case HSR_EC_WFI:
+		return kvm_handle_wfi(vcpu, run);
+	case HSR_EC_CP15_32:
+	case HSR_EC_CP15_64:
+		return kvm_handle_cp15_access(vcpu, run);
+	case HSR_EC_CP14_MR:
+		return kvm_handle_cp14_access(vcpu, run);
+	case HSR_EC_CP14_LS:
+		return kvm_handle_cp14_load_store(vcpu, run);
+	case HSR_EC_CP14_64:
+		return kvm_handle_cp14_access(vcpu, run);
+	case HSR_EC_CP_0_13:
+		return kvm_handle_cp_0_13_access(vcpu, run);
+	case HSR_EC_CP10_ID:
+		return kvm_handle_cp10_id(vcpu, run);
+	case HSR_EC_SVC_HYP:
+		/* SVC called from Hyp mode should never get here */
+		kvm_msg("SVC called from Hyp mode shouldn't go here");
+		BUG();
+	case HSR_EC_HVC:
+		kvm_err(-EINVAL, "Guest called HVC, not supported");
+		return -EINVAL;
+	case HSR_EC_IABT:
+	case HSR_EC_DABT:
+		return kvm_handle_guest_abort(vcpu, run);
+	case HSR_EC_IABT_HYP:
+	case HSR_EC_DABT_HYP:
+		/* The hypervisor should never cause aborts */
+		kvm_msg("The hypervisor itself shouldn't cause aborts");
+		BUG();
+	default:
+		kvm_msg("Unknown exception class: %08lx (%08x)", hsr_ec,
+			vcpu->arch.hsr);
+		BUG();
+	}
+
+	return 0;
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	unsigned long flags;
+	int ret;
 
-	local_irq_save(flags);
-	__kvm_vcpu_run(vcpu);
-	local_irq_restore(flags);
+	for (;;) {
+		local_irq_save(flags);
+		ret = __kvm_vcpu_run(vcpu);
+		local_irq_restore(flags);
 
-	KVMARM_NOT_IMPLEMENTED();
-	return -EINVAL;
+		if (ret == ARM_EXCEPTION_IRQ)
+			continue;
+
+		if (ret != ARM_EXCEPTION_HVC) {
+			ret = -EINVAL;
+			kvm_err(ret, "Unsupported exception type");
+			break;
+		}
+
+		ret = handle_exit(vcpu, run);
+		if (ret) {
+			kvm_err(ret, "Error in handle_exit");
+			break;
+		}
+	}
+
+	return ret;
 }
 
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
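
A worked example of the decoding done by kvm_handle_cp15_access() in
arm_emulate.c below: a guest read of PRRR, "mrc p15, 0, r2, c10, c2, 0",
traps with CRn=10, CRm=2, Op1=0, Op2=0, Rt1=2 and is_write false, and
emulate_cp15_cp10_access() then copies vcpu->arch.cp15.c10_PRRR into the
guest's r2. A self-contained sketch of the 32-bit field extraction, using
the bit positions from this patch (the HSR value is hand-built for
illustration, not taken from real hardware):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		/* mrc p15, 0, r2, c10, c2, 0 (read PRRR); the direction
		 * bit (bit 0) is set for reads, clear for writes. */
		unsigned long hsr = (0UL << 17) | (0UL << 14) | (10UL << 10) |
				    (2UL << 5) | (2UL << 1) | 1UL;

		unsigned long CRm = (hsr >> 1) & 0xf;	/* 2  */
		unsigned long Rt1 = (hsr >> 5) & 0xf;	/* 2  */
		unsigned long CRn = (hsr >> 10) & 0xf;	/* 10 */
		unsigned long Op1 = (hsr >> 14) & 0x7;	/* 0  */
		unsigned long Op2 = (hsr >> 17) & 0x7;	/* 0  */
		bool is_write = ((hsr & 1) == 0);	/* false: a read */

		printf("CRn=%lu CRm=%lu Op1=%lu Op2=%lu Rt1=r%lu %s\n",
		       CRn, CRm, Op1, Op2, Rt1, is_write ? "write" : "read");
		return 0;
	}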
"mcr" : "mrc", + p->Op1, p->Rt1, p->CRn, p->CRm, p->Op2); + } +} + +int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + KVMARM_NOT_IMPLEMENTED(); + return -EINVAL; +} + +int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + KVMARM_NOT_IMPLEMENTED(); + return -EINVAL; +} + +int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + KVMARM_NOT_IMPLEMENTED(); + return -EINVAL; +} + +int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + KVMARM_NOT_IMPLEMENTED(); + return -EINVAL; +} + +/** + * emulate_cp15_cp15_access -- emulates cp15 accesses for CRn == 10 + * @vcpu: The VCPU pointer + * @p: Thr coprocessor parameters struct pointer holding trap inst. details + * + * This funciton may not need to exist - if we can ignore guest attempts to + * tamper with TLB lockdowns then it should be enough to store/restore the + * host/guest PRRR and NMRR memory remap registers and allow guest direct access + * to these registers. + */ +static int emulate_cp15_cp10_access(struct kvm_vcpu *vcpu, + struct coproc_params *p) +{ + BUG_ON(p->CRn != 10); + BUG_ON(p->is_64bit); + + if ((p->CRm == 0 || p->CRm == 1 || p->CRm == 4 || p->CRm == 8) && + (p->Op2 <= 7)) { + /* TLB Lockdown operations - ignored */ + return 0; + } + + if (p->CRm == 2 && p->Op2 == 0) { + CP15_OP(vcpu, p, c10_PRRR); + return 0; + } + + if (p->CRm == 2 && p->Op2 == 1) { + CP15_OP(vcpu, p, c10_NMRR); + return 0; + } + + return -EINVAL; +} + +/** + * kvm_handle_cp15_access -- handles a trap on a guest CP15 access + * @vcpu: The VCPU pointer + * @run: The kvm_run struct + * + * Investigates the CRn/CRm and wether this was mcr/mrc or mcrr/mrrc and either + * simply errors out if the operation was not supported (should maybe raise + * undefined to guest instead?) and otherwise emulated access. + */ +int kvm_handle_cp15_access(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + unsigned long hsr_ec, instr_len; + struct coproc_params params; + int ret = 0; + + hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; + params.CRm = (vcpu->arch.hsr >> 1) & 0xf; + params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; + BUG_ON(params.Rt1 >= 15); + params.is_write = ((vcpu->arch.hsr & 1) == 0); + params.is_64bit = (hsr_ec == HSR_EC_CP15_64); + + if (params.is_64bit) { + /* mrrc, mccr operation */ + params.Op1 = (vcpu->arch.hsr >> 16) & 0xf; + params.Op2 = 0; + params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf; + BUG_ON(params.Rt2 >= 15); + params.CRn = 0; + } else { + params.CRn = (vcpu->arch.hsr >> 10) & 0xf; + params.Op1 = (vcpu->arch.hsr >> 14) & 0x7; + params.Op2 = (vcpu->arch.hsr >> 17) & 0x7; + params.Rt2 = 0; + } + + /* So far no mrrc/mcrr accesses are emulated */ + if (params.is_64bit) + goto unsupp_err_out; + + switch (params.CRn) { + case 10: + ret = emulate_cp15_cp10_access(vcpu, ¶ms); + break; + default: + ret = -EINVAL; + break; + } + + if (ret) + goto unsupp_err_out; + + /* Skip instruction, since it was emulated */ + instr_len = ((vcpu->arch.hsr >> 25) & 1) ? 4 : 2; + vcpu_reg(vcpu, 15) += instr_len; + + return ret; +unsupp_err_out: + kvm_msg("Unsupported guest CP15 access:"); + print_cp_instr(¶ms); + return -EINVAL; +} + +int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + KVMARM_NOT_IMPLEMENTED(); + return -EINVAL; +}