From patchwork Mon Jun 3 20:54:26 2013
X-Patchwork-Submitter: Mihai Caraman
X-Patchwork-Id: 2655111
From: Mihai Caraman
Subject: [RFC PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
Date: Mon, 3 Jun 2013 23:54:26 +0300
Message-ID: <1370292868-2697-5-git-send-email-mihai.caraman@freescale.com>
In-Reply-To: <1370292868-2697-1-git-send-email-mihai.caraman@freescale.com>
References: <1370292868-2697-1-git-send-email-mihai.caraman@freescale.com>
X-Mailing-List: kvm@vger.kernel.org

KVM Book3E FPU support gracefully reuses the host infrastructure, so we do
the same for AltiVec. To keep AltiVec lazy, call kvmppc_load_guest_altivec()
only when returning to the guest instead of on every sched-in.
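
In outline, the save/restore sequence added around __kvmppc_vcpu_run() mirrors
the existing FPU path (a condensed sketch of the booke.c hunks below, not a
literal excerpt):

	/* entry: stash userspace VEC state, install the guest's (preemption off) */
	enable_kernel_altivec();
	memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
	vscr = current->thread.vscr;
	memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
	current->thread.vscr = vcpu->arch.vscr;
	kvmppc_load_guest_altivec(vcpu);	/* fault guest state into the AltiVec unit */

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* exit: flush the unit into the thread, save guest state, restore userspace state */
	if (current->thread.regs->msr & MSR_VEC)
		giveup_altivec(current);
	memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
	vcpu->arch.vscr = current->thread.vscr;
	memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
	current->thread.vscr = vscr;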
Signed-off-by: Mihai Caraman
---
 arch/powerpc/kvm/booke.c  | 74 +++++++++++++++++++++++++++++++++++++++++++-
 arch/powerpc/kvm/e500mc.c |  8 +++++
 2 files changed, 80 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index c08b04b..01eb635 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -134,6 +134,23 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * Simulate an AltiVec unavailable fault to load guest state
+ * from the thread into the AltiVec unit.
+ * Must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		if (!(current->thread.regs->msr & MSR_VEC)) {
+			load_up_altivec(NULL);
+			current->thread.regs->msr |= MSR_VEC;
+		}
+	}
+#endif
+}
+
+/*
  * Helper function for "full" MSR writes. No need to call this if only
  * EE/CE/ME/DE/RI are changing.
  */
@@ -661,6 +678,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	u64 fpr[32];
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	vector128 vr[32];
+	vector128 vscr;
+	int used_vr = 0;
+#endif
+
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
@@ -699,6 +722,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save userspace VEC state on the stack */
+		enable_kernel_altivec();
+		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+		vscr = current->thread.vscr;
+		used_vr = current->thread.used_vr;
+
+		/* Restore guest VEC state to the thread */
+		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+		current->thread.vscr = vcpu->arch.vscr;
+
+		kvmppc_load_guest_altivec(vcpu);
+	}
+#endif
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
 	/* No need for kvm_guest_exit. It's done in handle_exit.
@@ -719,6 +758,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	current->thread.fpexc_mode = fpexc_mode;
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save AltiVec state to the thread */
+		if (current->thread.regs->msr & MSR_VEC)
+			giveup_altivec(current);
+
+		/* Save guest state */
+		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
+		vcpu->arch.vscr = current->thread.vscr;
+
+		/* Restore userspace state */
+		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+		current->thread.vscr = vscr;
+		current->thread.used_vr = used_vr;
+	}
+#endif
+
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
@@ -822,6 +878,19 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
 	}
 }
 
+/*
+ * Always returns true if the AltiVec unit is present, see
+ * kvmppc_core_check_processor_compat().
+ */
+static inline bool kvmppc_supports_altivec(void)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		return true;
+#endif
+	return false;
+}
+
 static inline bool kvmppc_supports_spe(void)
 {
 #ifdef CONFIG_SPE
@@ -947,7 +1016,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		 */
 		bool handled = false;
 
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 #ifdef CONFIG_SPE
 			if (cpu_has_feature(CPU_FTR_SPE))
 				if (vcpu->arch.shared->msr & MSR_SPE) {
@@ -976,7 +1045,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		 * The interrupt is shared, KVM support for the featured unit
 		 * is detected at run-time.
 		 */
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 			kvmppc_booke_queue_irqprio(vcpu,
 				BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
 			r = RESUME_GUEST;
@@ -1188,6 +1257,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
 	} else {
 		kvmppc_lazy_ee_enable();
+		kvmppc_load_guest_altivec(vcpu);
 	}
 }

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index c3bdc0a..9d7f38e 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -172,8 +172,16 @@ int kvmppc_core_check_processor_compat(void)
 		r = 0;
 	else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
 		r = 0;
+#ifdef CONFIG_ALTIVEC
+	/*
+	 * Since guests have the privilege to enable AltiVec, we need AltiVec
+	 * support in the host to save/restore their context.
+	 * Don't use CPU_FTR_ALTIVEC to identify cores with an AltiVec unit
+	 * because it is cleared in the absence of CONFIG_ALTIVEC!
+	 */
 	else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
 		r = 0;
+#endif
 	else
 		r = -ENOTSUPP;
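
A note on the lazy part: guest AltiVec state is reloaded only on the
resume-to-guest path of kvmppc_handle_exit() (see the last booke.c hunk
above), roughly:

	} else {
		kvmppc_lazy_ee_enable();
		kvmppc_load_guest_altivec(vcpu);	/* reload only when actually re-entering the guest */
	}

so, per the changelog, the AltiVec unit is not touched on every sched-in of
the vcpu thread.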