
[RFC,4/6] KVM: PPC: Book3E: Add AltiVec support

Message ID 1370292868-2697-5-git-send-email-mihai.caraman@freescale.com (mailing list archive)
State New, archived

Commit Message

Mihai Caraman June 3, 2013, 8:54 p.m. UTC
KVM Book3E FPU support gracefully reuses the host infrastructure, so we do the
same for AltiVec. To keep AltiVec lazy, call kvmppc_load_guest_altivec() only
when returning to the guest instead of on each sched-in.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
 arch/powerpc/kvm/booke.c  |   74 +++++++++++++++++++++++++++++++++++++++++++-
 arch/powerpc/kvm/e500mc.c |    8 +++++
 2 files changed, 80 insertions(+), 2 deletions(-)
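
In outline, the lazy handling boils down to two call sites (a sketch only; the
complete change is in the patch below): guest AltiVec state is loaded once
before entering the guest in kvmppc_vcpu_run(), and re-loaded on the
return-to-guest path in kvmppc_handle_exit() rather than on every sched-in.

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;

	/* ... save userspace FP/AltiVec state, copy guest state into the thread ... */
	kvmppc_load_guest_fp(vcpu);
	kvmppc_load_guest_altivec(vcpu);	/* once, just before entering the guest */

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* ... flush guest state back to the vcpu, restore userspace state ... */
	return ret;
}

On the exit side, kvmppc_handle_exit() calls kvmppc_load_guest_altivec() again
only when it is about to resume the guest (right after kvmppc_lazy_ee_enable()),
so exits that bounce out to the host do not pay the reload cost.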

Comments

Caraman Mihai Claudiu-B02008 June 5, 2013, 9:23 a.m. UTC | #1
> > + * Simulate AltiVec unavailable fault to load guest state
> > + * from thread to AltiVec unit.
> > + * It must be called with preemption disabled.
> > + */
> > +static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
> > +{
> > +#ifdef CONFIG_ALTIVEC
> > +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
> > +		if (!(current->thread.regs->msr & MSR_VEC)) {
> > +			load_up_altivec(NULL);
> > +			current->thread.regs->msr |= MSR_VEC;
> > +		}
> > +	}
> > +#endif
> 
> Why not use kvmppc_supports_altivec()?  In fact, there's nothing
> KVM-specific about these functions...

I will do so; I had this code before kvmppc_supports_altivec() :)
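
For reference, the reworked helper might then look something like this (a
sketch of the suggestion only, not the posted patch; the #ifdef stays so that
load_up_altivec() is only referenced when CONFIG_ALTIVEC is built in):

static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	/* kvmppc_supports_altivec() already folds in the CPU_FTR_ALTIVEC check */
	if (kvmppc_supports_altivec() &&
	    !(current->thread.regs->msr & MSR_VEC)) {
		load_up_altivec(NULL);
		current->thread.regs->msr |= MSR_VEC;
	}
#endif
}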

> >  static inline bool kvmppc_supports_spe(void)
> >  {
> >  #ifdef CONFIG_SPE
> > @@ -947,7 +1016,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> >  		 */
> >  		bool handled = false;
> >
> > -		if (kvmppc_supports_spe()) {
> > +		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
> >  #ifdef CONFIG_SPE
> >  			if (cpu_has_feature(CPU_FTR_SPE))
> >  				if (vcpu->arch.shared->msr & MSR_SPE) {
> 
> The distinction between how you're handling SPE and Altivec here
> doesn't really have anything to do with SPE versus Altivec -- it's
> PR-mode versus HV-mode.

I was misled by the MSR_SPE bit; we should rename it to MSR_SPV.
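
For context (illustration only, not part of this patch): MSR_SPE in
reg_booke.h and MSR_VEC in reg.h already name the same MSR bit, so a shared
define would only make that overlap explicit, e.g.:

/* Hypothetical shared name; this is the bit currently called MSR_SPE / MSR_VEC */
#define MSR_SPV	(1 << 25)	/* SP/embedded FP/vector unit available */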

-Mike



Patch

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index c08b04b..01eb635 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -134,6 +134,23 @@  static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		if (!(current->thread.regs->msr & MSR_VEC)) {
+			load_up_altivec(NULL);
+			current->thread.regs->msr |= MSR_VEC;
+		}
+	}
+#endif
+}
+
+/*
  * Helper function for "full" MSR writes.  No need to call this if only
  * EE/CE/ME/DE/RI are changing.
  */
@@ -661,6 +678,12 @@  int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	u64 fpr[32];
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	vector128 vr[32];
+	vector128 vscr;
+	int used_vr = 0;
+#endif
+
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
@@ -699,6 +722,22 @@  int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save userspace VEC state in stack */
+		enable_kernel_altivec();
+		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+		vscr = current->thread.vscr;
+		used_vr = current->thread.used_vr;
+
+		/* Restore guest VEC state to thread */
+		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+		current->thread.vscr = vcpu->arch.vscr;
+
+		kvmppc_load_guest_altivec(vcpu);
+	}
+#endif
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
 	/* No need for kvm_guest_exit. It's done in handle_exit.
@@ -719,6 +758,23 @@  int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	current->thread.fpexc_mode = fpexc_mode;
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save AltiVec state to thread */
+		if (current->thread.regs->msr & MSR_VEC)
+			giveup_altivec(current);
+
+		/* Save guest state */
+		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
+		vcpu->arch.vscr = current->thread.vscr;
+
+		/* Restore userspace state */
+		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+		current->thread.vscr = vscr;
+		current->thread.used_vr = used_vr;
+	}
+#endif
+
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
@@ -822,6 +878,19 @@  static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
 	}
 }
 
+/*
+ * Always returns true if the AltiVec unit is present, see
+ * kvmppc_core_check_processor_compat().
+ */
+static inline bool kvmppc_supports_altivec(void)
+{
+#ifdef CONFIG_ALTIVEC
+		if (cpu_has_feature(CPU_FTR_ALTIVEC))
+			return true;
+#endif
+	return false;
+}
+
 static inline bool kvmppc_supports_spe(void)
 {
 #ifdef CONFIG_SPE
@@ -947,7 +1016,7 @@  int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		 */
 		bool handled = false;
 
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 #ifdef CONFIG_SPE
 			if (cpu_has_feature(CPU_FTR_SPE))
 				if (vcpu->arch.shared->msr & MSR_SPE) {
@@ -976,7 +1045,7 @@  int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		 * The interrupt is shared, KVM support for the featured unit
 		 * is detected at run-time.
 		 */
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 			kvmppc_booke_queue_irqprio(vcpu,
 				BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
 			r = RESUME_GUEST;
@@ -1188,6 +1257,7 @@  int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
 		} else {
 			kvmppc_lazy_ee_enable();
+			kvmppc_load_guest_altivec(vcpu);
 		}
 	}
 
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index c3bdc0a..9d7f38e 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -172,8 +172,16 @@  int kvmppc_core_check_processor_compat(void)
 		r = 0;
 	else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
 		r = 0;
+#ifdef CONFIG_ALTIVEC
+	/*
+	 * Since guests have the privilege to enable AltiVec, we need AltiVec
+	 * support in the host to save/restore their context.
+	 * Don't use CPU_FTR_ALTIVEC to identify cores with an AltiVec unit
+	 * because it's cleared in the absence of CONFIG_ALTIVEC!
+	 */
 	else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
 		r = 0;
+#endif
 	else
 		r = -ENOTSUPP;