[4/6] KVM: PPC: Book3E: Add AltiVec support

Message ID 1372855359-13452-5-git-send-email-mihai.caraman@freescale.com (mailing list archive)
State New, archived

Commit Message

Mihai Caraman July 3, 2013, 12:42 p.m. UTC
Add KVM Book3E AltiVec support. KVM Book3E FPU support gracefully reuses the
host infrastructure, so follow the same approach for AltiVec.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
 arch/powerpc/kvm/booke.c |   72 ++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 70 insertions(+), 2 deletions(-)

Comments

Alexander Graf July 3, 2013, 3:17 p.m. UTC | #1
On 03.07.2013, at 14:42, Mihai Caraman wrote:

> Add KVM Book3E AltiVec support. KVM Book3E FPU support gracefully reuses the
> host infrastructure, so follow the same approach for AltiVec.
> 
> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> ---
> arch/powerpc/kvm/booke.c |   72 ++++++++++++++++++++++++++++++++++++++++++++-
> 1 files changed, 70 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index 3cae2e3..c3c3af6 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -98,6 +98,19 @@ static inline bool kvmppc_supports_spe(void)
> 	return false;
> }
> 
> +/*
> + * Always returns true if the AltiVec unit is present, see
> + * kvmppc_core_check_processor_compat().
> + */
> +static inline bool kvmppc_supports_altivec(void)
> +{
> +#ifdef CONFIG_ALTIVEC
> +	if (cpu_has_feature(CPU_FTR_ALTIVEC))
> +		return true;
> +#endif
> +	return false;
> +}
> +
> #ifdef CONFIG_SPE
> void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
> {
> @@ -151,6 +164,21 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
> }
> 
> /*
> + * Simulate an AltiVec unavailable fault to load guest state
> + * from the thread struct into the AltiVec unit.
> + * Must be called with preemption disabled.
> + */
> +static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
> +{
> +	if (kvmppc_supports_altivec()) {
> +		if (!(current->thread.regs->msr & MSR_VEC)) {
> +			load_up_altivec(NULL);
> +			current->thread.regs->msr |= MSR_VEC;

Does this ensure that the kernel saves / restores all altivec state on task switch? Does it load it again when it schedules us in? Would definitely be worth a comment.
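
For instance, something along these lines (my wording; it assumes the lazy
switch semantics discussed later in this thread actually hold):

/*
 * load_up_altivec() copies current->thread.vr into the AltiVec unit, and
 * setting MSR_VEC in current->thread.regs->msr marks this thread as the
 * owner of the unit, so __switch_to() knows it has to save the state
 * (always on SMP, lazily on UP) when we get scheduled out.
 */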

> +		}
> +	}
> +}
> +
> +/*
>  * Helper function for "full" MSR writes.  No need to call this if only
>  * EE/CE/ME/DE/RI are changing.
>  */
> @@ -678,6 +706,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> 	u64 fpr[32];
> #endif
> 
> +#ifdef CONFIG_ALTIVEC
> +	vector128 vr[32];
> +	vector128 vscr;
> +	int used_vr = 0;

bool

> +#endif
> +
> 	if (!vcpu->arch.sane) {
> 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
> 		return -EINVAL;
> @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> 	kvmppc_load_guest_fp(vcpu);
> #endif
> 
> +#ifdef CONFIG_ALTIVEC

/* Switch from user space Altivec to guest Altivec state */

> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {

Why not use your kvmppc_supports_altivec() helper here?

> +		/* Save userspace VEC state in stack */
> +		enable_kernel_altivec();

Can't you hide this in the load function? We only want to enable kernel altivec for a short time while we shuffle the registers around.
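
Something like this, maybe (untested sketch, and kvmppc_swap_user_altivec is
a name I just made up for illustration):

static inline void kvmppc_swap_user_altivec(struct kvm_vcpu *vcpu,
					    vector128 *vr, vector128 *vscr)
{
	/* Allow kernel use of AltiVec only while we shuffle registers */
	enable_kernel_altivec();

	/* Save user space VEC state into the caller's stack buffers */
	memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
	*vscr = current->thread.vscr;

	/* Restore guest VEC state to thread */
	memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
	current->thread.vscr = vcpu->arch.vscr;
}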

> +		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));

vr = current->thread.vr;

> +		vscr = current->thread.vscr;
> +		used_vr = current->thread.used_vr;
> +
> +		/* Restore guest VEC state to thread */
> +		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));

current->thread.vr = vcpu->arch.vr;

> +		current->thread.vscr = vcpu->arch.vscr;
> +
> +		kvmppc_load_guest_altivec(vcpu);
> +	}

Do we need to do this even when the guest doesn't use Altivec? Can't we just load it on demand then once we fault? This code path really should only be a prefetch enable when MSR_VEC is already set in guest context.

> +#endif
> +
> 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
> 
> 	/* No need for kvm_guest_exit. It's done in handle_exit.
> @@ -736,6 +786,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> 	current->thread.fpexc_mode = fpexc_mode;
> #endif
> 
> +#ifdef CONFIG_ALTIVEC

/* Switch from guest Altivec to user space Altivec state */

> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
> +		/* Save AltiVec state to thread */
> +		if (current->thread.regs->msr & MSR_VEC)
> +			giveup_altivec(current);
> +
> +		/* Save guest state */
> +		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
> +		vcpu->arch.vscr = current->thread.vscr;
> +
> +		/* Restore userspace state */
> +		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
> +		current->thread.vscr = vscr;
> +		current->thread.used_vr = used_vr;
> +	}

Same comments here. If the guest never touched Altivec state, don't bother restoring it, as it's still good.


Alex

> +#endif
> +
> out:
> 	vcpu->mode = OUTSIDE_GUEST_MODE;
> 	return ret;
> @@ -961,7 +1028,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 		break;
> 
> 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
> -		if (kvmppc_supports_spe()) {
> +		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
> 			bool enabled = false;
> 
> #ifndef CONFIG_KVM_BOOKE_HV
> @@ -987,7 +1054,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 	}
> 
> 	case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
> -		if (kvmppc_supports_spe()) {
> +		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
> 			kvmppc_booke_queue_irqprio(vcpu,
> 				BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
> 			r = RESUME_GUEST;
> @@ -1205,6 +1272,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 		} else {
> 			kvmppc_lazy_ee_enable();
> 			kvmppc_load_guest_fp(vcpu);
> +			kvmppc_load_guest_altivec(vcpu);
> 		}
> 	}
> 
> -- 
> 1.7.3.4

Caraman Mihai Claudiu-B02008 July 3, 2013, 4:09 p.m. UTC | #2
> > + * Simulate an AltiVec unavailable fault to load guest state
> > + * from the thread struct into the AltiVec unit.
> > + * Must be called with preemption disabled.
> > + */
> > +static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
> > +{
> > +	if (kvmppc_supports_altivec()) {
> > +		if (!(current->thread.regs->msr & MSR_VEC)) {
> > +			load_up_altivec(NULL);
> > +			current->thread.regs->msr |= MSR_VEC;
> 
> Does this ensure that the kernel saves / restores all altivec state on
> task switch? Does it load it again when it schedules us in? Would
> definitely be worth a comment.

These units are _LAZY_ !!! Only the SMP kernel saves the state when it schedules out.
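
Roughly what __switch_to() in arch/powerpc/kernel/process.c does (paraphrased
from memory, so don't take the details as exact):

#ifdef CONFIG_SMP
	/*
	 * On SMP the lazy trick doesn't work: the state must not be left
	 * behind in another CPU's unit, so it is saved on every switch-out.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif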

> 
> > +		}
> > +	}
> > +}
> > +
> > +/*
> >  * Helper function for "full" MSR writes.  No need to call this if only
> >  * EE/CE/ME/DE/RI are changing.
> >  */
> > @@ -678,6 +706,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> > 	u64 fpr[32];
> > #endif
> >
> > +#ifdef CONFIG_ALTIVEC
> > +	vector128 vr[32];
> > +	vector128 vscr;
> > +	int used_vr = 0;
> 
> bool

Why don't you first ask to change the type of the used_vr member in
/include/asm/processor.h?

> 
> > +#endif
> > +
> > 	if (!vcpu->arch.sane) {
> > 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
> > 		return -EINVAL;
> > @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> > 	kvmppc_load_guest_fp(vcpu);
> > #endif
> >
> > +#ifdef CONFIG_ALTIVEC
> 
> /* Switch from user space Altivec to guest Altivec state */
> 
> > +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
> 
> Why not use your kvmppc_supports_altivec() helper here?

Give it a try ... because Linus guarded these members with CONFIG_ALTIVEC :)

> 
> > +		/* Save userspace VEC state in stack */
> > +		enable_kernel_altivec();
> 
> Can't you hide this in the load function? We only want to enable kernel
> altivec for a short time while we shuffle the registers around.
> 
> > +		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
> 
> vr = current->thread.vr;

Are you kidding, be more careful with arrays :) 
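
To spell it out, arrays are not assignable in C, so the suggested one-liner
would not even compile:

	vector128 vr[32];

	vr = current->thread.vr;	/* error: array type is not assignable */

	/* copying the register file really does need memcpy() */
	memcpy(vr, current->thread.vr, sizeof(vr));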

> 
> > +		vscr = current->thread.vscr;
> > +		used_vr = current->thread.used_vr;
> > +
> > +		/* Restore guest VEC state to thread */
> > +		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
> 
> current->thread.vr = vcpu->arch.vr;
> 
> > +		current->thread.vscr = vcpu->arch.vscr;
> > +
> > +		kvmppc_load_guest_altivec(vcpu);
> > +	}
> 
> Do we need to do this even when the guest doesn't use Altivec? Can't we
> just load it on demand then once we fault? This code path really should
> only be a prefetch enable when MSR_VEC is already set in guest context.

No we can't, read 6/6. 

> 
> > +#endif
> > +
> > 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
> >
> > 	/* No need for kvm_guest_exit. It's done in handle_exit.
> > @@ -736,6 +786,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> > 	current->thread.fpexc_mode = fpexc_mode;
> > #endif
> >
> > +#ifdef CONFIG_ALTIVEC
> 
> /* Switch from guest Altivec to user space Altivec state */
> 
> > +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
> > +		/* Save AltiVec state to thread */
> > +		if (current->thread.regs->msr & MSR_VEC)
> > +			giveup_altivec(current);
> > +
> > +		/* Save guest state */
> > +		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
> > +		vcpu->arch.vscr = current->thread.vscr;
> > +
> > +		/* Restore userspace state */
> > +		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
> > +		current->thread.vscr = vscr;
> > +		current->thread.used_vr = used_vr;
> > +	}
> 
> Same comments here. If the guest never touched Altivec state, don't
> bother restoring it, as it's still good.

LOL, the mighty guest kernel does whatever it wants with AltiVec and
doesn't inform us.

-Mike

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Alexander Graf July 3, 2013, 4:43 p.m. UTC | #3
On 03.07.2013, at 18:09, Caraman Mihai Claudiu-B02008 wrote:

>>> + * Simulate an AltiVec unavailable fault to load guest state
>>> + * from the thread struct into the AltiVec unit.
>>> + * Must be called with preemption disabled.
>>> + */
>>> +static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
>>> +{
>>> +	if (kvmppc_supports_altivec()) {
>>> +		if (!(current->thread.regs->msr & MSR_VEC)) {
>>> +			load_up_altivec(NULL);
>>> +			current->thread.regs->msr |= MSR_VEC;
>> 
>> Does this ensure that the kernel saves / restores all altivec state on
>> task switch? Does it load it again when it schedules us in? Would
>> definitely be worth a comment.
> 
> These units are _LAZY_ !!! Only the SMP kernel saves the state when it schedules out.

Then how do you ensure that the AltiVec state is still consistent after another AltiVec user gets scheduled in? Have I missed a vcpu_load hook anywhere?

> 
>> 
>>> +		}
>>> +	}
>>> +}
>>> +
>>> +/*
>>> * Helper function for "full" MSR writes.  No need to call this if only
>>> * EE/CE/ME/DE/RI are changing.
>>> */
>>> @@ -678,6 +706,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> 	u64 fpr[32];
>>> #endif
>>> 
>>> +#ifdef CONFIG_ALTIVEC
>>> +	vector128 vr[32];
>>> +	vector128 vscr;
>>> +	int used_vr = 0;
>> 
>> bool
> 
> Why don't you first ask to change the type of the used_vr member in
> /include/asm/processor.h?

Ah, it's a copy from thread. Fair enough.

> 
>> 
>>> +#endif
>>> +
>>> 	if (!vcpu->arch.sane) {
>>> 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
>>> 		return -EINVAL;
>>> @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> 	kvmppc_load_guest_fp(vcpu);
>>> #endif
>>> 
>>> +#ifdef CONFIG_ALTIVEC
>> 
>> /* Switch from user space Altivec to guest Altivec state */
>> 
>>> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
>> 
>> Why not use your kvmppc_supports_altivec() helper here?
> 
> Give it a try ... because Linus guarded these members with CONFIG_ALTIVEC :)

Huh? You already are in an #ifdef CONFIG_ALTIVEC here. I think it's a good idea to be consistent in helper usage. And the name you gave to the helper (kvmppc_supports_altivec) is actually quite nice and tells us exactly what we're asking for.

> 
>> 
>>> +		/* Save userspace VEC state in stack */
>>> +		enable_kernel_altivec();
>> 
>> Can't you hide this in the load function? We only want to enable kernel
>> altivec for a short time while we shuffle the registers around.
>> 
>>> +		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
>> 
>> vr = current->thread.vr;
> 
> Are you kidding, be more careful with arrays :) 

Bleks :).

> 
>> 
>>> +		vscr = current->thread.vscr;
>>> +		used_vr = current->thread.used_vr;
>>> +
>>> +		/* Restore guest VEC state to thread */
>>> +		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
>> 
>> current->thread.vr = vcpu->arch.vr;
>> 
>>> +		current->thread.vscr = vcpu->arch.vscr;
>>> +
>>> +		kvmppc_load_guest_altivec(vcpu);
>>> +	}
>> 
>> Do we need to do this even when the guest doesn't use Altivec? Can't we
>> just load it on demand then once we fault? This code path really should
>> only be a prefetch enable when MSR_VEC is already set in guest context.
> 
> No we can't, read 6/6. 

So we have to make sure we're completely unlazy when it comes to a KVM guest. Are we?


Alex

> 
>> 
>>> +#endif
>>> +
>>> 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
>>> 
>>> 	/* No need for kvm_guest_exit. It's done in handle_exit.
>>> @@ -736,6 +786,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> 	current->thread.fpexc_mode = fpexc_mode;
>>> #endif
>>> 
>>> +#ifdef CONFIG_ALTIVEC
>> 
>> /* Switch from guest Altivec to user space Altivec state */
>> 
>>> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
>>> +		/* Save AltiVec state to thread */
>>> +		if (current->thread.regs->msr & MSR_VEC)
>>> +			giveup_altivec(current);
>>> +
>>> +		/* Save guest state */
>>> +		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
>>> +		vcpu->arch.vscr = current->thread.vscr;
>>> +
>>> +		/* Restore userspace state */
>>> +		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
>>> +		current->thread.vscr = vscr;
>>> +		current->thread.used_vr = used_vr;
>>> +	}
>> 
>> Same comments here. If the guest never touched Altivec state, don't
>> bother restoring it, as it's still good.
> 
> LOL, the mighty guest kernel does whatever it wants with AltiVec and
> doesn't inform us.
> 
> -Mike
> 

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Caraman Mihai Claudiu-B02008 July 3, 2013, 4:49 p.m. UTC | #4
> >>> +
> >>> 	if (!vcpu->arch.sane) {
> >>> 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
> >>> 		return -EINVAL;
> >>> @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> >>> 	kvmppc_load_guest_fp(vcpu);
> >>> #endif
> >>>
> >>> +#ifdef CONFIG_ALTIVEC
> >>
> >> /* Switch from user space Altivec to guest Altivec state */
> >>
> >>> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
> >>
> >> Why not use your kvmppc_supports_altivec() helper here?
> >
> > Give it a try ... because Linus guarded these members with CONFIG_ALTIVEC :)
> 
> Huh? You already are in an #ifdef CONFIG_ALTIVEC here. I think it's a
> good idea to be consistent in helper usage. And the name you gave to the
> helper (kvmppc_supports_altivec) is actually quite nice and tells us
> exactly what we're asking for.

I thought you were asking to replace the #ifdef CONFIG_ALTIVEC.

> >> Do we need to do this even when the guest doesn't use Altivec? Can't we
> >> just load it on demand then once we fault? This code path really should
> >> only be a prefetch enable when MSR_VEC is already set in guest context.
> >
> > No we can't, read 6/6.
> 
> So we have to make sure we're completely unlazy when it comes to a KVM
> guest. Are we?

Yes, because MSR[SPV] is under its control.

-Mike


Alexander Graf July 3, 2013, 5:07 p.m. UTC | #5
On 03.07.2013, at 18:49, Caraman Mihai Claudiu-B02008 wrote:

>>>>> +
>>>>> 	if (!vcpu->arch.sane) {
>>>>> 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
>>>>> 		return -EINVAL;
>>>>> @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>>>> 	kvmppc_load_guest_fp(vcpu);
>>>>> #endif
>>>>> 
>>>>> +#ifdef CONFIG_ALTIVEC
>>>> 
>>>> /* Switch from user space Altivec to guest Altivec state */
>>>> 
>>>>> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
>>>> 
>>>> Why not use your kvmppc_supports_altivec() helper here?
>>> 
>>> Give it a try ... because Linus guarded these members with CONFIG_ALTIVEC :)
>> 
>> Huh? You already are in an #ifdef CONFIG_ALTIVEC here. I think it's a
>> good idea to be consistent in helper usage. And the name you gave to the
>> helper (kvmppc_supports_altivec) is actually quite nice and tells us
>> exactly what we're asking for.
> 
> I thought you were asking to replace the #ifdef CONFIG_ALTIVEC.
> 
>>>> Do we need to do this even when the guest doesn't use Altivec? Can't we
>>>> just load it on demand then once we fault? This code path really should
>>>> only be a prefetch enable when MSR_VEC is already set in guest context.
>>> 
>>> No we can't, read 6/6.
>> 
>> So we have to make sure we're completely unlazy when it comes to a KVM
>> guest. Are we?
> 
> Yes, because MSR[SPV] is under its control.

Oh, sure, KVM wants it unlazy. That part is obvious. But does the kernel always give us unlazyness? The way I read the code, process.c goes lazy when !CONFIG_SMP.

So the big question is why we're manually enforcing FPU giveup, but not Altivec giveup? One of the 2 probably is wrong :).


Alex

Scott Wood July 3, 2013, 6:36 p.m. UTC | #6
On 07/03/2013 12:07:30 PM, Alexander Graf wrote:
> 
> On 03.07.2013, at 18:49, Caraman Mihai Claudiu-B02008 wrote:
> 
> >>>> Do we need to do this even when the guest doesn't use Altivec? Can't we
> >>>> just load it on demand then once we fault? This code path really should
> >>>> only be a prefetch enable when MSR_VEC is already set in guest context.
> >>>
> >>> No we can't, read 6/6.
> >>
> >> So we have to make sure we're completely unlazy when it comes to a KVM
> >> guest. Are we?
> >
> > Yes, because MSR[SPV] is under its control.
> 
> Oh, sure, KVM wants it unlazy. That part is obvious. But does the  
> kernel always give us unlazyness? The way I read the code, process.c  
> goes lazy when !CONFIG_SMP.
> 
> So the big question is why we're manually enforcing FPU giveup, but  
> not Altivec giveup? One of the 2 probably is wrong :).

Why do you think we're not enforcing it for Altivec?  Is there some  
specific piece of code you're referring to that is different in this  
regard?
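
For reference, the FPU side of kvmppc_vcpu_run() that this patch mirrors does
the same forced giveup (paraphrased from booke.c, from memory):

	/* Save FP state to thread */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);

	/* Save guest state */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace state */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;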

-Scott
Alexander Graf July 3, 2013, 6:45 p.m. UTC | #7
On 03.07.2013, at 20:36, Scott Wood wrote:

> On 07/03/2013 12:07:30 PM, Alexander Graf wrote:
>> On 03.07.2013, at 18:49, Caraman Mihai Claudiu-B02008 wrote:
>> >>>> Do we need to do this even when the guest doesn't use Altivec? Can't we
>> >>>> just load it on demand then once we fault? This code path really should
>> >>>> only be a prefetch enable when MSR_VEC is already set in guest context.
>> >>>
>> >>> No we can't, read 6/6.
>> >>
>> >> So we have to make sure we're completely unlazy when it comes to a KVM
>> >> guest. Are we?
>> >
>> > Yes, because MSR[SPV] is under its control.
>> Oh, sure, KVM wants it unlazy. That part is obvious. But does the kernel always give us unlazyness? The way I read the code, process.c goes lazy when !CONFIG_SMP.
>> So the big question is why we're manually enforcing FPU giveup, but not Altivec giveup? One of the 2 probably is wrong :).
> 
> Why do you think we're not enforcing it for Altivec?  Is there some specific piece of code you're referring to that is different in this regard?

Well, apparently because I misread the code :). All is well.


Alex


Patch

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 3cae2e3..c3c3af6 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -98,6 +98,19 @@  static inline bool kvmppc_supports_spe(void)
 	return false;
 }
 
+/*
+ * Always returns true if the AltiVec unit is present, see
+ * kvmppc_core_check_processor_compat().
+ */
+static inline bool kvmppc_supports_altivec(void)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		return true;
+#endif
+	return false;
+}
+
 #ifdef CONFIG_SPE
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 {
@@ -151,6 +164,21 @@  static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * Simulate an AltiVec unavailable fault to load guest state
+ * from the thread struct into the AltiVec unit.
+ * Must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+	if (kvmppc_supports_altivec()) {
+		if (!(current->thread.regs->msr & MSR_VEC)) {
+			load_up_altivec(NULL);
+			current->thread.regs->msr |= MSR_VEC;
+		}
+	}
+}
+
+/*
  * Helper function for "full" MSR writes.  No need to call this if only
  * EE/CE/ME/DE/RI are changing.
  */
@@ -678,6 +706,12 @@  int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	u64 fpr[32];
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	vector128 vr[32];
+	vector128 vscr;
+	int used_vr = 0;
+#endif
+
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
@@ -716,6 +750,22 @@  int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save userspace VEC state in stack */
+		enable_kernel_altivec();
+		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+		vscr = current->thread.vscr;
+		used_vr = current->thread.used_vr;
+
+		/* Restore guest VEC state to thread */
+		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+		current->thread.vscr = vcpu->arch.vscr;
+
+		kvmppc_load_guest_altivec(vcpu);
+	}
+#endif
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
 	/* No need for kvm_guest_exit. It's done in handle_exit.
@@ -736,6 +786,23 @@  int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	current->thread.fpexc_mode = fpexc_mode;
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save AltiVec state to thread */
+		if (current->thread.regs->msr & MSR_VEC)
+			giveup_altivec(current);
+
+		/* Save guest state */
+		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
+		vcpu->arch.vscr = current->thread.vscr;
+
+		/* Restore userspace state */
+		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+		current->thread.vscr = vscr;
+		current->thread.used_vr = used_vr;
+	}
+#endif
+
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
@@ -961,7 +1028,7 @@  int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 
 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 			bool enabled = false;
 
 #ifndef CONFIG_KVM_BOOKE_HV
@@ -987,7 +1054,7 @@  int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 
 	case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 			kvmppc_booke_queue_irqprio(vcpu,
 				BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
 			r = RESUME_GUEST;
@@ -1205,6 +1272,7 @@  int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		} else {
 			kvmppc_lazy_ee_enable();
 			kvmppc_load_guest_fp(vcpu);
+			kvmppc_load_guest_altivec(vcpu);
 		}
 	}