
[v2,1/1] KVM: s390: pv: make use of ultravisor AIV support

Message ID 20220208165310.3905815-2-mimu@linux.ibm.com (mailing list archive)
State New, archived
Series KVM: s390: pv: make use of ultravisor AIV support

Commit Message

Michael Mueller Feb. 8, 2022, 4:53 p.m. UTC
This patch enables the ultravisor adapter interruption virtualization
support indicated by UV feature BIT_UV_FEAT_AIV. This allows ISC
interruption injection directly into the GISA IPM for PV kvm guests.

Hardware that does not support this feature will continue to use the
UV interruption interception method to deliver ISC interruptions to
PV kvm guests. For this purpose, the ECA_AIV bit for all guest cpus
will be cleared and the GISA will be disabled during PV CPU setup.

In addition a check in __inject_io() has been removed. That reduces the
required instructions for interruption handling for PV and traditional
kvm guests.

Signed-off-by: Michael Mueller <mimu@linux.ibm.com>
---
 arch/s390/include/asm/uv.h |  1 +
 arch/s390/kvm/interrupt.c  | 53 +++++++++++++++++++++++++++++++++-----
 arch/s390/kvm/kvm-s390.c   | 12 ++++++---
 arch/s390/kvm/kvm-s390.h   | 11 ++++++++
 4 files changed, 68 insertions(+), 9 deletions(-)

Comments

Janosch Frank Feb. 9, 2022, 9:12 a.m. UTC | #1
On 2/8/22 17:53, Michael Mueller wrote:
> This patch enables the ultravisor adapter interruption virtualization
> support indicated by UV feature BIT_UV_FEAT_AIV. This allows ISC
> interruption injection directly into the GISA IPM for PV kvm guests.
> 
> Hardware that does not support this feature will continue to use the
> UV interruption interception method to deliver ISC interruptions to
> PV kvm guests. For this purpose, the ECA_AIV bit for all guest cpus
> will be cleared and the GISA will be disabled during PV CPU setup.
> 
> In addition a check in __inject_io() has been removed. That reduces the
> required instructions for interruption handling for PV and traditional
> kvm guests.
> 
> Signed-off-by: Michael Mueller <mimu@linux.ibm.com>
> ---
>   arch/s390/include/asm/uv.h |  1 +
>   arch/s390/kvm/interrupt.c  | 53 +++++++++++++++++++++++++++++++++-----
>   arch/s390/kvm/kvm-s390.c   | 12 ++++++---
>   arch/s390/kvm/kvm-s390.h   | 11 ++++++++
>   4 files changed, 68 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
> index 86218382d29c..a2d376b8bce3 100644
> --- a/arch/s390/include/asm/uv.h
> +++ b/arch/s390/include/asm/uv.h
> @@ -80,6 +80,7 @@ enum uv_cmds_inst {
>   
>   enum uv_feat_ind {
>   	BIT_UV_FEAT_MISC = 0,
> +	BIT_UV_FEAT_AIV = 1,
>   };
>   
>   struct uv_cb_header {
> diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
> index db933c252dbc..d45e26e31d3d 100644
> --- a/arch/s390/kvm/interrupt.c
> +++ b/arch/s390/kvm/interrupt.c
> @@ -1901,13 +1901,12 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
>   	isc = int_word_to_isc(inti->io.io_int_word);
>   
>   	/*
> -	 * Do not make use of gisa in protected mode. We do not use the lock
> -	 * checking variant as this is just a performance optimization and we
> -	 * do not hold the lock here. This is ok as the code will pick
> -	 * interrupts from both "lists" for delivery.
> +	 * We do not use the lock checking variant as this is just a
> +	 * performance optimization and we do not hold the lock here.
> +	 * This is ok as the code will pick interrupts from both "lists"
> +	 * for delivery.
>   	 */
> -	if (!kvm_s390_pv_get_handle(kvm) &&
> -	    gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
> +	if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
>   		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
>   		gisa_set_ipm_gisc(gi->origin, isc);
>   		kfree(inti);
> @@ -3171,9 +3170,32 @@ void kvm_s390_gisa_init(struct kvm *kvm)
>   	VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
>   }
>   
> +void kvm_s390_gisa_enable(struct kvm *kvm)
> +{
> +	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
> +	struct kvm_vcpu *vcpu;
> +	unsigned long i;
> +
> +	if (gi->origin)
> +		return;
> +	kvm_s390_gisa_init(kvm);
> +	kvm_for_each_vcpu(i, vcpu, kvm) {
> +		mutex_lock(&vcpu->mutex);
> +		vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(kvm);

The gisa is a per VM structure so this value could be cached outside the 
loop. Also I'd expect that we would need to do the != 0 check only once, no?
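
Something along these lines, maybe (untested sketch only, reusing the
existing helpers; this would be the tail of kvm_s390_gisa_enable() after
the kvm_s390_gisa_init() call, with a u32 gd local added):

	/* cache the per-VM descriptor and do the check once, not per vcpu */
	gd = kvm_s390_get_gisa_desc(kvm);
	if (!gd)
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		vcpu->arch.sie_block->gd = gd;
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   gd & 0x3, vcpu->vcpu_id);
		mutex_unlock(&vcpu->mutex);
	}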

> +		if (vcpu->arch.sie_block->gd) {
> +			vcpu->arch.sie_block->eca |= ECA_AIV;
> +			VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
> +				   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
> +		}
> +		mutex_unlock(&vcpu->mutex);
> +	}
> +}
> +
> +

One \n too much

>   void kvm_s390_gisa_destroy(struct kvm *kvm)
>   {
>   	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
> +	struct kvm_s390_gisa *gisa = gi->origin;
>   
>   	if (!gi->origin)
>   		return;
> @@ -3184,6 +3206,25 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
>   		cpu_relax();
>   	hrtimer_cancel(&gi->timer);
>   	gi->origin = NULL;
> +	VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
> +}
> +
> +void kvm_s390_gisa_disable(struct kvm *kvm)
> +{
> +	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
> +	struct kvm_vcpu *vcpu;
> +	unsigned long i;
> +
> +	if (!gi->origin)
> +		return;
> +	kvm_for_each_vcpu(i, vcpu, kvm) {
> +		mutex_lock(&vcpu->mutex);
> +		vcpu->arch.sie_block->eca &= ~ECA_AIV;
> +		vcpu->arch.sie_block->gd = 0U;
> +		mutex_unlock(&vcpu->mutex);
> +		VCPU_EVENT(vcpu, 3, "AIV disabled for cpu %03u", vcpu->vcpu_id);
> +	}
> +	kvm_s390_gisa_destroy(kvm);
>   }
>   
>   /**
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 577f1ead6a51..72a3c9a7c9dd 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -2194,6 +2194,9 @@ static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
>   		}
>   		mutex_unlock(&vcpu->mutex);
>   	}
> +	/* Finally enable the GISA if required for traditional KVM guests */

Ensure that we re-enable gisa if the non-PV guest used it but the PV 
guest did not.

> +	if (use_gisa)
> +		kvm_s390_gisa_enable(kvm);
>   	return ret;
>   }
>   
> @@ -2205,6 +2208,10 @@ static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
>   
>   	struct kvm_vcpu *vcpu;
>   
> +	/* First disable the GISA if the ultravisor does not support AIV. */

I think we should drop the "First" here.

> +	if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
> +		kvm_s390_gisa_disable(kvm);
> +
>   	kvm_for_each_vcpu(i, vcpu, kvm) {
>   		mutex_lock(&vcpu->mutex);
>   		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
> @@ -2268,6 +2275,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
>   		 */
>   		if (r)
>   			break;
> +

Stray whitespace change

>   		r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
>   
>   		/* no need to block service interrupts any more */
> @@ -3263,9 +3271,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>   
>   	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
>   	spin_lock_init(&vcpu->arch.local_int.lock);
> -	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
> -	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
> -		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
> +	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);

I like that way of handling the gisa setup of VCPUs

>   	seqcount_init(&vcpu->arch.cputm_seqcount);
>   
>   	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
> index 098831e815e6..4ba8fc30d87a 100644
> --- a/arch/s390/kvm/kvm-s390.h
> +++ b/arch/s390/kvm/kvm-s390.h
> @@ -231,6 +231,15 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
>   	return ms->base_gfn + ms->npages;
>   }
>   
> +static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
> +{
> +	u32 gd = (u32)(u64)kvm->arch.gisa_int.origin;
> +
> +	if (gd && sclp.has_gisaf)
> +		gd |= GISA_FORMAT1;
> +	return gd;
> +}
> +
>   /* implemented in pv.c */
>   int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
>   int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
> @@ -450,6 +459,8 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
>   void kvm_s390_gisa_init(struct kvm *kvm);
>   void kvm_s390_gisa_clear(struct kvm *kvm);
>   void kvm_s390_gisa_destroy(struct kvm *kvm);
> +void kvm_s390_gisa_disable(struct kvm *kvm);
> +void kvm_s390_gisa_enable(struct kvm *kvm);
>   int kvm_s390_gib_init(u8 nisc);
>   void kvm_s390_gib_destroy(void);
>
Michael Mueller Feb. 9, 2022, 12:26 p.m. UTC | #2
On 09.02.22 10:12, Janosch Frank wrote:
> On 2/8/22 17:53, Michael Mueller wrote:
>> This patch enables the ultravisor adapter interruption virtualization
>> support indicated by UV feature BIT_UV_FEAT_AIV. This allows ISC
>> interruption injection directly into the GISA IPM for PV kvm guests.
>>
>> Hardware that does not support this feature will continue to use the
>> UV interruption interception method to deliver ISC interruptions to
>> PV kvm guests. For this purpose, the ECA_AIV bit for all guest cpus
>> will be cleared and the GISA will be disabled during PV CPU setup.
>>
>> In addition a check in __inject_io() has been removed. That reduces the
>> required instructions for interruption handling for PV and traditional
>> kvm guests.
>>
>> Signed-off-by: Michael Mueller <mimu@linux.ibm.com>
>> ---
>>   arch/s390/include/asm/uv.h |  1 +
>>   arch/s390/kvm/interrupt.c  | 53 +++++++++++++++++++++++++++++++++-----
>>   arch/s390/kvm/kvm-s390.c   | 12 ++++++---
>>   arch/s390/kvm/kvm-s390.h   | 11 ++++++++
>>   4 files changed, 68 insertions(+), 9 deletions(-)
>>
>> diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
>> index 86218382d29c..a2d376b8bce3 100644
>> --- a/arch/s390/include/asm/uv.h
>> +++ b/arch/s390/include/asm/uv.h
>> @@ -80,6 +80,7 @@ enum uv_cmds_inst {
>>   enum uv_feat_ind {
>>       BIT_UV_FEAT_MISC = 0,
>> +    BIT_UV_FEAT_AIV = 1,
>>   };
>>   struct uv_cb_header {
>> diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
>> index db933c252dbc..d45e26e31d3d 100644
>> --- a/arch/s390/kvm/interrupt.c
>> +++ b/arch/s390/kvm/interrupt.c
>> @@ -1901,13 +1901,12 @@ static int __inject_io(struct kvm *kvm, struct 
>> kvm_s390_interrupt_info *inti)
>>       isc = int_word_to_isc(inti->io.io_int_word);
>>       /*
>> -     * Do not make use of gisa in protected mode. We do not use the lock
>> -     * checking variant as this is just a performance optimization 
>> and we
>> -     * do not hold the lock here. This is ok as the code will pick
>> -     * interrupts from both "lists" for delivery.
>> +     * We do not use the lock checking variant as this is just a
>> +     * performance optimization and we do not hold the lock here.
>> +     * This is ok as the code will pick interrupts from both "lists"
>> +     * for delivery.
>>        */
>> -    if (!kvm_s390_pv_get_handle(kvm) &&
>> -        gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
>> +    if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
>>           VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
>>           gisa_set_ipm_gisc(gi->origin, isc);
>>           kfree(inti);
>> @@ -3171,9 +3170,32 @@ void kvm_s390_gisa_init(struct kvm *kvm)
>>       VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
>>   }
>> +void kvm_s390_gisa_enable(struct kvm *kvm)
>> +{
>> +    struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
>> +    struct kvm_vcpu *vcpu;
>> +    unsigned long i;
>> +
>> +    if (gi->origin)
>> +        return;
>> +    kvm_s390_gisa_init(kvm);
>> +    kvm_for_each_vcpu(i, vcpu, kvm) {
>> +        mutex_lock(&vcpu->mutex);
>> +        vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(kvm);
> 
> The gisa is a per VM structure so this value could be cached outside the 
> loop. Also I'd expect that we would need to do the != 0 check only once, 
> no?

Will do in front of the vcpu loop.

> 
>> +        if (vcpu->arch.sie_block->gd) {
>> +            vcpu->arch.sie_block->eca |= ECA_AIV;
>> +            VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu 
>> %03u",
>> +                   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
>> +        }
>> +        mutex_unlock(&vcpu->mutex);
>> +    }
>> +}
>> +
>> +
> 
> One \n too much

yep

> 
>>   void kvm_s390_gisa_destroy(struct kvm *kvm)
>>   {
>>       struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
>> +    struct kvm_s390_gisa *gisa = gi->origin;
>>       if (!gi->origin)
>>           return;
>> @@ -3184,6 +3206,25 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
>>           cpu_relax();
>>       hrtimer_cancel(&gi->timer);
>>       gi->origin = NULL;
>> +    VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
>> +}
>> +
>> +void kvm_s390_gisa_disable(struct kvm *kvm)
>> +{
>> +    struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
>> +    struct kvm_vcpu *vcpu;
>> +    unsigned long i;
>> +
>> +    if (!gi->origin)
>> +        return;
>> +    kvm_for_each_vcpu(i, vcpu, kvm) {
>> +        mutex_lock(&vcpu->mutex);
>> +        vcpu->arch.sie_block->eca &= ~ECA_AIV;
>> +        vcpu->arch.sie_block->gd = 0U;
>> +        mutex_unlock(&vcpu->mutex);
>> +        VCPU_EVENT(vcpu, 3, "AIV disabled for cpu %03u", vcpu->vcpu_id);
>> +    }
>> +    kvm_s390_gisa_destroy(kvm);
>>   }
>>   /**
>> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
>> index 577f1ead6a51..72a3c9a7c9dd 100644
>> --- a/arch/s390/kvm/kvm-s390.c
>> +++ b/arch/s390/kvm/kvm-s390.c
>> @@ -2194,6 +2194,9 @@ static int kvm_s390_cpus_from_pv(struct kvm 
>> *kvm, u16 *rcp, u16 *rrcp)
>>           }
>>           mutex_unlock(&vcpu->mutex);
>>       }
>> +    /* Finally enable the GISA if required for traditional KVM guests */
> 
> Ensure that we re-enable gisa if the non-PV guest used it but the PV 
> guest did not.

I will change the comment if that makes it more clear.
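
For example (just a possible wording, nothing final):

	/*
	 * If the unprotected VM was using the GISA before the transition
	 * (use_gisa), re-enable it now; the protected guest may have run
	 * without it when the ultravisor lacks AIV support.
	 */
	if (use_gisa)
		kvm_s390_gisa_enable(kvm);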

> 
>> +    if (use_gisa)
>> +        kvm_s390_gisa_enable(kvm);
>>       return ret;
>>   }
>> @@ -2205,6 +2208,10 @@ static int kvm_s390_cpus_to_pv(struct kvm *kvm, 
>> u16 *rc, u16 *rrc)
>>       struct kvm_vcpu *vcpu;
>> +    /* First disable the GISA if the ultravisor does not support AIV. */
> 
> I think we should drop the "First" here.

ok

> 
>> +    if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
>> +        kvm_s390_gisa_disable(kvm);
>> +
>>       kvm_for_each_vcpu(i, vcpu, kvm) {
>>           mutex_lock(&vcpu->mutex);
>>           r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
>> @@ -2268,6 +2275,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, 
>> struct kvm_pv_cmd *cmd)
>>            */
>>           if (r)
>>               break;
>> +
> 
> Stray whitespace change

oops

> 
>>           r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
>>           /* no need to block service interrupts any more */
>> @@ -3263,9 +3271,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>>       vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
>>       spin_lock_init(&vcpu->arch.local_int.lock);
>> -    vcpu->arch.sie_block->gd = 
>> (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
>> -    if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
>> -        vcpu->arch.sie_block->gd |= GISA_FORMAT1;
>> +    vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
> 
> I like that way of handling the gisa setup of VCPUs

Thanks a lot Janosch, for providing review feedback on this item.

Michael

> 
>>       seqcount_init(&vcpu->arch.cputm_seqcount);
>>       vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
>> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
>> index 098831e815e6..4ba8fc30d87a 100644
>> --- a/arch/s390/kvm/kvm-s390.h
>> +++ b/arch/s390/kvm/kvm-s390.h
>> @@ -231,6 +231,15 @@ static inline unsigned long 
>> kvm_s390_get_gfn_end(struct kvm_memslots *slots)
>>       return ms->base_gfn + ms->npages;
>>   }
>> +static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
>> +{
>> +    u32 gd = (u32)(u64)kvm->arch.gisa_int.origin;
>> +
>> +    if (gd && sclp.has_gisaf)
>> +        gd |= GISA_FORMAT1;
>> +    return gd;
>> +}
>> +
>>   /* implemented in pv.c */
>>   int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
>>   int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
>> @@ -450,6 +459,8 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
>>   void kvm_s390_gisa_init(struct kvm *kvm);
>>   void kvm_s390_gisa_clear(struct kvm *kvm);
>>   void kvm_s390_gisa_destroy(struct kvm *kvm);
>> +void kvm_s390_gisa_disable(struct kvm *kvm);
>> +void kvm_s390_gisa_enable(struct kvm *kvm);
>>   int kvm_s390_gib_init(u8 nisc);
>>   void kvm_s390_gib_destroy(void);
>

Patch

diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 86218382d29c..a2d376b8bce3 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -80,6 +80,7 @@  enum uv_cmds_inst {
 
 enum uv_feat_ind {
 	BIT_UV_FEAT_MISC = 0,
+	BIT_UV_FEAT_AIV = 1,
 };
 
 struct uv_cb_header {
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index db933c252dbc..d45e26e31d3d 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1901,13 +1901,12 @@  static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	isc = int_word_to_isc(inti->io.io_int_word);
 
 	/*
-	 * Do not make use of gisa in protected mode. We do not use the lock
-	 * checking variant as this is just a performance optimization and we
-	 * do not hold the lock here. This is ok as the code will pick
-	 * interrupts from both "lists" for delivery.
+	 * We do not use the lock checking variant as this is just a
+	 * performance optimization and we do not hold the lock here.
+	 * This is ok as the code will pick interrupts from both "lists"
+	 * for delivery.
 	 */
-	if (!kvm_s390_pv_get_handle(kvm) &&
-	    gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
+	if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
 		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
 		gisa_set_ipm_gisc(gi->origin, isc);
 		kfree(inti);
@@ -3171,9 +3170,32 @@  void kvm_s390_gisa_init(struct kvm *kvm)
 	VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
 }
 
+void kvm_s390_gisa_enable(struct kvm *kvm)
+{
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+	struct kvm_vcpu *vcpu;
+	unsigned long i;
+
+	if (gi->origin)
+		return;
+	kvm_s390_gisa_init(kvm);
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		mutex_lock(&vcpu->mutex);
+		vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(kvm);
+		if (vcpu->arch.sie_block->gd) {
+			vcpu->arch.sie_block->eca |= ECA_AIV;
+			VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
+				   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
+		}
+		mutex_unlock(&vcpu->mutex);
+	}
+}
+
+
 void kvm_s390_gisa_destroy(struct kvm *kvm)
 {
 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+	struct kvm_s390_gisa *gisa = gi->origin;
 
 	if (!gi->origin)
 		return;
@@ -3184,6 +3206,25 @@  void kvm_s390_gisa_destroy(struct kvm *kvm)
 		cpu_relax();
 	hrtimer_cancel(&gi->timer);
 	gi->origin = NULL;
+	VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
+}
+
+void kvm_s390_gisa_disable(struct kvm *kvm)
+{
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+	struct kvm_vcpu *vcpu;
+	unsigned long i;
+
+	if (!gi->origin)
+		return;
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		mutex_lock(&vcpu->mutex);
+		vcpu->arch.sie_block->eca &= ~ECA_AIV;
+		vcpu->arch.sie_block->gd = 0U;
+		mutex_unlock(&vcpu->mutex);
+		VCPU_EVENT(vcpu, 3, "AIV disabled for cpu %03u", vcpu->vcpu_id);
+	}
+	kvm_s390_gisa_destroy(kvm);
 }
 
 /**
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 577f1ead6a51..72a3c9a7c9dd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2194,6 +2194,9 @@  static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
 		}
 		mutex_unlock(&vcpu->mutex);
 	}
+	/* Finally enable the GISA if required for traditional KVM guests */
+	if (use_gisa)
+		kvm_s390_gisa_enable(kvm);
 	return ret;
 }
 
@@ -2205,6 +2208,10 @@  static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
 
 	struct kvm_vcpu *vcpu;
 
+	/* First disable the GISA if the ultravisor does not support AIV. */
+	if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
+		kvm_s390_gisa_disable(kvm);
+
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		mutex_lock(&vcpu->mutex);
 		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
@@ -2268,6 +2275,7 @@  static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
 		 */
 		if (r)
 			break;
+
 		r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
 
 		/* no need to block service interrupts any more */
@@ -3263,9 +3271,7 @@  int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
 	spin_lock_init(&vcpu->arch.local_int.lock);
-	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
-	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
-		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
+	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
 	seqcount_init(&vcpu->arch.cputm_seqcount);
 
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 098831e815e6..4ba8fc30d87a 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -231,6 +231,15 @@  static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
 	return ms->base_gfn + ms->npages;
 }
 
+static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
+{
+	u32 gd = (u32)(u64)kvm->arch.gisa_int.origin;
+
+	if (gd && sclp.has_gisaf)
+		gd |= GISA_FORMAT1;
+	return gd;
+}
+
 /* implemented in pv.c */
 int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
 int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
@@ -450,6 +459,8 @@  int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
 void kvm_s390_gisa_init(struct kvm *kvm);
 void kvm_s390_gisa_clear(struct kvm *kvm);
 void kvm_s390_gisa_destroy(struct kvm *kvm);
+void kvm_s390_gisa_disable(struct kvm *kvm);
+void kvm_s390_gisa_enable(struct kvm *kvm);
 int kvm_s390_gib_init(u8 nisc);
 void kvm_s390_gib_destroy(void);