diff mbox

KVM: VMX: fix interrupt lost when enable virtual interrupt delivery

Message ID 1361882056-10482-1-git-send-email-yang.z.zhang@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Zhang, Yang Z Feb. 26, 2013, 12:34 p.m. UTC
From: Yang Zhang <yang.z.zhang@Intel.com>

On platforms which support the virtual interrupt delivery feature,
hardware will clear vIRR automatically when the target vcpu is running.
So software should not modify vIRR while the target vcpu is running. This
patch records the virtual interrupt into posted_irr when delivering a
virtual interrupt to the guest, and then syncs posted_irr into vIRR in the
target vcpu's context.

The patch to enable Posted Interrupt has similar logic. Since we are
still discussing that patch, this part is split out from it to
fix the virtual interrupt delivery issue first.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
---
 arch/x86/kvm/lapic.c |   39 ++++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/lapic.h |    3 +++
 arch/x86/kvm/vmx.c   |    3 ++-
 3 files changed, 43 insertions(+), 2 deletions(-)

Comments

Gleb Natapov Feb. 26, 2013, 12:41 p.m. UTC | #1
On Tue, Feb 26, 2013 at 08:34:16PM +0800, Yang Zhang wrote:
> From: Yang Zhang <yang.z.zhang@Intel.com>
> 
> In the platform which supporing virtual interrupt delivery feature,
> hardware will clear vIRR atomatically when target vcpu is running.
> So software should not modify vIRR when target vcpu is running. This
> patch will record the virtual interrupt into posted_irr when delivering
> virtual interrupt to guest. And then sync posted_irr into vIRR in target
> vcpu context.
> 
> The patch to enable Posted Interrupt has the similar logic. Since we are
> still discussing it, so split this part from Posted Interrupt patch to
> fix the virtual interrupt delivery issue.
> 
What the point of having this over posted interrupt patches considering
that PI patches do this and more?

> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
> ---
>  arch/x86/kvm/lapic.c |   39 ++++++++++++++++++++++++++++++++++++++-
>  arch/x86/kvm/lapic.h |    3 +++
>  arch/x86/kvm/vmx.c   |    3 ++-
>  3 files changed, 43 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 02b51dd..98bc37e 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -357,6 +357,11 @@ static u8 count_vectors(void *bitmap)
>  	return count;
>  }
>  
> +static inline bool apic_test_irr(int vec, struct kvm_lapic *apic)
> +{
> +	return apic_test_vector(vec, apic->regs + APIC_IRR);
> +}
> +
>  static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
>  {
>  	apic->irr_pending = true;
> @@ -368,6 +373,26 @@ static inline int apic_search_irr(struct kvm_lapic *apic)
>  	return find_highest_vector(apic->regs + APIC_IRR);
>  }
>  
> +static inline void kvm_apic_update_irr(struct kvm_lapic *apic)
> +{
> +	int index;
> +
> +	if (!kvm_x86_ops->vm_has_apicv(apic->vcpu->kvm))
> +		return;
> +
> +	if (apic->posted_irr_changed) {
> +		spin_lock(&apic->posted_irr_lock);
> +		for_each_set_bit(index,
> +			(unsigned long *)(&apic->posted_irr_changed), 8) {
> +			*((u32 *)(apic->regs + APIC_IRR + index * 0x10)) |=
> +						apic->posted_irr[index];
> +			apic->posted_irr[index] = 0;
> +		}
> +		apic->posted_irr_changed = 0;
> +		spin_unlock(&apic->posted_irr_lock);
> +	}
> +}
> +
>  static inline int apic_find_highest_irr(struct kvm_lapic *apic)
>  {
>  	int result;
> @@ -379,6 +404,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
>  	if (!apic->irr_pending)
>  		return -1;
>  
> +	kvm_apic_update_irr(apic);
>  	result = apic_search_irr(apic);
>  	ASSERT(result == -1 || result >= 16);
>  
> @@ -700,7 +726,17 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
>  		} else
>  			apic_clear_vector(vector, apic->regs + APIC_TMR);
>  
> -		result = !apic_test_and_set_irr(vector, apic);
> +		if (kvm_x86_ops->vm_has_apicv(vcpu->kvm)) {
> +			spin_lock(&apic->posted_irr_lock);
> +			if (!apic_test_irr(vector, apic))
> +				result = !test_and_set_bit(vector,
> +					(unsigned long *)apic->posted_irr);
> +			if (result)
> +				apic->posted_irr_changed |= 1 << (vector >> 5);
> +			spin_unlock(&apic->posted_irr_lock);
> +		} else {
> +			result = !apic_test_and_set_irr(vector, apic);
> +		}
>  		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
>  					  trig_mode, vector, !result);
>  		if (!result) {
> @@ -1567,6 +1603,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
>  	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
>  	kvm_lapic_reset(vcpu);
>  	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
> +	spin_lock_init(&apic->posted_irr_lock);
>  
>  	return 0;
>  nomem_free_apic:
> diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
> index 1676d34..38bebc8 100644
> --- a/arch/x86/kvm/lapic.h
> +++ b/arch/x86/kvm/lapic.h
> @@ -20,6 +20,9 @@ struct kvm_lapic {
>  	u32 divide_count;
>  	struct kvm_vcpu *vcpu;
>  	bool irr_pending;
> +	u32 posted_irr[8];
> +	u8 posted_irr_changed;
> +	spinlock_t posted_irr_lock;
>  	/* Number of bits set in ISR. */
>  	s16 isr_count;
>  	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index c1b3041..c8e6036 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -84,7 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
>  static bool __read_mostly fasteoi = 1;
>  module_param(fasteoi, bool, S_IRUGO);
>  
> -static bool __read_mostly enable_apicv_reg_vid;
> +static bool __read_mostly enable_apicv_reg_vid = 1;
> +module_param(enable_apicv_reg_vid, bool, S_IRUGO);
>  
>  /*
>   * If nested=1, nested virtualization is supported, i.e., guests may use
> -- 
> 1.7.1

--
			Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Zhang, Yang Z Feb. 26, 2013, 12:48 p.m. UTC | #2
Gleb Natapov wrote on 2013-02-26:
> On Tue, Feb 26, 2013 at 08:34:16PM +0800, Yang Zhang wrote:
>> From: Yang Zhang <yang.z.zhang@Intel.com>
>> 
>> In the platform which supporing virtual interrupt delivery feature,
>> hardware will clear vIRR atomatically when target vcpu is running.
>> So software should not modify vIRR when target vcpu is running. This
>> patch will record the virtual interrupt into posted_irr when delivering
>> virtual interrupt to guest. And then sync posted_irr into vIRR in target
>> vcpu context.
>> 
>> The patch to enable Posted Interrupt has the similar logic. Since we are
>> still discussing it, so split this part from Posted Interrupt patch to
>> fix the virtual interrupt delivery issue.
>> 
> What the point of having this over posted interrupt patches considering
> that PI patches do this and more?
We are not doing duplicated work. We just split it from the posted interrupt patches and sent it out to fix the current issue related to virtual interrupt delivery. Then we can benefit from the virtual interrupt delivery feature.

>> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
>> ---
>>  arch/x86/kvm/lapic.c |   39 ++++++++++++++++++++++++++++++++++++++-
>>  arch/x86/kvm/lapic.h |    3 +++ arch/x86/kvm/vmx.c   |    3 ++- 3
>>  files changed, 43 insertions(+), 2 deletions(-)
>> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
>> index 02b51dd..98bc37e 100644
>> --- a/arch/x86/kvm/lapic.c
>> +++ b/arch/x86/kvm/lapic.c
>> @@ -357,6 +357,11 @@ static u8 count_vectors(void *bitmap)
>>  	return count;
>>  }
>> +static inline bool apic_test_irr(int vec, struct kvm_lapic *apic)
>> +{
>> +	return apic_test_vector(vec, apic->regs + APIC_IRR);
>> +}
>> +
>>  static inline int apic_test_and_set_irr(int vec, struct kvm_lapic
>>  *apic) { 	apic->irr_pending = true; @@ -368,6 +373,26 @@ static inline
>>  int apic_search_irr(struct kvm_lapic *apic) 	return
>>  find_highest_vector(apic->regs + APIC_IRR); }
>> +static inline void kvm_apic_update_irr(struct kvm_lapic *apic)
>> +{
>> +	int index;
>> +
>> +	if (!kvm_x86_ops->vm_has_apicv(apic->vcpu->kvm))
>> +		return;
>> +
>> +	if (apic->posted_irr_changed) {
>> +		spin_lock(&apic->posted_irr_lock);
>> +		for_each_set_bit(index,
>> +			(unsigned long *)(&apic->posted_irr_changed), 8) {
>> +			*((u32 *)(apic->regs + APIC_IRR + index * 0x10)) |=
>> +						apic->posted_irr[index];
>> +			apic->posted_irr[index] = 0;
>> +		}
>> +		apic->posted_irr_changed = 0;
>> +		spin_unlock(&apic->posted_irr_lock);
>> +	}
>> +}
>> +
>>  static inline int apic_find_highest_irr(struct kvm_lapic *apic)
>>  {
>>  	int result;
>> @@ -379,6 +404,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic
> *apic)
>>  	if (!apic->irr_pending)
>>  		return -1;
>> +	kvm_apic_update_irr(apic);
>>  	result = apic_search_irr(apic);
>>  	ASSERT(result == -1 || result >= 16);
>> @@ -700,7 +726,17 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int
> delivery_mode,
>>  		} else
>>  			apic_clear_vector(vector, apic->regs + APIC_TMR);
>> -		result = !apic_test_and_set_irr(vector, apic);
>> +		if (kvm_x86_ops->vm_has_apicv(vcpu->kvm)) {
>> +			spin_lock(&apic->posted_irr_lock);
>> +			if (!apic_test_irr(vector, apic))
>> +				result = !test_and_set_bit(vector,
>> +					(unsigned long *)apic->posted_irr);
>> +			if (result)
>> +				apic->posted_irr_changed |= 1 << (vector >> 5);
>> +			spin_unlock(&apic->posted_irr_lock);
>> +		} else {
>> +			result = !apic_test_and_set_irr(vector, apic);
>> +		}
>>  		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, 					 
>>  trig_mode, vector, !result); 		if (!result) { @@ -1567,6 +1603,7 @@
>>  int kvm_create_lapic(struct kvm_vcpu *vcpu)
>>  	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset
>>  */ 	kvm_lapic_reset(vcpu); 	kvm_iodevice_init(&apic->dev,
>>  &apic_mmio_ops);
>> +	spin_lock_init(&apic->posted_irr_lock);
>> 
>>  	return 0;
>>  nomem_free_apic:
>> diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
>> index 1676d34..38bebc8 100644
>> --- a/arch/x86/kvm/lapic.h
>> +++ b/arch/x86/kvm/lapic.h
>> @@ -20,6 +20,9 @@ struct kvm_lapic {
>>  	u32 divide_count;
>>  	struct kvm_vcpu *vcpu;
>>  	bool irr_pending;
>> +	u32 posted_irr[8];
>> +	u8 posted_irr_changed;
>> +	spinlock_t posted_irr_lock;
>>  	/* Number of bits set in ISR. */
>>  	s16 isr_count;
>>  	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index c1b3041..c8e6036 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -84,7 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
>>  static bool __read_mostly fasteoi = 1;
>>  module_param(fasteoi, bool, S_IRUGO);
>> -static bool __read_mostly enable_apicv_reg_vid;
>> +static bool __read_mostly enable_apicv_reg_vid = 1;
>> +module_param(enable_apicv_reg_vid, bool, S_IRUGO);
>> 
>>  /*
>>   * If nested=1, nested virtualization is supported, i.e., guests may use
>> --
>> 1.7.1
> 
> --
> 			Gleb.
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html


Best regards,
Yang

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Gleb Natapov Feb. 26, 2013, 12:54 p.m. UTC | #3
On Tue, Feb 26, 2013 at 12:48:15PM +0000, Zhang, Yang Z wrote:
> Gleb Natapov wrote on 2013-02-26:
> > On Tue, Feb 26, 2013 at 08:34:16PM +0800, Yang Zhang wrote:
> >> From: Yang Zhang <yang.z.zhang@Intel.com>
> >> 
> >> In the platform which supporing virtual interrupt delivery feature,
> >> hardware will clear vIRR atomatically when target vcpu is running.
> >> So software should not modify vIRR when target vcpu is running. This
> >> patch will record the virtual interrupt into posted_irr when delivering
> >> virtual interrupt to guest. And then sync posted_irr into vIRR in target
> >> vcpu context.
> >> 
> >> The patch to enable Posted Interrupt has the similar logic. Since we are
> >> still discussing it, so split this part from Posted Interrupt patch to
> >> fix the virtual interrupt delivery issue.
> >> 
> > What the point of having this over posted interrupt patches considering
> > that PI patches do this and more?
> We are not doing duplicated work. Just split it from posted interrupt patches and sent out it to fix the current issue related to virtual interrupt delivery. Then we can benefit from virtual interrupt delivery feature.
> 
It is duplicated work because the PI patch will replace half of the code in
this patch. This patch has the same lock I want to avoid in the PI patches
anyway, so it is not good as is. We have time till 3.10 for PI to make
it, so there is no need to waste energy on half-solutions.

>> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
> >> ---
> >>  arch/x86/kvm/lapic.c |   39 ++++++++++++++++++++++++++++++++++++++-
> >>  arch/x86/kvm/lapic.h |    3 +++ arch/x86/kvm/vmx.c   |    3 ++- 3
> >>  files changed, 43 insertions(+), 2 deletions(-)
> >> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> >> index 02b51dd..98bc37e 100644
> >> --- a/arch/x86/kvm/lapic.c
> >> +++ b/arch/x86/kvm/lapic.c
> >> @@ -357,6 +357,11 @@ static u8 count_vectors(void *bitmap)
> >>  	return count;
> >>  }
> >> +static inline bool apic_test_irr(int vec, struct kvm_lapic *apic)
> >> +{
> >> +	return apic_test_vector(vec, apic->regs + APIC_IRR);
> >> +}
> >> +
> >>  static inline int apic_test_and_set_irr(int vec, struct kvm_lapic
> >>  *apic) { 	apic->irr_pending = true; @@ -368,6 +373,26 @@ static inline
> >>  int apic_search_irr(struct kvm_lapic *apic) 	return
> >>  find_highest_vector(apic->regs + APIC_IRR); }
> >> +static inline void kvm_apic_update_irr(struct kvm_lapic *apic)
> >> +{
> >> +	int index;
> >> +
> >> +	if (!kvm_x86_ops->vm_has_apicv(apic->vcpu->kvm))
> >> +		return;
> >> +
> >> +	if (apic->posted_irr_changed) {
> >> +		spin_lock(&apic->posted_irr_lock);
> >> +		for_each_set_bit(index,
> >> +			(unsigned long *)(&apic->posted_irr_changed), 8) {
> >> +			*((u32 *)(apic->regs + APIC_IRR + index * 0x10)) |=
> >> +						apic->posted_irr[index];
> >> +			apic->posted_irr[index] = 0;
> >> +		}
> >> +		apic->posted_irr_changed = 0;
> >> +		spin_unlock(&apic->posted_irr_lock);
> >> +	}
> >> +}
> >> +
> >>  static inline int apic_find_highest_irr(struct kvm_lapic *apic)
> >>  {
> >>  	int result;
> >> @@ -379,6 +404,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic
> > *apic)
> >>  	if (!apic->irr_pending)
> >>  		return -1;
> >> +	kvm_apic_update_irr(apic);
> >>  	result = apic_search_irr(apic);
> >>  	ASSERT(result == -1 || result >= 16);
> >> @@ -700,7 +726,17 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int
> > delivery_mode,
> >>  		} else
> >>  			apic_clear_vector(vector, apic->regs + APIC_TMR);
> >> -		result = !apic_test_and_set_irr(vector, apic);
> >> +		if (kvm_x86_ops->vm_has_apicv(vcpu->kvm)) {
> >> +			spin_lock(&apic->posted_irr_lock);
> >> +			if (!apic_test_irr(vector, apic))
> >> +				result = !test_and_set_bit(vector,
> >> +					(unsigned long *)apic->posted_irr);
> >> +			if (result)
> >> +				apic->posted_irr_changed |= 1 << (vector >> 5);
> >> +			spin_unlock(&apic->posted_irr_lock);
> >> +		} else {
> >> +			result = !apic_test_and_set_irr(vector, apic);
> >> +		}
> >>  		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, 					 
> >>  trig_mode, vector, !result); 		if (!result) { @@ -1567,6 +1603,7 @@
> >>  int kvm_create_lapic(struct kvm_vcpu *vcpu)
> >>  	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset
> >>  */ 	kvm_lapic_reset(vcpu); 	kvm_iodevice_init(&apic->dev,
> >>  &apic_mmio_ops);
> >> +	spin_lock_init(&apic->posted_irr_lock);
> >> 
> >>  	return 0;
> >>  nomem_free_apic:
> >> diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
> >> index 1676d34..38bebc8 100644
> >> --- a/arch/x86/kvm/lapic.h
> >> +++ b/arch/x86/kvm/lapic.h
> >> @@ -20,6 +20,9 @@ struct kvm_lapic {
> >>  	u32 divide_count;
> >>  	struct kvm_vcpu *vcpu;
> >>  	bool irr_pending;
> >> +	u32 posted_irr[8];
> >> +	u8 posted_irr_changed;
> >> +	spinlock_t posted_irr_lock;
> >>  	/* Number of bits set in ISR. */
> >>  	s16 isr_count;
> >>  	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
> >> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> >> index c1b3041..c8e6036 100644
> >> --- a/arch/x86/kvm/vmx.c
> >> +++ b/arch/x86/kvm/vmx.c
> >> @@ -84,7 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
> >>  static bool __read_mostly fasteoi = 1;
> >>  module_param(fasteoi, bool, S_IRUGO);
> >> -static bool __read_mostly enable_apicv_reg_vid;
> >> +static bool __read_mostly enable_apicv_reg_vid = 1;
> >> +module_param(enable_apicv_reg_vid, bool, S_IRUGO);
> >> 
> >>  /*
> >>   * If nested=1, nested virtualization is supported, i.e., guests may use
> >> --
> >> 1.7.1
> > 
> > --
> > 			Gleb.
> > --
> > To unsubscribe from this list: send the line "unsubscribe kvm" in
> > the body of a message to majordomo@vger.kernel.org
> > More majordomo info at  http://vger.kernel.org/majordomo-info.html
> 
> 
> Best regards,
> Yang

--
			Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd..98bc37e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -357,6 +357,11 @@  static u8 count_vectors(void *bitmap)
 	return count;
 }
 
+static inline bool apic_test_irr(int vec, struct kvm_lapic *apic)
+{
+	return apic_test_vector(vec, apic->regs + APIC_IRR);
+}
+
 static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 {
 	apic->irr_pending = true;
@@ -368,6 +373,26 @@  static inline int apic_search_irr(struct kvm_lapic *apic)
 	return find_highest_vector(apic->regs + APIC_IRR);
 }
 
+static inline void kvm_apic_update_irr(struct kvm_lapic *apic)
+{
+	int index;
+
+	if (!kvm_x86_ops->vm_has_apicv(apic->vcpu->kvm))
+		return;
+
+	if (apic->posted_irr_changed) {
+		spin_lock(&apic->posted_irr_lock);
+		for_each_set_bit(index,
+			(unsigned long *)(&apic->posted_irr_changed), 8) {
+			*((u32 *)(apic->regs + APIC_IRR + index * 0x10)) |=
+						apic->posted_irr[index];
+			apic->posted_irr[index] = 0;
+		}
+		apic->posted_irr_changed = 0;
+		spin_unlock(&apic->posted_irr_lock);
+	}
+}
+
 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 {
 	int result;
@@ -379,6 +404,7 @@  static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 	if (!apic->irr_pending)
 		return -1;
 
+	kvm_apic_update_irr(apic);
 	result = apic_search_irr(apic);
 	ASSERT(result == -1 || result >= 16);
 
@@ -700,7 +726,17 @@  static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		} else
 			apic_clear_vector(vector, apic->regs + APIC_TMR);
 
-		result = !apic_test_and_set_irr(vector, apic);
+		if (kvm_x86_ops->vm_has_apicv(vcpu->kvm)) {
+			spin_lock(&apic->posted_irr_lock);
+			if (!apic_test_irr(vector, apic))
+				result = !test_and_set_bit(vector,
+					(unsigned long *)apic->posted_irr);
+			if (result)
+				apic->posted_irr_changed |= 1 << (vector >> 5);
+			spin_unlock(&apic->posted_irr_lock);
+		} else {
+			result = !apic_test_and_set_irr(vector, apic);
+		}
 		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
 					  trig_mode, vector, !result);
 		if (!result) {
@@ -1567,6 +1603,7 @@  int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
 	kvm_lapic_reset(vcpu);
 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
+	spin_lock_init(&apic->posted_irr_lock);
 
 	return 0;
 nomem_free_apic:
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 1676d34..38bebc8 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -20,6 +20,9 @@  struct kvm_lapic {
 	u32 divide_count;
 	struct kvm_vcpu *vcpu;
 	bool irr_pending;
+	u32 posted_irr[8];
+	u8 posted_irr_changed;
+	spinlock_t posted_irr_lock;
 	/* Number of bits set in ISR. */
 	s16 isr_count;
 	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c1b3041..c8e6036 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -84,7 +84,8 @@  module_param(vmm_exclusive, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
-static bool __read_mostly enable_apicv_reg_vid;
+static bool __read_mostly enable_apicv_reg_vid = 1;
+module_param(enable_apicv_reg_vid, bool, S_IRUGO);
 
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use