@@ -704,6 +704,10 @@ struct kvm_x86_ops {
void (*hwapic_isr_update)(struct kvm *kvm, int isr);
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
+ bool (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ void (*posted_intr_clear_on)(struct kvm_vcpu *vcpu);
+ void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+ bool (*hwapic_has_interrupt)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(void);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
@@ -357,6 +357,19 @@ static u8 count_vectors(void *bitmap)
return count;
}
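+/*
+ * Fold interrupts pending in the posted-interrupt request array (PIR)
+ * into the vAPIC IRR: each of the eight 32-bit IRR registers sits at
+ * APIC_IRR + i * 0x10, and xchg() clears each PIR word as it is read.
+ */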
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+{
+ u32 i, pir_val;
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ for (i = 0; i <= 7; i++) {
+ pir_val = xchg(&pir[i], 0);
+ if (pir_val)
+ *((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
+
static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
{
apic->irr_pending = true;
@@ -157,5 +157,6 @@ static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
struct kvm_lapic_irq *irq,
u64 *eoi_bitmap);
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
#endif
@@ -3591,6 +3591,26 @@ static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
return;
}
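+/*
+ * SVM has no hardware posted-interrupt support here, so these callbacks
+ * are no-op stubs: nothing is ever posted and there is nothing to sync.
+ */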
+static bool svm_hwapic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+ return;
+}
+
+static bool svm_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+{
+ return false;
+}
+
+static void svm_posted_intr_clear_on(struct kvm_vcpu *vcpu)
+{
+ return;
+}
+
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4319,6 +4339,10 @@ static struct kvm_x86_ops svm_x86_ops = {
.vm_has_apicv = svm_vm_has_apicv,
.load_eoi_exitmap = svm_load_eoi_exitmap,
.hwapic_isr_update = svm_hwapic_isr_update,
+ .sync_pir_to_irr = svm_sync_pir_to_irr,
+ .hwapic_has_interrupt = svm_hwapic_has_interrupt,
+ .deliver_posted_interrupt = svm_deliver_posted_interrupt,
+ .posted_intr_clear_on = svm_posted_intr_clear_on,
.set_tss_addr = svm_set_tss_addr,
.get_tdp_level = get_npt_level,
@@ -379,6 +379,17 @@ struct pi_desc {
} u;
} __aligned(64);
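+/*
+ * Atomic test-and-set helpers for the posted-interrupt descriptor: the
+ * ON bit marks an outstanding notification, the PIR bits record which
+ * vectors are pending. Both return the previous value of the bit.
+ */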
+static bool pi_test_and_set_on(struct pi_desc *pi_desc)
+{
+ return test_and_set_bit(POSTED_INTR_ON,
+ (unsigned long *)&pi_desc->u.control);
+}
+
+static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+{
+ return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
+}
+
struct vcpu_vmx {
struct kvm_vcpu vcpu;
unsigned long host_rsp;
@@ -3911,6 +3922,58 @@ static int vmx_vm_has_apicv(struct kvm *kvm)
return enable_apicv && irqchip_in_kernel(kvm);
}
+static void vmx_posted_intr_clear_on(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx_vm_has_apicv(vcpu->kvm))
+ return;
+
+ clear_bit(POSTED_INTR_ON, (unsigned long *)&vmx->pi_desc.u.control);
+}
+
+static bool vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx_vm_has_apicv(vcpu->kvm))
+ return false;
+
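+ /* The vector is already recorded in the PIR; nothing more to post. */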
+ if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+ return true;
+
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
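+ /*
+  * A vCPU in guest mode gets a notification IPI (unless the ON bit was
+  * already set, meaning one is outstanding), so the CPU injects the
+  * interrupt without a VM exit; otherwise kick the vCPU so it notices
+  * the pending KVM_REQ_EVENT.
+  */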
+ if (vcpu->mode == IN_GUEST_MODE) {
+ if (!pi_test_and_set_on(&vmx->pi_desc))
+ apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+ POSTED_INTR_VECTOR);
+ } else
+ kvm_vcpu_kick(vcpu);
+
+ return true;
+}
+
+static bool vmx_hwapic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx_vm_has_apicv(vcpu->kvm))
+ return false;
+
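+ /* Any bit set in the 256-bit PIR means a posted interrupt is pending. */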
+ if (bitmap_empty((unsigned long *)vmx->pi_desc.pir, 256))
+ return false;
+
+ return true;
+}
+
+static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
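+ /* Merge any pending posted interrupts from the PIR into the vAPIC IRR. */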
+ if (vmx_hwapic_has_interrupt(vcpu))
+ kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+}
+
/*
* Set up the vmcs's constant host-state fields, i.e., host-state fields that
* will not change in the lifetime of the guest.
@@ -7735,6 +7798,10 @@ static struct kvm_x86_ops vmx_x86_ops = {
.load_eoi_exitmap = vmx_load_eoi_exitmap,
.hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,
+ .sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .hwapic_has_interrupt = vmx_hwapic_has_interrupt,
+ .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+ .posted_intr_clear_on = vmx_posted_intr_clear_on,
.set_tss_addr = vmx_set_tss_addr,
.get_tdp_level = get_ept_level,
@@ -1692,6 +1692,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
smp_send_reschedule(cpu);
put_cpu();
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
#endif /* !CONFIG_S390 */
void kvm_resched(struct kvm_vcpu *vcpu)