@@ -577,7 +577,7 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 }
 
-static void vgic_its_invalidate_cache(struct kvm *kvm)
+void vgic_its_invalidate_cache(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_translation_cache_entry *cte;
@@ -200,8 +200,10 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
 
 	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
 
-	if (was_enabled && !vgic_cpu->lpis_enabled)
+	if (was_enabled && !vgic_cpu->lpis_enabled) {
 		vgic_flush_pending_lpis(vcpu);
+		vgic_its_invalidate_cache(vcpu->kvm);
+	}
 
 	if (!was_enabled && vgic_cpu->lpis_enabled)
 		vgic_enable_lpis(vcpu);
@@ -318,6 +318,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 			 u32 devid, u32 eventid, struct vgic_irq **irq);
 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
 void vgic_lpi_translation_cache_init(struct kvm *kvm);
+void vgic_its_invalidate_cache(struct kvm *kvm);
 
 #define LPI_CACHE_SIZE(kvm) (atomic_read(&(kvm)->online_vcpus) * 4)
If a vcpu disables LPIs at its redistributor level, we need to make
sure we won't pend more interrupts. For this, we need to invalidate
the LPI translation cache.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 virt/kvm/arm/vgic/vgic-its.c     | 2 +-
 virt/kvm/arm/vgic/vgic-mmio-v3.c | 4 +++-
 virt/kvm/arm/vgic/vgic.h         | 1 +
 3 files changed, 5 insertions(+), 2 deletions(-)
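[Editor's sketch, not part of the patch] For readers who only have the hunks
above, here is a minimal sketch of what the invalidation path could look
like, inferred from the context lines of the first hunk. Only the function
signature, the vgic_dist and vgic_translation_cache_entry types, and the
lpi_list_lock usage appear in the patch; the lpi_translation_cache list head,
the entry/irq field names, and the __vgic_put_lpi_locked() helper are
assumptions borrowed from the surrounding series, not shown here.

	/* Sketch only: list/field names and the put helper are assumptions. */
	void vgic_its_invalidate_cache(struct kvm *kvm)
	{
		struct vgic_dist *dist = &kvm->arch.vgic;
		struct vgic_translation_cache_entry *cte;
		unsigned long flags;

		raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

		list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
			/*
			 * Entries are assumed to be kept packed at the head of
			 * the list, so the first empty slot ends the walk.
			 */
			if (!cte->irq)
				break;

			/* Drop the reference the cache held on the LPI... */
			__vgic_put_lpi_locked(kvm, cte->irq);
			/* ...and mark the slot as free for reuse. */
			cte->irq = NULL;
		}

		raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
	}

Doing the walk under lpi_list_lock means a concurrent cache lookup sees
either a valid entry before the invalidation or an empty slot after it, never
a stale pointer; a later MSI then misses in the cache and takes the full
resolution path, where the usual lpis_enabled check applies again.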