@@ -239,6 +239,7 @@ struct vgic_cpu {
struct vgic_io_device rd_iodev;
struct vgic_redist_region *rdreg;
uint32_t rdreg_index;
+    atomic_t syncr_busy;
struct vgic_io_device sgi_iodev;
/* Contains the attributes and gpa of the LPI pending tables. */
@@ -583,6 +583,65 @@ static void vgic_mmio_write_pendbase(struct vcpu *vcpu, paddr_t addr,
old_pendbaser );
}
+static unsigned long vgic_mmio_read_sync(struct vcpu *vcpu, paddr_t addr,
+                                         unsigned int len)
+{
+    return !!atomic_read(&vcpu->arch.vgic.syncr_busy);
+}
+
+static void vgic_set_rdist_busy(struct vcpu *vcpu, bool busy)
+{
+    if ( busy )
+    {
+        atomic_inc(&vcpu->arch.vgic.syncr_busy);
+        smp_mb__after_atomic();
+    }
+    else
+    {
+        smp_mb__before_atomic();
+        atomic_dec(&vcpu->arch.vgic.syncr_busy);
+    }
+}
+
+static void vgic_mmio_write_invlpi(struct vcpu *vcpu, paddr_t addr,
+                                   unsigned int len, unsigned long val)
+{
+    struct vgic_irq *irq;
+
+    /*
+     * If the guest wrote only to the upper 32bit part of the
+     * register, drop the write on the floor, as it is only for
+     * vPEs (which we don't support for obvious reasons).
+     *
+     * Also discard the access if LPIs are not enabled.
+     */
+    if ( (addr & 4) || !vgic_lpis_enabled(vcpu) )
+        return;
+
+    vgic_set_rdist_busy(vcpu, true);
+
+    irq = vgic_get_irq(vcpu->domain, NULL, val & 0xffffffff);
+    if ( irq )
+    {
+        vgic_its_inv_lpi(vcpu->domain, irq);
+        vgic_put_irq(vcpu->domain, irq);
+    }
+
+    vgic_set_rdist_busy(vcpu, false);
+}
+
+static void vgic_mmio_write_invall(struct vcpu *vcpu, paddr_t addr,
+                                   unsigned int len, unsigned long val)
+{
+    /* See vgic_mmio_write_invlpi() for the early return rationale */
+    if ( (addr & 4) || !vgic_lpis_enabled(vcpu) )
+        return;
+
+    vgic_set_rdist_busy(vcpu, true);
+    vgic_its_invall(vcpu);
+    vgic_set_rdist_busy(vcpu, false);
+}
+
static const struct vgic_register_region vgic_v3_dist_registers[] = {
REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc,
@@ -655,13 +714,13 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_INVLPIR,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+ vgic_mmio_read_raz, vgic_mmio_write_invlpi, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_INVALLR,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+ vgic_mmio_read_raz, vgic_mmio_write_invall, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_SYNCR,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+ vgic_mmio_read_sync, vgic_mmio_write_wi, 4,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
@@ -153,4 +153,19 @@ unsigned long extract_bytes(uint64_t data, unsigned int offset,
uint64_t update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
unsigned long val);
+#ifdef CONFIG_HAS_ITS
+int vgic_its_inv_lpi(struct domain *d, struct vgic_irq *irq);
+int vgic_its_invall(struct vcpu *vcpu);
+#else
+static inline int vgic_its_inv_lpi(struct domain *d, struct vgic_irq *irq)
+{
+    return 0;
+}
+
+static inline int vgic_its_invall(struct vcpu *vcpu)
+{
+    return 0;
+}
+#endif
+
#endif
Since GICv4.1, it has become legal for an implementation to advertise
GICR_{INVLPIR,INVALLR,SYNCR} while having an ITS, allowing for a more
efficient invalidation scheme (no guest command queue contention when
multiple CPUs are generating invalidations).

Provide the invalidation registers as a primitive to their ITS
counterpart. Note that we don't advertise them to the guest yet.

Based on Linux commit 4645d11f4a553 by Marc Zyngier

Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
---
 xen/arch/arm/include/asm/new_vgic.h |  1 +
 xen/arch/arm/vgic/vgic-mmio-v3.c    | 65 +++++++++++++++++++++++++++--
 xen/arch/arm/vgic/vgic-mmio.h       | 15 +++++++
 3 files changed, 78 insertions(+), 3 deletions(-)
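
For context, below is a rough sketch (not part of the patch) of how a guest
would be expected to drive these registers once they are advertised. The
GICR_INVLPIR/GICR_INVALLR/GICR_SYNCR offsets are the architectural
redistributor offsets from the GIC specification; rdist_base and the MMIO
accessors are placeholders standing in for whatever helpers the guest OS
provides.

/*
 * Illustrative only: a guest-side view of the direct LPI invalidation
 * registers emulated above. rdist_base and the accessors below are
 * stand-ins for the guest kernel's own MMIO helpers.
 */
#include <stdint.h>

#define GICR_INVLPIR  0x00A0U  /* write INTID: invalidate one LPI            */
#define GICR_INVALLR  0x00B0U  /* write: invalidate all LPIs on this redist  */
#define GICR_SYNCR    0x00C0U  /* bit 0 (Busy) set while invalidation runs   */

static inline void mmio_write64(volatile void *addr, uint64_t val)
{
    *(volatile uint64_t *)addr = val;
}

static inline uint32_t mmio_read32(volatile void *addr)
{
    return *(volatile uint32_t *)addr;
}

/* Invalidate the cached configuration of one LPI and wait for completion. */
static void gicr_inv_lpi(volatile uint8_t *rdist_base, uint32_t intid)
{
    /* Only the lower 32 bits carry the INTID; the upper half is vPE-only. */
    mmio_write64(rdist_base + GICR_INVLPIR, intid);

    /* GICR_SYNCR.Busy is backed by syncr_busy in the emulation above. */
    while ( mmio_read32(rdist_base + GICR_SYNCR) & 1 )
        ;
}

/* Invalidate all LPIs targeting this redistributor. */
static void gicr_inv_all(volatile uint8_t *rdist_base)
{
    mmio_write64(rdist_base + GICR_INVALLR, 0);
    while ( mmio_read32(rdist_base + GICR_SYNCR) & 1 )
        ;
}

In this trap-and-emulate model the write handler has already run to
completion by the time the guest can poll, so GICR_SYNCR.Busy should
normally read as 0 on the first poll. The gain over issuing INV/INVALL
through the ITS is that no guest command queue is contended when several
vCPUs invalidate concurrently, which is the scheme the commit message
refers to.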