@@ -38,7 +38,7 @@ const struct gic_hw_operations *gic_hw_ops;
static void __init __maybe_unused build_assertions(void)
{
/* Check our enum gic_sgi only covers SGIs */
- BUILD_BUG_ON(GIC_SGI_MAX > NR_GIC_SGI);
+ BUILD_BUG_ON(GIC_SGI_STATIC_MAX > NR_GIC_SGI);
}
void register_gic_ops(const struct gic_hw_operations *ops)
@@ -117,7 +117,9 @@ void gic_route_irq_to_xen(struct irq_desc *desc, unsigned int priority)
desc->handler = gic_hw_ops->gic_host_irq_type;
- gic_set_irq_type(desc, desc->arch.type);
+ /* SGIs are always edge-triggered, so there is no need to set it */
+ if ( desc->irq >= NR_GIC_SGI )
+ gic_set_irq_type(desc, desc->arch.type);
gic_set_irq_priority(desc, priority);
}
@@ -322,7 +324,7 @@ void gic_disable_cpu(void)
gic_hw_ops->disable_interface();
}
-static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
+static void do_static_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
{
struct irq_desc *desc = irq_to_desc(sgi);
@@ -367,7 +369,7 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
/* Reading IRQ will ACK it */
irq = gic_hw_ops->read_irq();
- if ( likely(irq >= 16 && irq < 1020) )
+ if ( likely(irq >= GIC_SGI_STATIC_MAX && irq < 1020) )
{
isb();
do_IRQ(regs, irq, is_fiq);
@@ -379,7 +381,7 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
}
else if ( unlikely(irq < 16) )
{
- do_sgi(regs, irq);
+ do_static_sgi(regs, irq);
}
else
{
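
For reference, not part of the patch: the GIC INTID ranges behind the literals 16 and 1020 used in gic_interrupt() above, together with the Xen constants that name the boundaries (NR_GIC_SGI is 16 and NR_GIC_LOCAL_IRQS is 32 in asm/gic.h):

    /*
     *      0 .. 15     SGIs (software-generated, always edge-triggered)
     *     16 .. 31     PPIs (private per-CPU peripheral interrupts)
     *     32 .. 1019   SPIs (shared peripheral interrupts)
     *   1020 .. 1023   special INTIDs (e.g. the spurious interrupt ID)
     */
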
@@ -285,7 +285,7 @@ enum gic_sgi {
GIC_SGI_EVENT_CHECK,
GIC_SGI_DUMP_STATE,
GIC_SGI_CALL_FUNCTION,
- GIC_SGI_MAX,
+ GIC_SGI_STATIC_MAX,
};
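
For context, a sketch rather than part of the patch: assuming the enum starts at 0 as the hunk context suggests, GIC_SGI_STATIC_MAX is both the number of SGI IDs Xen claims statically and the first SGI ID left free for other uses, and the build_assertions() hunk above keeps it within the 16 SGIs the GIC architecture provides. A minimal stand-alone illustration of that compile-time check, using one common idiom (Xen's real BUILD_BUG_ON lives in xen/lib.h and may be defined differently):

    /* Compile-time assert: a negative array size aborts the build when cond is non-zero. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    static void __maybe_unused check_static_sgis(void)
    {
        /* NR_GIC_SGI is 16 (SGI IDs 0..15); every statically assigned SGI must fit. */
        BUILD_BUG_ON(GIC_SGI_STATIC_MAX > NR_GIC_SGI);
    }
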
/* SGI irq mode types */
@@ -142,7 +142,13 @@ void __init init_IRQ(void)
spin_lock(&local_irqs_type_lock);
for ( irq = 0; irq < NR_LOCAL_IRQS; irq++ )
- local_irqs_type[irq] = IRQ_TYPE_INVALID;
+ {
+ /* SGIs are always edge-triggered */
+ if ( irq < NR_GIC_SGI )
+ local_irqs_type[irq] = IRQ_TYPE_EDGE_RISING;
+ else
+ local_irqs_type[irq] = IRQ_TYPE_INVALID;
+ }
spin_unlock(&local_irqs_type_lock);
BUG_ON(init_local_irq_data(smp_processor_id()) < 0);
@@ -214,9 +220,12 @@ void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
perfc_incr(irqs);
- ASSERT(irq >= 16); /* SGIs do not come down this path */
+ /* Statically assigned SGIs do not come down this path */
+ ASSERT(irq >= GIC_SGI_STATIC_MAX);
- if ( irq < 32 )
+ if ( irq < NR_GIC_SGI )
+ perfc_incr(ipis);
+ else if ( irq < NR_GIC_LOCAL_IRQS )
perfc_incr(ppis);
else
perfc_incr(spis);
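
A note on the perfc_incr(ipis) call added above (an assumption about the rest of the series, not something this excerpt shows): counting SGIs that now travel through do_IRQ() as IPIs needs a matching counter declaration, if one is not already present, along the lines of the existing irqs/ppis/spis entries in the arm perfc_defn.h:

    /* Sketch only; the description string is illustrative. */
    PERFCOUNTER(ipis, "#IPIs")
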
@@ -250,6 +259,7 @@ void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
* The irq cannot be a PPI, we only support delivery of SPIs to
* guests.
*/
+ ASSERT(irq >= NR_GIC_SGI);
vgic_inject_irq(info->d, NULL, info->virq, true);
goto out_no_end;
}
@@ -386,7 +396,7 @@ int setup_irq(unsigned int irq, unsigned int irqflags, struct irqaction *new)
{
gic_route_irq_to_xen(desc, GIC_PRI_IRQ);
/* It's fine to use smp_processor_id() because:
- * For PPI: irq_desc is banked
+ * For SGI and PPI: irq_desc is banked
* For SPI: we don't care for now which CPU will receive the
* interrupt
* TODO: Handle case where SPI is setup on different CPU than
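
On "irq_desc is banked": for SGIs and PPIs (IRQ < 32) each CPU has its own descriptor, so the smp_processor_id() use in setup_irq() always resolves to the local copy. A sketch of the lookup, loosely following __irq_to_desc() in xen/arch/arm/irq.c (names reproduced from memory, treat as illustrative):

    struct irq_desc *__irq_to_desc(int irq)
    {
        /* SGIs and PPIs are per-CPU ("banked"): one descriptor per CPU. */
        if ( irq < NR_LOCAL_IRQS )
            return &this_cpu(local_irq_desc)[irq];

        /* SPIs are global: a single shared descriptor array. */
        return &irq_desc[irq - NR_LOCAL_IRQS];
    }
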