@@ -61,6 +61,7 @@ struct irq_desc {
irq_preflow_handler_t preflow_handler;
#endif
struct irqaction *action; /* IRQ action list */
+ atomic_t kstat_irq_cpus; /* #cpus handling this IRQ */
unsigned int status_use_accessors;
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
@@ -244,7 +244,8 @@ static inline void irq_state_set_masked(struct irq_desc *desc)
static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
- __this_cpu_inc(*desc->kstat_irqs);
+ if (unlikely(__this_cpu_inc_return(*desc->kstat_irqs) == 1))
+ atomic_inc(&desc->kstat_irq_cpus);
__this_cpu_inc(kstat.irqs_sum);
}
@@ -922,7 +922,7 @@ unsigned int kstat_irqs(unsigned int irq)
int cpu;
unsigned int sum = 0;
- if (!desc || !desc->kstat_irqs)
+ if (!desc || !desc->kstat_irqs || !atomic_read(&desc->kstat_irq_cpus))
return 0;
for_each_possible_cpu(cpu)
sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
Recent computer systems may have hundreds or even thousands of IRQs available. However, most of them may not be active and their IRQ counts are zero. It is just a waste of CPU cycles to do percpu summation of those zero counts. In order to find out if an IRQ is active, we track the transition of the percpu count from 0 to 1 and atomically increment a new kstat_irq_cpus counter which counts the number of CPUs that handle this particular IRQ. The IRQ descriptor is zalloc'ed, so there is no need to initialize the new counter. On a 4-socket Broadwell server with 112 vCPUs and 2952 IRQs (2877 of them are 0), the system time needed to read /proc/stat 50k times was reduced from 11.200s to 8.048s. That was an execution time reduction of 28%. Signed-off-by: Waiman Long <longman@redhat.com> --- include/linux/irqdesc.h | 1 + kernel/irq/internals.h | 3 ++- kernel/irq/irqdesc.c | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-)