Message ID | 5CC6DEF70200007800229EA3@prv1-mh.provo.novell.com (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | x86: IRQ management adjustments | expand |
On Mon, Apr 29, 2019 at 05:24:39AM -0600, Jan Beulich wrote: > desc->arch.cpu_mask reflects the actual set of target CPUs. Don't ever > fiddle with desc->affinity itself, except to store caller requested > values. > > This renders both set_native_irq_info() uses (which weren't using proper > locking anyway) redundant - drop the function altogether. > > Signed-off-by: Jan Beulich <jbeulich@suse.com> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com> > > --- a/xen/arch/x86/io_apic.c > +++ b/xen/arch/x86/io_apic.c > @@ -1042,7 +1042,6 @@ static void __init setup_IO_APIC_irqs(vo > SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS)); > spin_lock_irqsave(&ioapic_lock, flags); > __ioapic_write_entry(apic, pin, 0, entry); > - set_native_irq_info(irq, TARGET_CPUS); > spin_unlock_irqrestore(&ioapic_lock, flags); > } > } > @@ -2251,7 +2250,6 @@ int io_apic_set_pci_routing (int ioapic, > > spin_lock_irqsave(&ioapic_lock, flags); > __ioapic_write_entry(ioapic, pin, 0, entry); > - set_native_irq_info(irq, TARGET_CPUS); > spin_unlock(&ioapic_lock); > > spin_lock(&desc->lock); > --- a/xen/arch/x86/irq.c > +++ b/xen/arch/x86/irq.c > @@ -572,11 +572,16 @@ int assign_irq_vector(int irq, const cpu > > spin_lock_irqsave(&vector_lock, flags); > ret = __assign_irq_vector(irq, desc, mask ?: TARGET_CPUS); > - if (!ret) { > + if ( !ret ) > + { > ret = desc->arch.vector; > - cpumask_copy(desc->affinity, desc->arch.cpu_mask); > + if ( mask ) > + cpumask_copy(desc->affinity, mask); > + else > + cpumask_setall(desc->affinity); I guess it's fine to use setall instead of copying the cpu online map here? AFAICT __assign_irq_vector already filters offline CPUs from the passed mask. Thanks, Roger.
>>> On 03.05.19 at 18:21, <roger.pau@citrix.com> wrote: > On Mon, Apr 29, 2019 at 05:24:39AM -0600, Jan Beulich wrote: >> desc->arch.cpu_mask reflects the actual set of target CPUs. Don't ever >> fiddle with desc->affinity itself, except to store caller requested >> values. >> >> This renders both set_native_irq_info() uses (which weren't using proper >> locking anyway) redundant - drop the function altogether. >> >> Signed-off-by: Jan Beulich <jbeulich@suse.com> > > Reviewed-by: Roger Pau Monné <roger.pau@citrix.com> Thanks. >> --- a/xen/arch/x86/irq.c >> +++ b/xen/arch/x86/irq.c >> @@ -572,11 +572,16 @@ int assign_irq_vector(int irq, const cpu >> >> spin_lock_irqsave(&vector_lock, flags); >> ret = __assign_irq_vector(irq, desc, mask ?: TARGET_CPUS); >> - if (!ret) { >> + if ( !ret ) >> + { >> ret = desc->arch.vector; >> - cpumask_copy(desc->affinity, desc->arch.cpu_mask); >> + if ( mask ) >> + cpumask_copy(desc->affinity, mask); >> + else >> + cpumask_setall(desc->affinity); > > I guess it's fine to use setall instead of copying the cpu online map > here? It's not only fine, it's actually one of the goals: This way you can set affinities such that they won't need adjustment after bringing up another CPU. I've added a respective sentence to the description. > AFAICT __assign_irq_vector already filters offline CPUs from the > passed mask. Indeed. And all other involved code should, too, by now. I think there is at least one place left somewhere where the online map is used for setting affinities, but I suppose this can be dealt with at another time. Jan
--- a/xen/arch/x86/io_apic.c +++ b/xen/arch/x86/io_apic.c @@ -1042,7 +1042,6 @@ static void __init setup_IO_APIC_irqs(vo SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS)); spin_lock_irqsave(&ioapic_lock, flags); __ioapic_write_entry(apic, pin, 0, entry); - set_native_irq_info(irq, TARGET_CPUS); spin_unlock_irqrestore(&ioapic_lock, flags); } } @@ -2251,7 +2250,6 @@ int io_apic_set_pci_routing (int ioapic, spin_lock_irqsave(&ioapic_lock, flags); __ioapic_write_entry(ioapic, pin, 0, entry); - set_native_irq_info(irq, TARGET_CPUS); spin_unlock(&ioapic_lock); spin_lock(&desc->lock); --- a/xen/arch/x86/irq.c +++ b/xen/arch/x86/irq.c @@ -572,11 +572,16 @@ int assign_irq_vector(int irq, const cpu spin_lock_irqsave(&vector_lock, flags); ret = __assign_irq_vector(irq, desc, mask ?: TARGET_CPUS); - if (!ret) { + if ( !ret ) + { ret = desc->arch.vector; - cpumask_copy(desc->affinity, desc->arch.cpu_mask); + if ( mask ) + cpumask_copy(desc->affinity, mask); + else + cpumask_setall(desc->affinity); } spin_unlock_irqrestore(&vector_lock, flags); + return ret; } @@ -2318,9 +2323,10 @@ static void dump_irqs(unsigned char key) spin_lock_irqsave(&desc->lock, flags); - printk(" IRQ:%4d aff:%*pb vec:%02x %-15s status=%03x ", - irq, nr_cpu_ids, cpumask_bits(desc->affinity), desc->arch.vector, - desc->handler->typename, desc->status); + printk(" IRQ:%4d aff:%*pb/%*pb vec:%02x %-15s status=%03x ", + irq, nr_cpu_ids, cpumask_bits(desc->affinity), + nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask), + desc->arch.vector, desc->handler->typename, desc->status); if ( ssid ) printk("Z=%-25s ", ssid); @@ -2408,8 +2414,7 @@ void fixup_irqs(const cpumask_t *mask, b release_old_vec(desc); } - cpumask_copy(&affinity, desc->affinity); - if ( !desc->action || cpumask_subset(&affinity, mask) ) + if ( !desc->action || cpumask_subset(desc->affinity, mask) ) { spin_unlock(&desc->lock); continue; } @@ -2433,12 +2438,13 @@ void fixup_irqs(const cpumask_t *mask, b desc->arch.move_in_progress = 0; } - cpumask_and(&affinity, &affinity, mask); - if ( cpumask_empty(&affinity) ) + if ( !cpumask_intersects(mask, desc->affinity) ) { break_affinity = true; - cpumask_copy(&affinity, mask); + cpumask_setall(&affinity); } + else + cpumask_copy(&affinity, desc->affinity); if ( desc->handler->disable ) desc->handler->disable(desc); --- a/xen/include/xen/irq.h +++ b/xen/include/xen/irq.h @@ -162,11 +162,6 @@ extern irq_desc_t *domain_spin_lock_irq_ extern irq_desc_t *pirq_spin_lock_irq_desc( const struct pirq *, unsigned long *pflags); -static inline void set_native_irq_info(unsigned int irq, const cpumask_t *mask) -{ - cpumask_copy(irq_to_desc(irq)->affinity, mask); -} - unsigned int set_desc_affinity(struct irq_desc *, const cpumask_t *); #ifndef arch_hwdom_irqs
desc->arch.cpu_mask reflects the actual set of target CPUs. Don't ever fiddle with desc->affinity itself, except to store caller requested values. This renders both set_native_irq_info() uses (which weren't using proper locking anyway) redundant - drop the function altogether. Signed-off-by: Jan Beulich <jbeulich@suse.com>