--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -151,6 +151,7 @@ struct irq_domain_chip_generic;
* drivers using the generic chip library which uses this pointer.
* @parent: Pointer to parent irq_domain to support hierarchy irq_domains
* @debugfs_file: dentry for the domain debugfs file
+ * @max_affinity: mask of CPUs targetable by this IRQ domain
*
* Revmap data, used internally by irq_domain
* @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -177,6 +178,7 @@ struct irq_domain {
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
struct dentry *debugfs_file;
#endif
+ const struct cpumask *max_affinity;
/* reverse map data. The linear map gets appended to the irq_domain */
irq_hw_number_t hwirq_max;
@@ -453,6 +455,8 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
void *chip_data, irq_flow_handler_t handler,
void *handler_data, const char *handler_name);
extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
+extern int irq_domain_set_affinity(struct irq_domain *domain,
+ const struct cpumask *affinity);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
unsigned int flags, unsigned int size,
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -661,7 +661,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
/* Allocate a virtual interrupt number */
virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
- NULL, NULL);
+ NULL, domain->max_affinity);
if (virq <= 0) {
pr_debug("-> virq allocation failed\n");
return 0;
@@ -1078,6 +1078,27 @@ void irq_domain_reset_irq_data(struct irq_data *irq_data)
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
+/**
+ * irq_domain_set_affinity - Set maximum CPU affinity for domain
+ * @domain: Domain to set affinity for
+ * @affinity: Pointer to cpumask, retained by the domain
+ *
+ * Sets the maximal set of CPUs to which interrupts in this domain may
+ * be delivered. Must only be called after the domain has been created and
+ * before any interrupts have been created in the domain.
+ *
+ * This function retains a pointer to the cpumask which is passed in, so
+ * the mask must remain valid for the lifetime of the domain.
+ *
+ * Return: 0 on success, -EINVAL if @affinity is NULL or empty.
+ */
+int irq_domain_set_affinity(struct irq_domain *domain,
+ const struct cpumask *affinity)
+{
+	if (!affinity || cpumask_empty(affinity))
+ return -EINVAL;
+ domain->max_affinity = affinity;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_set_affinity);
+
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
* irq_domain_create_hierarchy - Add a irqdomain into the hierarchy
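As an aside (not part of the patch): a hypothetical irqchip driver could use the new call right after creating its domain. Everything named fictional_* below is an assumption for illustration, as is the 4-CPU cluster topology; the cpumask is static because the domain only stores the pointer.

#include <linux/cpumask.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static const struct irq_domain_ops fictional_irqchip_domain_ops;
static struct cpumask fictional_cluster0_mask;

static int fictional_irqchip_init(struct device_node *node)
{
	struct irq_domain *d;
	unsigned int cpu;
	int ret;

	d = irq_domain_add_linear(node, 32, &fictional_irqchip_domain_ops,
				  NULL);
	if (!d)
		return -ENOMEM;

	/* Assumed topology: this controller can only target CPUs 0-3. */
	for (cpu = 0; cpu < 4; cpu++)
		cpumask_set_cpu(cpu, &fictional_cluster0_mask);

	/* Must run before any interrupt is created in the domain. */
	ret = irq_domain_set_affinity(d, &fictional_cluster0_mask);
	if (ret) {
		irq_domain_remove(d);
		return ret;
	}

	return 0;
}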
@@ -1110,6 +1131,9 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
if (domain) {
domain->parent = parent;
domain->flags |= flags;
+ if (parent && parent->max_affinity)
+ irq_domain_set_affinity(domain,
+ parent->max_affinity);
}
return domain;
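Because irq_domain_create_hierarchy() now propagates the parent's mask, a stacked child domain inherits the restriction without any explicit call. A minimal sketch, reusing the fictional controller above with an assumed fictional_msi_domain_ops:

#include <linux/irqdomain.h>

static const struct irq_domain_ops fictional_msi_domain_ops;

static struct irq_domain *
fictional_msi_init(struct irq_domain *parent, struct fwnode_handle *fwnode)
{
	/* The child's max_affinity is copied from @parent by the hunk above. */
	return irq_domain_create_hierarchy(parent, 0, 64, fwnode,
					   &fictional_msi_domain_ops, NULL);
}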
@@ -1375,7 +1399,7 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
virq = irq_base;
} else {
virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
- affinity, NULL);
+ affinity, domain->max_affinity);
if (virq < 0) {
pr_debug("cannot allocate IRQ(base %d, count %d)\n",
irq_base, nr_irqs);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -345,6 +345,10 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
struct irq_desc *desc = irq_data_to_desc(data);
int ret = 0;
+ if (data->domain && data->domain->max_affinity &&
+ !cpumask_subset(mask, data->domain->max_affinity))
+ return -EINVAL;
+
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
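The user-visible effect of this check, sketched below under the assumption that the interrupt's domain limits delivery to CPUs 0-3: a request targeting CPU 5 now fails with -EINVAL before it ever reaches the irqchip.

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static int fictional_try_bad_affinity(unsigned int virq)
{
	struct cpumask m;

	cpumask_clear(&m);
	cpumask_set_cpu(5, &m);	/* CPU 5 lies outside max_affinity */

	/* Fails the cpumask_subset() test above and returns -EINVAL. */
	return irq_set_affinity(virq, &m);
}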
@@ -484,13 +488,20 @@ int irq_setup_affinity(struct irq_desc *desc)
struct cpumask *set = irq_default_affinity;
int ret, node = irq_desc_get_node(desc);
static DEFINE_RAW_SPINLOCK(mask_lock);
- static struct cpumask mask;
+ static struct cpumask mask, max_mask;
/* Excludes PER_CPU and NO_BALANCE interrupts */
if (!__irq_can_set_affinity(desc))
return 0;
raw_spin_lock(&mask_lock);
+
+ if (desc->irq_data.domain && desc->irq_data.domain->max_affinity)
+ cpumask_and(&max_mask, cpu_online_mask,
+ desc->irq_data.domain->max_affinity);
+ else
+ cpumask_copy(&max_mask, cpu_online_mask);
+
/*
* Preserve the managed affinity setting and a userspace affinity
* setup, but make sure that one of the targets is online.
@@ -498,15 +509,15 @@ int irq_setup_affinity(struct irq_desc *desc)
if (irqd_affinity_is_managed(&desc->irq_data) ||
irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
if (cpumask_intersects(desc->irq_common_data.affinity,
- cpu_online_mask))
+ &max_mask))
set = desc->irq_common_data.affinity;
else
irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
}
- cpumask_and(&mask, cpu_online_mask, set);
+ cpumask_and(&mask, &max_mask, set);
if (cpumask_empty(&mask))
- cpumask_copy(&mask, cpu_online_mask);
+ cpumask_copy(&mask, &max_mask);
if (node != NUMA_NO_NODE) {
const struct cpumask *nodemask = cpumask_of_node(node);
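To make the derivation above concrete, here is a standalone sketch of the same cpumask arithmetic with assumed values: CPUs 0-7 online, a domain max_affinity of {0-3}, and a user-configured affinity of {2,6}.

#include <linux/cpumask.h>

static void sketch_affinity_math(void)
{
	struct cpumask max_aff, set, max_mask, mask;
	unsigned int cpu;

	cpumask_clear(&max_aff);
	cpumask_clear(&set);
	for (cpu = 0; cpu < 4; cpu++)		/* domain allows CPUs 0-3 */
		cpumask_set_cpu(cpu, &max_aff);
	cpumask_set_cpu(2, &set);		/* user asked for {2,6} */
	cpumask_set_cpu(6, &set);

	/* Same steps as irq_setup_affinity(), assuming CPUs 0-7 online: */
	cpumask_and(&max_mask, cpu_online_mask, &max_aff);	/* {0-3} */
	cpumask_and(&mask, &max_mask, &set);			/* {2}   */
	if (cpumask_empty(&mask))		/* not taken in this example */
		cpumask_copy(&mask, &max_mask);
}

The interrupt therefore lands on CPU 2, the only requested CPU that the domain can actually reach.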