[v2,05/12] irqchip/armada-370-xp: Put static variables into driver private structure

Message ID 20240807164104.4140-6-kabel@kernel.org
State New, archived
Series armada-370-xp irqchip updates round 5

Commit Message

Marek Behún Aug. 7, 2024, 4:40 p.m. UTC
In preparation for converting the driver to the modern style, put all of the
interrupt controller's private static variables into a driver private
structure.

Access to these variables changes as follows:
  main_int_base		mpic->base
  per_cpu_int_base	mpic->per_cpu
  mpic_domain		mpic->domain
  parent_irq		mpic->parent_irq
  ...
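
For illustration, the shape of the change (a condensed sketch using the
names above, not the actual driver code):

  /* before: file-scope statics */
  static void __iomem *main_int_base;
  static struct irq_domain *mpic_domain;

  /* after: one private structure behind a singleton pointer */
  struct mpic {
          void __iomem *base;
          struct irq_domain *domain;
  };

  static struct mpic mpic_data;
  static struct mpic * const mpic = &mpic_data;

  /* call sites then use mpic->base instead of main_int_base */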

Signed-off-by: Marek Behún <kabel@kernel.org>
---
 drivers/irqchip/irq-armada-370-xp.c | 223 +++++++++++++++-------------
 1 file changed, 122 insertions(+), 101 deletions(-)
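
Note: because the structure is reached through a const singleton pointer
(mpic), every access already reads as if the driver had a per-instance
context. A later patch doing the modern-style conversion could presumably
allocate the structure dynamically with little extra churn, along the lines
of this hypothetical sketch (not part of this patch):

  struct mpic *mpic = kzalloc(sizeof(*mpic), GFP_KERNEL);

  if (!mpic)
          return -ENOMEM;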

Patch

diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 07004ecec165..00f38428d2ba 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -66,11 +66,11 @@ 
  *
  * The "global interrupt mask/unmask" is modified using the
  * MPIC_INT_SET_ENABLE and MPIC_INT_CLEAR_ENABLE
- * registers, which are relative to "main_int_base".
+ * registers, which are relative to "mpic->base".
  *
  * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK
  * and MPIC_INT_CLEAR_MASK registers, which are relative to
- * "per_cpu_int_base". This base address points to a special address,
+ * "mpic->per_cpu". This base address points to a special address,
  * which automatically accesses the registers of the current CPU.
  *
  * The per-CPU mask/unmask can also be adjusted using the global
@@ -112,7 +112,7 @@ 
  *    at the per-CPU level.
  */
 
-/* Registers relative to main_int_base */
+/* Registers relative to mpic->base */
 #define MPIC_INT_CONTROL			0x00
 #define MPIC_INT_CONTROL_NUMINT_MASK		GENMASK(12, 2)
 #define MPIC_SW_TRIG_INT			0x04
@@ -122,7 +122,7 @@ 
 #define MPIC_INT_SOURCE_CPU_MASK		GENMASK(3, 0)
 #define MPIC_INT_IRQ_FIQ_MASK(cpuid)		((BIT(0) | BIT(8)) << (cpuid))
 
-/* Registers relative to per_cpu_int_base */
+/* Registers relative to mpic->per_cpu */
 #define MPIC_IN_DRBEL_CAUSE			0x08
 #define MPIC_IN_DRBEL_MASK			0x0c
 #define MPIC_PPI_CAUSE				0x10
@@ -149,18 +149,40 @@ 
 #define PCI_MSI_FULL_DOORBELL_SRC0_MASK		GENMASK(15, 0)
 #define PCI_MSI_FULL_DOORBELL_SRC1_MASK		GENMASK(31, 16)
 
-static void __iomem *per_cpu_int_base;
-static void __iomem *main_int_base;
-static struct irq_domain *mpic_domain;
-static u32 doorbell_mask_reg;
-static int parent_irq;
+/**
+ * struct mpic - MPIC private data structure
+ * @base:		MPIC registers base address
+ * @per_cpu:		per-CPU registers base address
+ * @parent_irq:		parent IRQ if MPIC is not top-level interrupt controller
+ * @domain:		MPIC main interrupt domain
+ * @ipi_domain:		IPI domain
+ * @msi_domain:		MSI domain
+ * @msi_inner_domain:	MSI inner domain
+ * @msi_used:		bitmap of used MSI numbers
+ * @msi_lock:		mutex serializing access to @msi_used
+ * @msi_doorbell_addr:	physical address of MSI doorbell register
+ * @doorbell_mask:	doorbell mask of MSIs and IPIs, stored on suspend, restored on resume
+ */
+struct mpic {
+	void __iomem *base;
+	void __iomem *per_cpu;
+	int parent_irq;
+	struct irq_domain *domain;
+#ifdef CONFIG_SMP
+	struct irq_domain *ipi_domain;
+#endif
 #ifdef CONFIG_PCI_MSI
-static struct irq_domain *mpic_msi_domain;
-static struct irq_domain *mpic_msi_inner_domain;
-static DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
-static DEFINE_MUTEX(msi_used_lock);
-static phys_addr_t msi_doorbell_addr;
+	struct irq_domain *msi_domain;
+	struct irq_domain *msi_inner_domain;
+	DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
+	struct mutex msi_lock;
+	phys_addr_t msi_doorbell_addr;
 #endif
+	u32 doorbell_mask;
+};
+
+static struct mpic mpic_data;
+static struct mpic * const mpic = &mpic_data;
 
 static inline bool mpic_is_ipi_available(void)
 {
@@ -170,7 +192,7 @@  static inline bool mpic_is_ipi_available(void)
 	 * interrupt controller (e.g. GIC) that takes care of inter-processor
 	 * interrupts.
 	 */
-	return parent_irq <= 0;
+	return mpic->parent_irq <= 0;
 }
 
 static inline u32 msi_doorbell_mask(void)
@@ -203,9 +225,9 @@  static void mpic_irq_mask(struct irq_data *d)
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
 	if (!mpic_is_percpu_irq(hwirq))
-		writel(hwirq, main_int_base + MPIC_INT_CLEAR_ENABLE);
+		writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE);
 	else
-		writel(hwirq, per_cpu_int_base + MPIC_INT_SET_MASK);
+		writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK);
 }
 
 static void mpic_irq_unmask(struct irq_data *d)
@@ -213,9 +235,9 @@  static void mpic_irq_unmask(struct irq_data *d)
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
 	if (!mpic_is_percpu_irq(hwirq))
-		writel(hwirq, main_int_base + MPIC_INT_SET_ENABLE);
+		writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
 	else
-		writel(hwirq, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 }
 
 #ifdef CONFIG_PCI_MSI
@@ -236,8 +258,8 @@  static void mpic_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 {
 	unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
 
-	msg->address_lo = lower_32_bits(msi_doorbell_addr);
-	msg->address_hi = upper_32_bits(msi_doorbell_addr);
+	msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr);
+	msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr);
 	msg->data = BIT(cpu + 8) | (d->hwirq + msi_doorbell_start());
 }
 
@@ -269,10 +291,10 @@  static int mpic_msi_alloc(struct irq_domain *domain, unsigned int virq, unsigned
 {
 	int hwirq;
 
-	mutex_lock(&msi_used_lock);
-	hwirq = bitmap_find_free_region(msi_used, msi_doorbell_size(),
+	mutex_lock(&mpic->msi_lock);
+	hwirq = bitmap_find_free_region(mpic->msi_used, msi_doorbell_size(),
 					order_base_2(nr_irqs));
-	mutex_unlock(&msi_used_lock);
+	mutex_unlock(&mpic->msi_lock);
 
 	if (hwirq < 0)
 		return -ENOSPC;
@@ -291,9 +313,9 @@  static void mpic_msi_free(struct irq_domain *domain, unsigned int virq, unsigned
 {
 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 
-	mutex_lock(&msi_used_lock);
-	bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
-	mutex_unlock(&msi_used_lock);
+	mutex_lock(&mpic->msi_lock);
+	bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs));
+	mutex_unlock(&mpic->msi_lock);
 }
 
 static const struct irq_domain_ops mpic_msi_domain_ops = {
@@ -306,27 +328,29 @@  static void mpic_msi_reenable_percpu(void)
 	u32 reg;
 
 	/* Enable MSI doorbell mask and combined cpu local interrupt */
-	reg = readl(per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 	reg |= msi_doorbell_mask();
-	writel(reg, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 
 	/* Unmask local doorbell interrupt */
-	writel(1, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+	writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 }
 
 static int __init mpic_msi_init(struct device_node *node, phys_addr_t main_int_phys_base)
 {
-	msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT;
+	mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT;
+
+	mutex_init(&mpic->msi_lock);
 
-	mpic_msi_inner_domain = irq_domain_add_linear(NULL, msi_doorbell_size(),
-						      &mpic_msi_domain_ops, NULL);
-	if (!mpic_msi_inner_domain)
+	mpic->msi_inner_domain = irq_domain_add_linear(NULL, msi_doorbell_size(),
+						       &mpic_msi_domain_ops, NULL);
+	if (!mpic->msi_inner_domain)
 		return -ENOMEM;
 
-	mpic_msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info,
-						    mpic_msi_inner_domain);
-	if (!mpic_msi_domain) {
-		irq_domain_remove(mpic_msi_inner_domain);
+	mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info,
+						     mpic->msi_inner_domain);
+	if (!mpic->msi_domain) {
+		irq_domain_remove(mpic->msi_inner_domain);
 		return -ENOMEM;
 	}
 
@@ -334,7 +358,7 @@  static int __init mpic_msi_init(struct device_node *node, phys_addr_t main_int_p
 
 	/* Unmask low 16 MSI irqs on non-IPI platforms */
 	if (!mpic_is_ipi_available())
-		writel(0, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 
 	return 0;
 }
@@ -362,29 +386,26 @@  static void mpic_perf_init(void)
 	cpuid = cpu_logical_map(smp_processor_id());
 
 	/* Enable Performance Counter Overflow interrupts */
-	writel(MPIC_INT_CAUSE_PERF(cpuid),
-	       per_cpu_int_base + MPIC_INT_FABRIC_MASK);
+	writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK);
 }
 
 #ifdef CONFIG_SMP
-static struct irq_domain *mpic_ipi_domain;
-
 static void mpic_ipi_mask(struct irq_data *d)
 {
 	u32 reg;
 
-	reg = readl(per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 	reg &= ~BIT(d->hwirq);
-	writel(reg, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 }
 
 static void mpic_ipi_unmask(struct irq_data *d)
 {
 	u32 reg;
 
-	reg = readl(per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 	reg |= BIT(d->hwirq);
-	writel(reg, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 }
 
 static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
@@ -403,12 +424,12 @@  static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 	dsb();
 
 	/* submit softirq */
-	writel((map << 8) | d->hwirq, main_int_base + MPIC_SW_TRIG_INT);
+	writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT);
 }
 
 static void mpic_ipi_ack(struct irq_data *d)
 {
-	writel(~BIT(d->hwirq), per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 }
 
 static struct irq_chip mpic_ipi_irqchip = {
@@ -445,13 +466,13 @@  static const struct irq_domain_ops mpic_ipi_domain_ops = {
 static void mpic_ipi_resume(void)
 {
 	for (irq_hw_number_t i = 0; i < IPI_DOORBELL_NR; i++) {
-		unsigned int virq = irq_find_mapping(mpic_ipi_domain, i);
+		unsigned int virq = irq_find_mapping(mpic->ipi_domain, i);
 		struct irq_data *d;
 
 		if (!virq || !irq_percpu_is_enabled(virq))
 			continue;
 
-		d = irq_domain_get_irq_data(mpic_ipi_domain, virq);
+		d = irq_domain_get_irq_data(mpic->ipi_domain, virq);
 		mpic_ipi_unmask(d);
 	}
 }
@@ -460,13 +481,13 @@  static int __init mpic_ipi_init(struct device_node *node)
 {
 	int base_ipi;
 
-	mpic_ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR,
-						   &mpic_ipi_domain_ops, NULL);
-	if (WARN_ON(!mpic_ipi_domain))
+	mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR,
+						    &mpic_ipi_domain_ops, NULL);
+	if (WARN_ON(!mpic->ipi_domain))
 		return -ENOMEM;
 
-	irq_domain_update_bus_token(mpic_ipi_domain, DOMAIN_BUS_IPI);
-	base_ipi = irq_domain_alloc_irqs(mpic_ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL);
+	irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI);
+	base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL);
 	if (WARN_ON(!base_ipi))
 		return -ENOMEM;
 
@@ -483,7 +504,7 @@  static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	/* Select a single core from the affinity mask which is online */
 	cpu = cpumask_any_and(mask_val, cpu_online_mask);
 
-	atomic_io_modify(main_int_base + MPIC_INT_SOURCE_CTL(hwirq),
+	atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq),
 			 MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu)));
 
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -493,27 +514,27 @@  static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 
 static void mpic_smp_cpu_init(void)
 {
-	for (irq_hw_number_t i = 0; i < mpic_domain->hwirq_max; i++)
-		writel(i, per_cpu_int_base + MPIC_INT_SET_MASK);
+	for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++)
+		writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);
 
 	if (!mpic_is_ipi_available())
 		return;
 
 	/* Disable all IPIs */
-	writel(0, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 
 	/* Clear pending IPIs */
-	writel(0, per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 
 	/* Unmask IPI interrupt */
-	writel(0, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+	writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 }
 
 static void mpic_reenable_percpu(void)
 {
 	/* Re-enable per-CPU interrupts that were enabled before suspend */
 	for (irq_hw_number_t i = 0; i < MPIC_MAX_PER_CPU_IRQS; i++) {
-		unsigned int virq = irq_linear_revmap(mpic_domain, i);
+		unsigned int virq = irq_linear_revmap(mpic->domain, i);
 		struct irq_data *d;
 
 		if (!virq || !irq_percpu_is_enabled(virq))
@@ -542,7 +563,7 @@  static int mpic_cascaded_starting_cpu(unsigned int cpu)
 {
 	mpic_perf_init();
 	mpic_reenable_percpu();
-	enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+	enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE);
 
 	return 0;
 }
@@ -571,9 +592,9 @@  static int mpic_irq_map(struct irq_domain *h, unsigned int virq,
 
 	mpic_irq_mask(irq_get_irq_data(virq));
 	if (!mpic_is_percpu_irq(hwirq))
-		writel(hwirq, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 	else
-		writel(hwirq, main_int_base + MPIC_INT_SET_ENABLE);
+		writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
 	irq_set_status_flags(virq, IRQ_LEVEL);
 
 	if (mpic_is_percpu_irq(hwirq)) {
@@ -598,12 +619,12 @@  static void mpic_handle_msi_irq(void)
 	unsigned long cause;
 	unsigned int i;
 
-	cause = readl_relaxed(per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 	cause &= msi_doorbell_mask();
-	writel(~cause, per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 
 	for_each_set_bit(i, &cause, BITS_PER_LONG)
-		generic_handle_domain_irq(mpic_msi_inner_domain,
+		generic_handle_domain_irq(mpic->msi_inner_domain,
 					  i - msi_doorbell_start());
 }
 #else
@@ -616,11 +637,11 @@  static void mpic_handle_ipi_irq(void)
 	unsigned long cause;
 	irq_hw_number_t i;
 
-	cause = readl_relaxed(per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 	cause &= IPI_DOORBELL_MASK;
 
 	for_each_set_bit(i, &cause, IPI_DOORBELL_NR)
-		generic_handle_domain_irq(mpic_ipi_domain, i);
+		generic_handle_domain_irq(mpic->ipi_domain, i);
 }
 #else
 static inline void mpic_handle_ipi_irq(void) {}
@@ -635,11 +656,11 @@  static void mpic_handle_cascade_irq(struct irq_desc *desc)
 
 	chained_irq_enter(chip, desc);
 
-	cause = readl_relaxed(per_cpu_int_base + MPIC_PPI_CAUSE);
+	cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE);
 	cpuid = cpu_logical_map(smp_processor_id());
 
 	for_each_set_bit(i, &cause, BITS_PER_LONG) {
-		irqsrc = readl_relaxed(main_int_base + MPIC_INT_SOURCE_CTL(i));
+		irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i));
 
 		/* Check if the interrupt is not masked on current CPU.
 		 * Test IRQ (0-1) and FIQ (8-9) mask bits.
@@ -652,7 +673,7 @@  static void mpic_handle_cascade_irq(struct irq_desc *desc)
 			continue;
 		}
 
-		generic_handle_domain_irq(mpic_domain, i);
+		generic_handle_domain_irq(mpic->domain, i);
 	}
 
 	chained_irq_exit(chip, desc);
@@ -664,14 +685,14 @@  static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
 	u32 irqstat;
 
 	do {
-		irqstat = readl_relaxed(per_cpu_int_base + MPIC_CPU_INTACK);
+		irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
 		i = FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat);
 
 		if (i > 1022)
 			break;
 
 		if (i > 1)
-			generic_handle_domain_irq(mpic_domain, i);
+			generic_handle_domain_irq(mpic->domain, i);
 
 		/* MSI handling */
 		if (i == 1)
@@ -685,7 +706,7 @@  static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
 
 static int mpic_suspend(void)
 {
-	doorbell_mask_reg = readl(per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 
 	return 0;
 }
@@ -695,8 +716,8 @@  static void mpic_resume(void)
 	bool src0, src1;
 
 	/* Re-enable interrupts */
-	for (irq_hw_number_t i = 0; i < mpic_domain->hwirq_max; i++) {
-		unsigned int virq = irq_linear_revmap(mpic_domain, i);
+	for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) {
+		unsigned int virq = irq_linear_revmap(mpic->domain, i);
 		struct irq_data *d;
 
 		if (!virq)
@@ -706,12 +727,12 @@  static void mpic_resume(void)
 
 		if (!mpic_is_percpu_irq(i)) {
 			/* Non per-CPU interrupts */
-			writel(i, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+			writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 			if (!irqd_irq_disabled(d))
 				mpic_irq_unmask(d);
 		} else {
 			/* Per-CPU interrupts */
-			writel(i, main_int_base + MPIC_INT_SET_ENABLE);
+			writel(i, mpic->base + MPIC_INT_SET_ENABLE);
 
 			/*
 			 * Re-enable on the current CPU, mpic_reenable_percpu()
@@ -723,20 +744,20 @@  static void mpic_resume(void)
 	}
 
 	/* Reconfigure doorbells for IPIs and MSIs */
-	writel(doorbell_mask_reg, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 
 	if (mpic_is_ipi_available()) {
-		src0 = doorbell_mask_reg & IPI_DOORBELL_MASK;
-		src1 = doorbell_mask_reg & PCI_MSI_DOORBELL_MASK;
+		src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK;
+		src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK;
 	} else {
-		src0 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
-		src1 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
+		src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
+		src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
 	}
 
 	if (src0)
-		writel(0, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 	if (src1)
-		writel(1, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 
 	if (mpic_is_ipi_available())
 		mpic_ipi_resume();
@@ -784,32 +805,32 @@  static int __init mpic_of_init(struct device_node *node, struct device_node *par
 	unsigned int nr_irqs;
 	int err;
 
-	err = mpic_map_region(node, 0, &main_int_base, &phys_base);
+	err = mpic_map_region(node, 0, &mpic->base, &phys_base);
 	if (err)
 		return err;
 
-	err = mpic_map_region(node, 1, &per_cpu_int_base, NULL);
+	err = mpic_map_region(node, 1, &mpic->per_cpu, NULL);
 	if (err)
 		return err;
 
-	nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(main_int_base + MPIC_INT_CONTROL));
+	nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL));
 
 	for (irq_hw_number_t i = 0; i < nr_irqs; i++)
-		writel(i, main_int_base + MPIC_INT_CLEAR_ENABLE);
+		writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE);
 
-	mpic_domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, NULL);
-	if (!mpic_domain) {
+	mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, NULL);
+	if (!mpic->domain) {
 		pr_err("%pOF: Unable to add IRQ domain\n", node);
 		return -ENOMEM;
 	}
 
-	irq_domain_update_bus_token(mpic_domain, DOMAIN_BUS_WIRED);
+	irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED);
 
 	/*
-	 * Initialize parent_irq before calling any other functions, since it is
-	 * used to distinguish between IPI and non-IPI platforms.
+	 * Initialize mpic->parent_irq before calling any other functions, since
+	 * it is used to distinguish between IPI and non-IPI platforms.
 	 */
-	parent_irq = irq_of_parse_and_map(node, 0);
+	mpic->parent_irq = irq_of_parse_and_map(node, 0);
 
 	/* Setup for the boot CPU */
 	mpic_perf_init();
@@ -821,8 +842,8 @@  static int __init mpic_of_init(struct device_node *node, struct device_node *par
 		return err;
 	}
 
-	if (parent_irq <= 0) {
-		irq_set_default_host(mpic_domain);
+	if (mpic->parent_irq <= 0) {
+		irq_set_default_host(mpic->domain);
 		set_handle_irq(mpic_handle_irq);
 #ifdef CONFIG_SMP
 		err = mpic_ipi_init(node);
@@ -841,7 +862,7 @@  static int __init mpic_of_init(struct device_node *node, struct device_node *par
 					  "irqchip/armada/cascade:starting",
 					  mpic_cascaded_starting_cpu, NULL);
 #endif
-		irq_set_chained_handler(parent_irq, mpic_handle_cascade_irq);
+		irq_set_chained_handler(mpic->parent_irq, mpic_handle_cascade_irq);
 	}
 
 	register_syscore_ops(&mpic_syscore_ops);