Message ID | 20230113094216.116036-1-mason.huo@starfivetech.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Palmer Dabbelt |
Series | [v1] irqchip/irq-sifive-plic: Add syscore callbacks for hibernation |
Context | Check | Description |
---|---|---|
conchuod/patch_count | success | Link |
conchuod/cover_letter | success | Single patches do not need cover letters |
conchuod/tree_selection | success | Guessed tree name to be for-next |
conchuod/fixes_present | success | Fixes tag not required for -next series |
conchuod/maintainers_pattern | success | MAINTAINERS pattern errors before the patch: 13 and now 13 |
conchuod/verify_signedoff | success | Signed-off-by tag matches author and committer |
conchuod/kdoc | success | Errors and warnings before: 0 this patch: 0 |
conchuod/module_param | success | Was 0 now: 0 |
conchuod/build_rv64_gcc_allmodconfig | fail | Errors and warnings before: 0 this patch: 2054 |
conchuod/alphanumeric_selects | success | Out of order selects before the patch: 57 and now 57 |
conchuod/build_rv32_defconfig | success | Build OK |
conchuod/dtb_warn_rv64 | success | Errors and warnings before: 4 this patch: 4 |
conchuod/header_inline | success | No static functions without inline keyword in header files |
conchuod/checkpatch | warning | CHECK: Alignment should match open parenthesis |
conchuod/source_inline | success | Was 0 now: 0 |
conchuod/build_rv64_nommu_k210_defconfig | success | Build OK |
conchuod/verify_fixes | success | No Fixes tag |
conchuod/build_rv64_nommu_virt_defconfig | success | Build OK |
On 2023/1/13 17:42, Mason Huo wrote:
> The priority and enable registers of plic will be reset
> during hibernation power cycle in poweroff mode,
> add the syscore callbacks to save/restore those registers.
>
> Signed-off-by: Mason Huo <mason.huo@starfivetech.com>
> Reviewed-by: Ley Foon Tan <leyfoon.tan@starfivetech.com>
> Reviewed-by: Sia Jee Heng <jeeheng.sia@starfivetech.com>
> ---
>  drivers/irqchip/irq-sifive-plic.c | 93 ++++++++++++++++++++++++++++++-
>  1 file changed, 91 insertions(+), 2 deletions(-)
>
> [...]

Hi all,

Could you please help to review and provide comments on this patch?
Looking forward to your reply.

Thanks
Mason
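Background for the mechanism the patch hooks into: syscore callbacks run very late in the suspend/hibernation sequence, after non-boot CPUs have been disabled and with local interrupts off, and ->resume mirrors ->suspend on the way back up. Below is a minimal sketch of that pattern, deliberately separate from the PLIC driver; example_base and the example_* names are placeholders, not anything from the patch.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

static void __iomem *example_base;	/* placeholder: mapped elsewhere with ioremap() */
static u32 example_saved;		/* register cache filled on the way down */

static int example_syscore_suspend(void)
{
	/* Runs with only the boot CPU online and local IRQs disabled. */
	example_saved = readl(example_base);
	return 0;			/* a non-zero return would abort the transition */
}

static void example_syscore_resume(void)
{
	/* Mirrors ->suspend; cannot fail, so it just rewrites the cached value. */
	writel(example_saved, example_base);
}

static struct syscore_ops example_syscore_ops = {
	.suspend = example_syscore_suspend,
	.resume  = example_syscore_resume,
};

static int __init example_pm_init(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}
device_initcall(example_pm_init);

That single-CPU, IRQs-off context is also why the patch's callbacks can walk every present CPU's handler state from one place.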
On Fri, 13 Jan 2023 09:42:16 +0000, Mason Huo <mason.huo@starfivetech.com> wrote:
>
> The priority and enable registers of plic will be reset
> during hibernation power cycle in poweroff mode,
> add the syscore callbacks to save/restore those registers.
>
> Signed-off-by: Mason Huo <mason.huo@starfivetech.com>
> Reviewed-by: Ley Foon Tan <leyfoon.tan@starfivetech.com>
> Reviewed-by: Sia Jee Heng <jeeheng.sia@starfivetech.com>
> ---
>  drivers/irqchip/irq-sifive-plic.c | 93 ++++++++++++++++++++++++++++++-
>  1 file changed, 91 insertions(+), 2 deletions(-)
>
> [...]
>
> @@ -79,10 +82,13 @@ struct plic_handler {
>  	raw_spinlock_t enable_lock;
>  	void __iomem *enable_base;
>  	struct plic_priv *priv;
> +	/* To record interrupts that are enabled before suspend. */
> +	u32 enable_reg[MAX_DEVICES / 32];

What does MAX_DEVICES represent here? How is it related to the number
of interrupts you're trying to save? It seems to be related to the
number of CPUs, so it hardly makes any sense so far.

> [...]
>
> +static void plic_irq_resume(void)
> +{
> +	unsigned int i, cpu;
> +	u32 __iomem *reg;
> +
> +	for (i = 0; i < priv_data->nr_irqs; i++)
> +		writel(priv_data->priority_reg[i],
> +		       priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);

From what I can tell, this driver uses exactly 2 priorities: 0 and 1.
And yet you use a full 32bit to encode those. Does it seem like a good
idea?

> +
> +	for_each_cpu(cpu, cpu_present_mask) {
> +		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
> +
> +		if (!handler->present)
> +			continue;
> +
> +		for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++) {
> +			reg = handler->enable_base + i * sizeof(u32);
> +			raw_spin_lock(&handler->enable_lock);
> +			writel(handler->enable_reg[i], reg);
> +			raw_spin_unlock(&handler->enable_lock);

Why do you need to take/release the lock around *each* register
access? Isn't that lock constant for a given CPU?

> +		}
> +	}
> +}
> +
> +static int plic_irq_suspend(void)
> +{
> [...]
> +		for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++) {
> +			reg = handler->enable_base + i * sizeof(u32);
> +			raw_spin_lock(&handler->enable_lock);
> +			handler->enable_reg[i] = readl(reg);
> +			raw_spin_unlock(&handler->enable_lock);

Same remarks.

	M.
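For illustration of the locking comment, here is a sketch of the kind of rework it points at, reusing the names from the patch (priv_data, plic_handlers, PRIORITY_BASE, PRIORITY_PER_ID) and making no claim about what the next revision will actually do: the per-handler enable_lock is taken once per handler instead of once per register.

static void plic_irq_resume(void)
{
	unsigned int i, cpu;

	/* Priority registers are global, not per handler: no lock needed here. */
	for (i = 0; i < priv_data->nr_irqs; i++)
		writel(priv_data->priority_reg[i],
		       priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		/* Hold the handler's lock across the whole enable-word loop. */
		raw_spin_lock(&handler->enable_lock);
		for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++)
			writel(handler->enable_reg[i],
			       handler->enable_base + i * sizeof(u32));
		raw_spin_unlock(&handler->enable_lock);
	}
}

The suspend side would mirror this, reading each enable word back into enable_reg[] under a single lock acquisition per handler.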
On 2023/2/5 18:51, Marc Zyngier wrote:
> On Fri, 13 Jan 2023 09:42:16 +0000,
> Mason Huo <mason.huo@starfivetech.com> wrote:
>>
>> The priority and enable registers of plic will be reset
>> during hibernation power cycle in poweroff mode,
>> add the syscore callbacks to save/restore those registers.
>>
>> [...]
>>
>> +	/* To record interrupts that are enabled before suspend. */
>> +	u32 enable_reg[MAX_DEVICES / 32];
>
> What does MAX_DEVICES represent here? How is it related to the number
> of interrupts you're trying to save? It seems to be related to the
> number of CPUs, so it hardly makes any sense so far.
>
The comment of this macro describes that "The largest number supported
by devices marked as 'sifive,plic-1.0.0', is 1024, of which
device 0 is defined as non-existent by the RISC-V Privileged Spec."
As far as I understand, the *device* here means HW IRQ source,
and the HW IRQ 0 is non-existent.

>> [...]
>>
>> +	for (i = 0; i < priv_data->nr_irqs; i++)
>> +		writel(priv_data->priority_reg[i],
>> +		       priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
>
> From what I can tell, this driver uses exactly 2 priorities: 0 and 1.
> And yet you use a full 32bit to encode those. Does it seem like a good
> idea?
>
Yes, currently this driver uses only 2 priorities.
But, according to the sifive spec, the priority register is a 32bit register,
and it supports 7 levels of priority.

>> [...]
>>
>> +		for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++) {
>> +			reg = handler->enable_base + i * sizeof(u32);
>> +			raw_spin_lock(&handler->enable_lock);
>> +			writel(handler->enable_reg[i], reg);
>> +			raw_spin_unlock(&handler->enable_lock);
>
> Why do you need to take/release the lock around *each* register
> access? Isn't that lock constant for a given CPU?
>
OK, will fix it in the next version.

>> [...]
>
> Same remarks.
>
> 	M.
On Mon, 06 Feb 2023 06:13:11 +0000, Mason Huo <mason.huo@starfivetech.com> wrote:
>
> On 2023/2/5 18:51, Marc Zyngier wrote:
> > On Fri, 13 Jan 2023 09:42:16 +0000,
> > Mason Huo <mason.huo@starfivetech.com> wrote:
> >>
> >> [...]
> >> +	/* To record interrupts that are enabled before suspend. */
> >> +	u32 enable_reg[MAX_DEVICES / 32];
> >
> > What does MAX_DEVICES represent here? How is it related to the number
> > of interrupts you're trying to save? It seems to be related to the
> > number of CPUs, so it hardly makes any sense so far.
> >
> The comment of this macro describes that "The largest number supported
> by devices marked as 'sifive,plic-1.0.0', is 1024, of which
> device 0 is defined as non-existent by the RISC-V Privileged Spec."
> As far as I understand, the *device* here means HW IRQ source,
> and the HW IRQ 0 is non-existent.

So why is it sized to that maximum value? The binding gives you the
*real* value that the HW implements.

> >> [...]
> >> +	for (i = 0; i < priv_data->nr_irqs; i++)
> >> +		writel(priv_data->priority_reg[i],
> >> +		       priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
> >
> > From what I can tell, this driver uses exactly 2 priorities: 0 and 1.
> > And yet you use a full 32bit to encode those. Does it seem like a good
> > idea?
> >
> Yes, currently this driver uses only 2 priorities.
> But, according to the sifive spec, the priority register is a 32bit register,
> and it supports 7 levels of priority.

And? This is a Linux driver, not an implementation validation
tool. What is the point of saving/restoring stuff that is *never*
used? :-(

	M.
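For illustration only, sizing the save area from the value the devicetree actually reports could look roughly like the fragment below. enable_save and out_free_enable_save are hypothetical names, and nr_irqs is the "riscv,ndev" count that __plic_init() already reads; this is a sketch against the patch's structure, not the next revision.

	/* In struct plic_handler, replacing the fixed-size array: */
	u32 *enable_save;	/* one bit per hwirq, rounded up to u32 words */

	/* In __plic_init(), for each handler, once nr_irqs is known: */
	handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
				       sizeof(*handler->enable_save),
				       GFP_KERNEL);
	if (!handler->enable_save)
		goto out_free_enable_save;	/* hypothetical unwind label */

This keeps the memory cost proportional to what the hardware implements instead of the 1024-source architectural maximum behind MAX_DEVICES.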
On 2023/2/6 20:48, Marc Zyngier wrote:
> On Mon, 06 Feb 2023 06:13:11 +0000,
> Mason Huo <mason.huo@starfivetech.com> wrote:
>>
>> On 2023/2/5 18:51, Marc Zyngier wrote:
>> > On Fri, 13 Jan 2023 09:42:16 +0000,
>> > Mason Huo <mason.huo@starfivetech.com> wrote:
>> >>
>> >> [...]
>> >> +	/* To record interrupts that are enabled before suspend. */
>> >> +	u32 enable_reg[MAX_DEVICES / 32];
>> >
>> > What does MAX_DEVICES represent here? How is it related to the number
>> > of interrupts you're trying to save? It seems to be related to the
>> > number of CPUs, so it hardly makes any sense so far.
>> >
>> The comment of this macro describes that "The largest number supported
>> by devices marked as 'sifive,plic-1.0.0', is 1024, of which
>> device 0 is defined as non-existent by the RISC-V Privileged Spec."
>> As far as I understand, the *device* here means HW IRQ source,
>> and the HW IRQ 0 is non-existent.
>
> So why is it sized to that maximum value? The binding gives you the
> *real* value that the HW implements.
>
OK, will change to use the binding value.

>> >> [...]
>> >> +	for (i = 0; i < priv_data->nr_irqs; i++)
>> >> +		writel(priv_data->priority_reg[i],
>> >> +		       priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
>> >
>> > From what I can tell, this driver uses exactly 2 priorities: 0 and 1.
>> > And yet you use a full 32bit to encode those. Does it seem like a good
>> > idea?
>> >
>> Yes, currently this driver uses only 2 priorities.
>> But, according to the sifive spec, the priority register is a 32bit register,
>> and it supports 7 levels of priority.
>
> And? This is a Linux driver, not an implementation validation
> tool. What is the point of saving/restoring stuff that is *never*
> used? :-(
>
> 	M.
>
OK, will save/restore the priority in 1 bit.

Thanks
Mason
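A rough sketch of the one-bit-per-interrupt priority cache Mason describes: priority_save is an assumed bitmap field in struct plic_priv (allocated, say, with bitmap_zalloc(nr_irqs, GFP_KERNEL) at init), and the enable-register handling from the patch is left out. This is an illustration, not the follow-up patch itself.

static int plic_irq_suspend(void)
{
	unsigned int i;
	u32 __iomem *reg;

	for (i = 0; i < priv_data->nr_irqs; i++) {
		reg = priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID;
		/* The driver only ever programs 0 or 1, so one bit is enough. */
		__assign_bit(i, priv_data->priority_save, readl(reg));
	}

	/* enable-register save as in the patch, omitted here */
	return 0;
}

static void plic_irq_resume(void)
{
	unsigned int i;

	for (i = 0; i < priv_data->nr_irqs; i++)
		writel(test_bit(i, priv_data->priority_save),
		       priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);

	/* enable-register restore as in the patch, omitted here */
}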
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index ff47bd0dec45..80306de45d2b 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -17,6 +17,7 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
 #include <asm/smp.h>
 
 /*
@@ -67,6 +68,8 @@ struct plic_priv {
 	struct irq_domain *irqdomain;
 	void __iomem *regs;
 	unsigned long plic_quirks;
+	unsigned int nr_irqs;
+	u32 *priority_reg;
 };
 
 struct plic_handler {
@@ -79,10 +82,13 @@ struct plic_handler {
 	raw_spinlock_t enable_lock;
 	void __iomem *enable_base;
 	struct plic_priv *priv;
+	/* To record interrupts that are enabled before suspend. */
+	u32 enable_reg[MAX_DEVICES / 32];
 };
 static int plic_parent_irq __ro_after_init;
 static bool plic_cpuhp_setup_done __ro_after_init;
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
+static struct plic_priv *priv_data;
 
 static int plic_irq_set_type(struct irq_data *d, unsigned int type);
 
@@ -229,6 +235,78 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
 	return IRQ_SET_MASK_OK;
 }
 
+static void plic_irq_resume(void)
+{
+	unsigned int i, cpu;
+	u32 __iomem *reg;
+
+	for (i = 0; i < priv_data->nr_irqs; i++)
+		writel(priv_data->priority_reg[i],
+		       priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
+
+	for_each_cpu(cpu, cpu_present_mask) {
+		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+		if (!handler->present)
+			continue;
+
+		for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++) {
+			reg = handler->enable_base + i * sizeof(u32);
+			raw_spin_lock(&handler->enable_lock);
+			writel(handler->enable_reg[i], reg);
+			raw_spin_unlock(&handler->enable_lock);
+		}
+	}
+}
+
+static int plic_irq_suspend(void)
+{
+	unsigned int i, cpu;
+	u32 __iomem *reg;
+
+	for (i = 0; i < priv_data->nr_irqs; i++)
+		priv_data->priority_reg[i] =
+			readl(priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
+
+	for_each_cpu(cpu, cpu_present_mask) {
+		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+		if (!handler->present)
+			continue;
+
+		for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++) {
+			reg = handler->enable_base + i * sizeof(u32);
+			raw_spin_lock(&handler->enable_lock);
+			handler->enable_reg[i] = readl(reg);
+			raw_spin_unlock(&handler->enable_lock);
+		}
+	}
+
+	return 0;
+}
+
+static struct syscore_ops plic_irq_syscore_ops = {
+	.suspend = plic_irq_suspend,
+	.resume = plic_irq_resume,
+};
+
+static void plic_irq_pm_init(void)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpu_present_mask) {
+		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+		if (!handler->present)
+			continue;
+
+		memset(&handler->enable_reg[0], 0,
+		       sizeof(handler->enable_reg));
+	}
+
+	register_syscore_ops(&plic_irq_syscore_ops);
+}
+
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
 			      irq_hw_number_t hwirq)
 {
@@ -351,6 +429,7 @@ static int __init __plic_init(struct device_node *node,
 		return -ENOMEM;
 
 	priv->plic_quirks = plic_quirks;
+	priv_data = priv;
 
 	priv->regs = of_iomap(node, 0);
 	if (WARN_ON(!priv->regs)) {
@@ -363,15 +442,21 @@ static int __init __plic_init(struct device_node *node,
 	if (WARN_ON(!nr_irqs))
 		goto out_iounmap;
 
+	priv->nr_irqs = nr_irqs;
+
+	priv->priority_reg = kcalloc(nr_irqs, sizeof(u32), GFP_KERNEL);
+	if (!priv->priority_reg)
+		goto out_free_priority_reg;
+
 	nr_contexts = of_irq_count(node);
 	if (WARN_ON(!nr_contexts))
-		goto out_iounmap;
+		goto out_free_priority_reg;
 
 	error = -ENOMEM;
 	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
 			&plic_irqdomain_ops, priv);
 	if (WARN_ON(!priv->irqdomain))
-		goto out_iounmap;
+		goto out_free_priority_reg;
 
 	for (i = 0; i < nr_contexts; i++) {
 		struct of_phandle_args parent;
@@ -461,11 +546,15 @@ static int __init __plic_init(struct device_node *node,
 					  plic_starting_cpu, plic_dying_cpu);
 		plic_cpuhp_setup_done = true;
 	}
+	plic_irq_pm_init();
 
 	pr_info("%pOFP: mapped %d interrupts with %d handlers for"
 		" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
 	return 0;
 
+out_free_priority_reg:
+	kfree(priv->priority_reg);
+
 out_iounmap:
 	iounmap(priv->regs);
 out_free_priv: