diff mbox

[v3] PCI: Xilinx NWL: Modifying irq chip for legacy interrupts

Message ID 1485853152-31819-1-git-send-email-bharatku@xilinx.com (mailing list archive)
State New, archived
Delegated to: Bjorn Helgaas
Headers show

Commit Message

Bharat Kumar Gogada Jan. 31, 2017, 8:59 a.m. UTC
- Adding mutex lock for protecting legacy mask register
- A few wifi end points which only support legacy interrupts
perform hardware reset functionality after disabling interrupts
by invoking disable_irq, and then re-enable using enable_irq; they
enable hardware interrupts first and the virtual irq line later.
- The legacy irq line goes low only after DEASSERT_INTx is
received. As the legacy irq line is high immediately after hardware
interrupts are enabled, but the virq of the EP is still in disabled state,
the EP handler is never executed, resulting in no DEASSERT_INTx. If a dummy
irq chip is used, interrupts are not masked and the system
hangs with a CPU stall.
- Adding irq chip functions instead of dummy irq chip for legacy
interrupts.
- Legacy interrupts are level sensitive, so using handle_level_irq
is more appropriate as it masks interrupts until the End point handles
interrupts and unmasks interrupts after the End point handler is executed.
- Legacy interrupts are level triggered, virtual irq line of End
Point shows as edge in /proc/interrupts.
- Setting irq flags of virtual irq line of EP to level triggered
at the time of mapping.

Signed-off-by: Bharat Kumar Gogada <bharatku@xilinx.com>
---
 drivers/pci/host/pcie-xilinx-nwl.c |   43 +++++++++++++++++++++++++++++++++++-
 1 files changed, 42 insertions(+), 1 deletions(-)

Comments

Marc Zyngier Jan. 31, 2017, 9:19 a.m. UTC | #1
On Tue, Jan 31 2017 at 08:59:12 AM, Bharat Kumar Gogada <bharat.kumar.gogada@xilinx.com> wrote:
> - Adding mutex lock for protecting legacy mask register
> - Few wifi end points which only support legacy interrupts,
> performs hardware reset functionalities after disabling interrupts
> by invoking disable_irq and then re-enable using enable_irq, they
> enable hardware interrupts first and then virtual irq line later.
> - The legacy irq line goes low only after DEASSERT_INTx is
> received.As the legacy irq line is high immediately after hardware
> interrupts are enabled but virq of EP is still in disabled state
> and EP handler is never executed resulting no DEASSERT_INTx.If dummy
> irq chip is used, interrutps are not masked and system is
> hanging with CPU stall.
> - Adding irq chip functions instead of dummy irq chip for legacy
> interrupts.
> - Legacy interrupts are level sensitive, so using handle_level_irq
> is more appropriate as it is masks interrupts until End point handles
> interrupts and unmasks interrutps after End point handler is executed.
> - Legacy interrupts are level triggered, virtual irq line of End
> Point shows as edge in /proc/interrupts.
> - Setting irq flags of virtual irq line of EP to level triggered
> at the time of mapping.
>
> Signed-off-by: Bharat Kumar Gogada <bharatku@xilinx.com>
> ---
>  drivers/pci/host/pcie-xilinx-nwl.c |   43 +++++++++++++++++++++++++++++++++++-
>  1 files changed, 42 insertions(+), 1 deletions(-)
>
> diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
> index 43eaa4a..76dd094 100644
> --- a/drivers/pci/host/pcie-xilinx-nwl.c
> +++ b/drivers/pci/host/pcie-xilinx-nwl.c
> @@ -184,6 +184,7 @@ struct nwl_pcie {
>  	u8 root_busno;
>  	struct nwl_msi msi;
>  	struct irq_domain *legacy_irq_domain;
> +	struct mutex leg_mask_lock;
>  };
>  
>  static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
> @@ -395,11 +396,50 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
>  	chained_irq_exit(chip, desc);
>  }
>  
> +static void nwl_mask_leg_irq(struct irq_data *data)
> +{
> +	struct irq_desc *desc = irq_to_desc(data->irq);
> +	struct nwl_pcie *pcie;
> +	u32 mask;
> +	u32 val;
> +
> +	pcie = irq_desc_get_chip_data(desc);
> +	mask = 1 << (data->hwirq - 1);
> +	mutex_lock(&pcie->leg_mask_lock);
> +	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
> +	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
> +	mutex_unlock(&pcie->leg_mask_lock);

Have you looked at which context this is called in? In a number of
cases, the mask/unmask methods are called whilst you're in an interrupt
context. If you sleep there (which is what happens with a contended
mutex), you die horribly.

Given these constraints, you should use raw_spin_lock_irqsave and co,
since this can be called from both interrupt and non-interrupt contexts.

Thanks,

        M.
Bharat Kumar Gogada Jan. 31, 2017, 9:34 a.m. UTC | #2
> On Tue, Jan 31 2017 at 08:59:12 AM, Bharat Kumar Gogada
> <bharat.kumar.gogada@xilinx.com> wrote:
> > - Adding mutex lock for protecting legacy mask register
> > - Few wifi end points which only support legacy interrupts, performs
> > hardware reset functionalities after disabling interrupts by invoking
> > disable_irq and then re-enable using enable_irq, they enable hardware
> > interrupts first and then virtual irq line later.
> > - The legacy irq line goes low only after DEASSERT_INTx is received.As
> > the legacy irq line is high immediately after hardware interrupts are
> > enabled but virq of EP is still in disabled state and EP handler is
> > never executed resulting no DEASSERT_INTx.If dummy irq chip is used,
> > interrutps are not masked and system is hanging with CPU stall.
> > - Adding irq chip functions instead of dummy irq chip for legacy
> > interrupts.
> > - Legacy interrupts are level sensitive, so using handle_level_irq is
> > more appropriate as it is masks interrupts until End point handles
> > interrupts and unmasks interrutps after End point handler is executed.
> > - Legacy interrupts are level triggered, virtual irq line of End Point
> > shows as edge in /proc/interrupts.
> > - Setting irq flags of virtual irq line of EP to level triggered at
> > the time of mapping.
> >
> > Signed-off-by: Bharat Kumar Gogada <bharatku@xilinx.com>
> > ---
> >  drivers/pci/host/pcie-xilinx-nwl.c |   43
> +++++++++++++++++++++++++++++++++++-
> >  1 files changed, 42 insertions(+), 1 deletions(-)
> >
> > diff --git a/drivers/pci/host/pcie-xilinx-nwl.c
> > b/drivers/pci/host/pcie-xilinx-nwl.c
> > index 43eaa4a..76dd094 100644
> > --- a/drivers/pci/host/pcie-xilinx-nwl.c
> > +++ b/drivers/pci/host/pcie-xilinx-nwl.c
> > @@ -184,6 +184,7 @@ struct nwl_pcie {
> >  	u8 root_busno;
> >  	struct nwl_msi msi;
> >  	struct irq_domain *legacy_irq_domain;
> > +	struct mutex leg_mask_lock;
> >  };
> >
> >  static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) @@
> > -395,11 +396,50 @@ static void nwl_pcie_msi_handler_low(struct irq_desc
> *desc)
> >  	chained_irq_exit(chip, desc);
> >  }
> >
> > +static void nwl_mask_leg_irq(struct irq_data *data) {
> > +	struct irq_desc *desc = irq_to_desc(data->irq);
> > +	struct nwl_pcie *pcie;
> > +	u32 mask;
> > +	u32 val;
> > +
> > +	pcie = irq_desc_get_chip_data(desc);
> > +	mask = 1 << (data->hwirq - 1);
> > +	mutex_lock(&pcie->leg_mask_lock);
> > +	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
> > +	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
> > +	mutex_unlock(&pcie->leg_mask_lock);
> 
> Have you looked at which context this is called in? In a number of cases, the
> mask/unmask methods are called whilst you're in an interrupt context. If you
> sleep there (which is what happens with a contended mutex), you die horribly.
> 
> Given these constraints, you should use raw_spin_lock_irqsave and co, since this
> can be called from both interrupt and non-interrupt contexts.
> 
I have seen very few wifi drivers calling these in MAC flow, raw_spin_lock_irqsave 
looks more safe, will do it.

Thanks & Regards,
Bharat
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Marc Zyngier Jan. 31, 2017, 10:23 a.m. UTC | #3
On Tue, Jan 31 2017 at 09:34:43 AM, Bharat Kumar Gogada <bharat.kumar.gogada@xilinx.com> wrote:
>  > On Tue, Jan 31 2017 at 08:59:12 AM, Bharat Kumar Gogada
>> <bharat.kumar.gogada@xilinx.com> wrote:
>> > - Adding mutex lock for protecting legacy mask register
>> > - Few wifi end points which only support legacy interrupts, performs
>> > hardware reset functionalities after disabling interrupts by invoking
>> > disable_irq and then re-enable using enable_irq, they enable hardware
>> > interrupts first and then virtual irq line later.
>> > - The legacy irq line goes low only after DEASSERT_INTx is received.As
>> > the legacy irq line is high immediately after hardware interrupts are
>> > enabled but virq of EP is still in disabled state and EP handler is
>> > never executed resulting no DEASSERT_INTx.If dummy irq chip is used,
>> > interrutps are not masked and system is hanging with CPU stall.
>> > - Adding irq chip functions instead of dummy irq chip for legacy
>> > interrupts.
>> > - Legacy interrupts are level sensitive, so using handle_level_irq is
>> > more appropriate as it is masks interrupts until End point handles
>> > interrupts and unmasks interrutps after End point handler is executed.
>> > - Legacy interrupts are level triggered, virtual irq line of End Point
>> > shows as edge in /proc/interrupts.
>> > - Setting irq flags of virtual irq line of EP to level triggered at
>> > the time of mapping.
>> >
>> > Signed-off-by: Bharat Kumar Gogada <bharatku@xilinx.com>
>> > ---
>> >  drivers/pci/host/pcie-xilinx-nwl.c |   43
>> +++++++++++++++++++++++++++++++++++-
>> >  1 files changed, 42 insertions(+), 1 deletions(-)
>> >
>> > diff --git a/drivers/pci/host/pcie-xilinx-nwl.c
>> > b/drivers/pci/host/pcie-xilinx-nwl.c
>> > index 43eaa4a..76dd094 100644
>> > --- a/drivers/pci/host/pcie-xilinx-nwl.c
>> > +++ b/drivers/pci/host/pcie-xilinx-nwl.c
>> > @@ -184,6 +184,7 @@ struct nwl_pcie {
>> >  	u8 root_busno;
>> >  	struct nwl_msi msi;
>> >  	struct irq_domain *legacy_irq_domain;
>> > +	struct mutex leg_mask_lock;
>> >  };
>> >
>> >  static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) @@
>> > -395,11 +396,50 @@ static void nwl_pcie_msi_handler_low(struct irq_desc
>> *desc)
>> >  	chained_irq_exit(chip, desc);
>> >  }
>> >
>> > +static void nwl_mask_leg_irq(struct irq_data *data) {
>> > +	struct irq_desc *desc = irq_to_desc(data->irq);
>> > +	struct nwl_pcie *pcie;
>> > +	u32 mask;
>> > +	u32 val;
>> > +
>> > +	pcie = irq_desc_get_chip_data(desc);
>> > +	mask = 1 << (data->hwirq - 1);
>> > +	mutex_lock(&pcie->leg_mask_lock);
>> > +	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
>> > +	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
>> > +	mutex_unlock(&pcie->leg_mask_lock);
>> 
>> Have you looked at which context this is called in? In a number of cases, the
>> mask/unmask methods are called whilst you're in an interrupt context. If you
>> sleep there (which is what happens with a contended mutex), you die horribly.
>> 
>> Given these constraints, you should use raw_spin_lock_irqsave and
>> co, since this
>> can be called from both interrupt and non-interrupt contexts.
>> 
> I have seen very few wifi drivers calling these in MAC flow,

Very few is already too many. But I'm afraid you're missing the point
entirely: This patch is about using handle_level_irq as the flow
handler. The first thing handle_level_irq does is to mask the
interrupt. If you have a competing mask or unmask operation in progress
on another CPU (or in the middle of one on the same CPU when the
interrupt fired), your system explodes.

Please have a look at Documentation/DocBook/kernel-locking.tmpl.

> raw_spin_lock_irqsave
> looks more safe, will do it.

Thanks,

	M.
Bharat Kumar Gogada Jan. 31, 2017, 12:03 p.m. UTC | #4
> Subject: Re: [PATCH v3] PCI: Xilinx NWL: Modifying irq chip for legacy interrupts
>
> On Tue, Jan 31 2017 at 09:34:43 AM, Bharat Kumar Gogada
> <bharat.kumar.gogada@xilinx.com> wrote:
> >  > On Tue, Jan 31 2017 at 08:59:12 AM, Bharat Kumar Gogada
> >> <bharat.kumar.gogada@xilinx.com> wrote:
> >> > - Adding mutex lock for protecting legacy mask register
> >> > - Few wifi end points which only support legacy interrupts,
> >> > performs hardware reset functionalities after disabling interrupts
> >> > by invoking disable_irq and then re-enable using enable_irq, they
> >> > enable hardware interrupts first and then virtual irq line later.
> >> > - The legacy irq line goes low only after DEASSERT_INTx is
> >> > received.As the legacy irq line is high immediately after hardware
> >> > interrupts are enabled but virq of EP is still in disabled state
> >> > and EP handler is never executed resulting no DEASSERT_INTx.If
> >> > dummy irq chip is used, interrutps are not masked and system is hanging
> with CPU stall.
> >> > - Adding irq chip functions instead of dummy irq chip for legacy
> >> > interrupts.
> >> > - Legacy interrupts are level sensitive, so using handle_level_irq
> >> > is more appropriate as it is masks interrupts until End point
> >> > handles interrupts and unmasks interrutps after End point handler is
> executed.
> >> > - Legacy interrupts are level triggered, virtual irq line of End
> >> > Point shows as edge in /proc/interrupts.
> >> > - Setting irq flags of virtual irq line of EP to level triggered at
> >> > the time of mapping.
> >> >
> >> > Signed-off-by: Bharat Kumar Gogada <bharatku@xilinx.com>
> >> > ---
> >> >  drivers/pci/host/pcie-xilinx-nwl.c |   43
> >> +++++++++++++++++++++++++++++++++++-
> >> >  1 files changed, 42 insertions(+), 1 deletions(-)
> >> >
> >> > diff --git a/drivers/pci/host/pcie-xilinx-nwl.c
> >> > b/drivers/pci/host/pcie-xilinx-nwl.c
> >> > index 43eaa4a..76dd094 100644
> >> > --- a/drivers/pci/host/pcie-xilinx-nwl.c
> >> > +++ b/drivers/pci/host/pcie-xilinx-nwl.c
> >> > @@ -184,6 +184,7 @@ struct nwl_pcie {
> >> >          u8 root_busno;
> >> >          struct nwl_msi msi;
> >> >          struct irq_domain *legacy_irq_domain;
> >> > +        struct mutex leg_mask_lock;
> >> >  };
> >> >
> >> >  static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
> >> > @@
> >> > -395,11 +396,50 @@ static void nwl_pcie_msi_handler_low(struct
> >> > irq_desc
> >> *desc)
> >> >          chained_irq_exit(chip, desc);
> >> >  }
> >> >
> >> > +static void nwl_mask_leg_irq(struct irq_data *data) {
> >> > +        struct irq_desc *desc = irq_to_desc(data->irq);
> >> > +        struct nwl_pcie *pcie;
> >> > +        u32 mask;
> >> > +        u32 val;
> >> > +
> >> > +        pcie = irq_desc_get_chip_data(desc);
> >> > +        mask = 1 << (data->hwirq - 1);
> >> > +        mutex_lock(&pcie->leg_mask_lock);
> >> > +        val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
> >> > +        nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
> >> > +        mutex_unlock(&pcie->leg_mask_lock);
> >>
> >> Have you looked at which context this is called in? In a number of
> >> cases, the mask/unmask methods are called whilst you're in an
> >> interrupt context. If you sleep there (which is what happens with a contended
> mutex), you die horribly.
> >>
> >> Given these constraints, you should use raw_spin_lock_irqsave and co,
> >> since this can be called from both interrupt and non-interrupt
> >> contexts.
> >>
> > I have seen very few wifi drivers calling these in MAC flow,
>
> Very few is already too many. But I'm afraid you're missing the point
> entirely: This patch is about using handle_level_irq as the flow handler. The first
> thing handle_level_irq does is to mask the interrupt. If you have a competing
> mask or unmask operation in progress on another CPU (or in the middle of one
> on the same CPU when the interrupt fired), your system explodes.
>
> Please have a look at Documentation/DocBook/kernel-locking.tmpl.
>
> > raw_spin_lock_irqsave
> > looks more safe, will do it.
>
Yeah, understood, thanks for the details.

Thanks & Regards,
Bharat


This email and any attachments are intended for the sole use of the named recipient(s) and contain(s) confidential information that may be proprietary, privileged or copyrighted under applicable law. If you are not the intended recipient, do not read, copy, or forward this email message or any attachments. Delete this email message and any attachments immediately.

--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 43eaa4a..76dd094 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -184,6 +184,7 @@  struct nwl_pcie {
 	u8 root_busno;
 	struct nwl_msi msi;
 	struct irq_domain *legacy_irq_domain;
+	struct mutex leg_mask_lock;
 };
 
 static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
@@ -395,11 +396,50 @@  static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
+static void nwl_mask_leg_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct nwl_pcie *pcie;
+	u32 mask;
+	u32 val;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << (data->hwirq - 1);
+	mutex_lock(&pcie->leg_mask_lock);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
+	mutex_unlock(&pcie->leg_mask_lock);
+}
+
+static void nwl_unmask_leg_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct nwl_pcie *pcie;
+	u32 mask;
+	u32 val;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << (data->hwirq - 1);
+	mutex_lock(&pcie->leg_mask_lock);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
+	mutex_unlock(&pcie->leg_mask_lock);
+}
+
+static struct irq_chip nwl_leg_irq_chip = {
+	.name = "nwl_pcie:legacy",
+	.irq_enable = nwl_unmask_leg_irq,
+	.irq_disable = nwl_mask_leg_irq,
+	.irq_mask = nwl_mask_leg_irq,
+	.irq_unmask = nwl_unmask_leg_irq,
+};
+
 static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
 			  irq_hw_number_t hwirq)
 {
-	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
 	irq_set_chip_data(irq, domain->host_data);
+	irq_set_status_flags(irq, IRQ_LEVEL);
 
 	return 0;
 }
@@ -538,6 +578,7 @@  static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 		return -ENOMEM;
 	}
 
+	mutex_init(&pcie->leg_mask_lock);
 	nwl_pcie_init_msi_irq_domain(pcie);
 	return 0;
 }