
[v4] PCI: Xilinx NWL: Modifying irq chip for legacy interrupts

Message ID 1486120110-15205-1-git-send-email-bharatku@xilinx.com (mailing list archive)
State New, archived
Delegated to: Bjorn Helgaas

Commit Message

Bharat Kumar Gogada Feb. 3, 2017, 11:08 a.m. UTC
- Add a spinlock to protect the legacy interrupt mask register.
- A few WiFi endpoints that support only legacy interrupts perform a
hardware reset after disabling interrupts with disable_irq() and
re-enabling them with enable_irq(); they enable the hardware interrupt
first and the virtual irq line only later.
- The legacy irq line goes low only after DEASSERT_INTx is received.
Because the line is asserted as soon as hardware interrupts are
enabled while the endpoint's virq is still disabled, the endpoint
handler never runs and no DEASSERT_INTx is ever sent. With the dummy
irq chip the interrupt is never masked and the system hangs with a
CPU stall.
- Add proper irq chip functions for legacy interrupts instead of the
dummy irq chip.
- Legacy interrupts are level sensitive, so handle_level_irq is more
appropriate: it masks the interrupt until the endpoint handler has run
and unmasks it afterwards.
- Although legacy interrupts are level triggered, the endpoint's
virtual irq line shows up as edge triggered in /proc/interrupts.
- Set the irq flags of the endpoint's virtual irq line to level
triggered at mapping time.

Signed-off-by: Bharat Kumar Gogada <bharatku@xilinx.com>
---
 drivers/pci/host/pcie-xilinx-nwl.c |   45 +++++++++++++++++++++++++++++++++++-
 1 files changed, 44 insertions(+), 1 deletions(-)

Comments

Marc Zyngier Feb. 3, 2017, 11:43 a.m. UTC | #1
On 03/02/17 11:08, Bharat Kumar Gogada wrote:
> [...]
> diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
> index 43eaa4a..e4605f9 100644
> --- a/drivers/pci/host/pcie-xilinx-nwl.c
> +++ b/drivers/pci/host/pcie-xilinx-nwl.c
> @@ -184,6 +184,7 @@ struct nwl_pcie {
>  	u8 root_busno;
>  	struct nwl_msi msi;
>  	struct irq_domain *legacy_irq_domain;
> +	spinlock_t leg_mask_lock;
>  };
>  
>  static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
> @@ -395,11 +396,52 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
>  	chained_irq_exit(chip, desc);
>  }
>  
> +static void nwl_mask_leg_irq(struct irq_data *data)
> +{
> +	struct irq_desc *desc = irq_to_desc(data->irq);
> +	struct nwl_pcie *pcie;
> +	unsigned long flags;
> +	u32 mask;
> +	u32 val;
> +
> +	pcie = irq_desc_get_chip_data(desc);
> +	mask = 1 << (data->hwirq - 1);
> +	spin_lock_irqsave(&pcie->leg_mask_lock, flags);

I've asked you to use a raw spinlock for a reason. If using RT, this
gets turned into a sleeping lock...

	M.
Bharat Kumar Gogada Feb. 3, 2017, 12:16 p.m. UTC | #2
> On 03/02/17 11:08, Bharat Kumar Gogada wrote:
> > [...]
> > +static void nwl_mask_leg_irq(struct irq_data *data) {
> > +	struct irq_desc *desc = irq_to_desc(data->irq);
> > +	struct nwl_pcie *pcie;
> > +	unsigned long flags;
> > +	u32 mask;
> > +	u32 val;
> > +
> > +	pcie = irq_desc_get_chip_data(desc);
> > +	mask = 1 << (data->hwirq - 1);
> > +	spin_lock_irqsave(&pcie->leg_mask_lock, flags);
> 
> I've asked you to use a raw spinlock for a reason. If using RT, this gets turned
> into a sleeping lock...
> 
In include/linux/spinlock.h:
#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

The above macro already invokes raw_spin_lock_irqsave.
So is there any difference between raw_spin_lock_irqsave and spin_lock_irqsave?

Thanks & Regards,
Bharat
Marc Zyngier Feb. 3, 2017, 1:18 p.m. UTC | #3
On 03/02/17 12:16, Bharat Kumar Gogada wrote:
>>> [...]
>>> +	spin_lock_irqsave(&pcie->leg_mask_lock, flags);
>>
>> I've asked you to use a raw spinlock for a reason. If using RT, this gets turned
>> into a sleeping lock...
>>
>  In include/linux/spinlock.h 
> #define spin_lock_irqsave(lock, flags)                          \
> do {                                                            \
>         raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
> } while (0)
> 
> The above macro already invokes raw_spin_lock_irqsave.
> So is there any difference between raw_spin_lock_irqsave and spin_lock_irqsave?

When using RT, yes. RT upgrades all spinlocks to sleeping locks. We're
trying hard not to break RT, despite most of it not being merged yet. So
your patch will work on mainline Linux, and will break horribly on Linux/RT.

So please, turn this into a raw spinlock as requested.

Thanks,

	M.
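
For context, a minimal sketch of the distinction Marc is pointing at (the demo_* names are illustrative only, not from the patch): on mainline both lock types behave as IRQ-disabling spinning locks, but with the PREEMPT_RT patch set spinlock_t is converted into a sleeping lock, so only raw_spinlock_t remains safe to take from atomic context such as irq_chip mask/unmask callbacks.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);          /* becomes a sleeping lock on RT */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);  /* always a spinning lock */

static void demo_lock_usage(void)
{
	unsigned long flags;

	/* Fine on mainline; may sleep on Linux/RT, so it must not be
	 * used from atomic context such as an irq_chip callback.
	 */
	spin_lock_irqsave(&demo_lock, flags);
	spin_unlock_irqrestore(&demo_lock, flags);

	/* Always disables local interrupts and spins; safe in atomic context. */
	raw_spin_lock_irqsave(&demo_raw_lock, flags);
	raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}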
Bharat Kumar Gogada Feb. 3, 2017, 1:37 p.m. UTC | #4
> On 03/02/17 12:16, Bharat Kumar Gogada wrote:
> [...]
> When using RT, yes. RT upgrades all spinlocks to sleeping locks. We're trying
> hard not to break RT, despite most of it not being merged yet. So your patch will
> work on mainline Linux, and will break horribly on Linux/RT.
> 
> So please, turn this into a raw spinlock as requested.

Thanks for the explanation, will do it. 

Regards,
Bharat
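
For reference, a minimal, untested sketch of the conversion requested above, written against the v4 patch below (the actual v5 respin may differ; nwl_unmask_leg_irq would change the same way):

/* In struct nwl_pcie, use a raw spinlock so Linux/RT keeps it spinning: */
	raw_spinlock_t leg_mask_lock;

/* Mask helper taking the raw lock around the MSGF_LEG_MASK update: */
static void nwl_mask_leg_irq(struct irq_data *data)
{
	struct irq_desc *desc = irq_to_desc(data->irq);
	struct nwl_pcie *pcie = irq_desc_get_chip_data(desc);
	unsigned long flags;
	u32 mask = 1 << (data->hwirq - 1);
	u32 val;

	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}

/* In nwl_pcie_init_irq_domain(): */
	raw_spin_lock_init(&pcie->leg_mask_lock);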

Patch

diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 43eaa4a..e4605f9 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -184,6 +184,7 @@  struct nwl_pcie {
 	u8 root_busno;
 	struct nwl_msi msi;
 	struct irq_domain *legacy_irq_domain;
+	spinlock_t leg_mask_lock;
 };
 
 static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
@@ -395,11 +396,52 @@  static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
+static void nwl_mask_leg_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct nwl_pcie *pcie;
+	unsigned long flags;
+	u32 mask;
+	u32 val;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << (data->hwirq - 1);
+	spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
+	spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static void nwl_unmask_leg_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct nwl_pcie *pcie;
+	unsigned long flags;
+	u32 mask;
+	u32 val;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << (data->hwirq - 1);
+	spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
+	spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static struct irq_chip nwl_leg_irq_chip = {
+	.name = "nwl_pcie:legacy",
+	.irq_enable = nwl_unmask_leg_irq,
+	.irq_disable = nwl_mask_leg_irq,
+	.irq_mask = nwl_mask_leg_irq,
+	.irq_unmask = nwl_unmask_leg_irq,
+};
+
 static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
 			  irq_hw_number_t hwirq)
 {
-	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
 	irq_set_chip_data(irq, domain->host_data);
+	irq_set_status_flags(irq, IRQ_LEVEL);
 
 	return 0;
 }
@@ -538,6 +580,7 @@  static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&pcie->leg_mask_lock);
 	nwl_pcie_init_msi_irq_domain(pcie);
 	return 0;
 }