@@ -521,6 +521,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
 	int ret = 0;
 	u32 intr;
 	int loopcount = WL1271_IRQ_MAX_LOOPS;
+	bool run_tx_queue = true;
 	bool done = false;
 	unsigned int defer_count;
 	unsigned long flags;
@@ -586,19 +587,22 @@ static int wlcore_irq_locked(struct wl1271 *wl)
 				goto err_ret;
 
 			/* Check if any tx blocks were freed */
-			spin_lock_irqsave(&wl->wl_lock, flags);
-			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
-			    wl1271_tx_total_queue_count(wl) > 0) {
-				spin_unlock_irqrestore(&wl->wl_lock, flags);
+			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
+				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
+					if (!wl1271_tx_total_queue_count(wl))
+						run_tx_queue = false;
+					spin_unlock_irqrestore(&wl->wl_lock, flags);
+				}
+
 				/*
 				 * In order to avoid starvation of the TX path,
 				 * call the work function directly.
 				 */
-				ret = wlcore_tx_work_locked(wl);
-				if (ret < 0)
-					goto err_ret;
-			} else {
-				spin_unlock_irqrestore(&wl->wl_lock, flags);
+				if (run_tx_queue) {
+					ret = wlcore_tx_work_locked(wl);
+					if (ret < 0)
+						goto err_ret;
+				}
 			}
 
 			/* check for tx results */
We currently have a collection of flags and locking between the
threaded irq and tx work:

- wl->flags bitops
- wl->mutex
- wl->wl_lock spinlock

The bitops flags do not need a spinlock around them, and wlcore_irq()
already holds the mutex when calling wlcore_irq_locked(). We only need
the spinlock to see if we need to run the queue or not.

To simplify the locking, we can use spin_trylock and always run the tx
queue unless we know there's nothing to do.

Signed-off-by: Tony Lindgren <tony@atomide.com>
---
 drivers/net/wireless/ti/wlcore/main.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
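
For illustration only, here is a minimal userspace sketch of the same
"trylock, default to running the work" pattern. It is not driver code:
pthread_mutex_trylock stands in for spin_trylock_irqsave, and the
queue_count and run_tx_work() names are made up for the example. The
idea matches the hunk above: assume the queue needs to run, skip it
only when the lock can be taken and the queue is seen to be empty, and
if the lock is contended just run the work anyway, which is harmless.

/* Build with: cc -pthread example.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_count = 3;	/* pretend some tx frames are queued */

static void run_tx_work(void)
{
	printf("running tx work\n");
}

int main(void)
{
	bool run_tx_queue = true;

	/*
	 * Default to running the queue; only skip it when we manage to
	 * take the lock and can see that the queue is empty. A failed
	 * trylock means we run the queue without knowing the count.
	 */
	if (pthread_mutex_trylock(&queue_lock) == 0) {
		if (queue_count == 0)
			run_tx_queue = false;
		pthread_mutex_unlock(&queue_lock);
	}

	if (run_tx_queue)
		run_tx_work();

	return 0;
}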