@@ -1195,7 +1195,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
return;
}
- /* If another context is idling the device then defer */
+ /* If another context is idling the device then defer to kthread */
if (ctlr->idling) {
kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
@@ -1209,34 +1209,10 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
return;
}
- /* Only do teardown in the thread */
- if (!in_kthread) {
- kthread_queue_work(&ctlr->kworker,
- &ctlr->pump_messages);
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
- }
-
- ctlr->busy = false;
- ctlr->idling = true;
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-
- kfree(ctlr->dummy_rx);
- ctlr->dummy_rx = NULL;
- kfree(ctlr->dummy_tx);
- ctlr->dummy_tx = NULL;
- if (ctlr->unprepare_transfer_hardware &&
- ctlr->unprepare_transfer_hardware(ctlr))
- dev_err(&ctlr->dev,
- "failed to unprepare transfer hardware\n");
- if (ctlr->auto_runtime_pm) {
- pm_runtime_mark_last_busy(ctlr->dev.parent);
- pm_runtime_put_autosuspend(ctlr->dev.parent);
- }
- trace_spi_controller_idle(ctlr);
-
- spin_lock_irqsave(&ctlr->queue_lock, flags);
- ctlr->idling = false;
+ /* schedule idle teardown with a delay of 1 second */
+ kthread_mod_delayed_work(&ctlr->kworker,
+ &ctlr->pump_idle_teardown,
+ HZ);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
@@ -1329,6 +1305,77 @@ static void spi_pump_messages(struct kthread_work *work)
__spi_pump_messages(ctlr, true);
}
+/**
+ * spi_pump_idle_teardown - kthread delayed work function that tears down
+ * the controller hardware state after the message queue has been idle
+ * @work: pointer to the kthread work struct embedded in the controller struct
+ */
+static void spi_pump_idle_teardown(struct kthread_work *work)
+{
+ struct spi_controller *ctlr =
+ container_of(work, struct spi_controller,
+ pump_idle_teardown.work);
+ unsigned long flags;
+
+ /* Lock queue */
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+
+ /* Make sure we are not already running a message */
+ if (ctlr->cur_msg)
+ goto out;
+
+ /* if there is anything in the list then exit */
+ if (!list_empty(&ctlr->queue))
+ goto out;
+
+ /* if the controller is running then exit */
+ if (ctlr->running)
+ goto out;
+
+ /* if the controller is busy then exit */
+ if (ctlr->busy)
+ goto out;
+
+ /* if the controller is already idling then exit;
+ * this would indicate that this work item has been
+ * scheduled twice, which should never happen
+ */
+ if (ctlr->idling)
+ goto out;
+
+ /* set up the initial states */
+ ctlr->busy = false;
+ ctlr->idling = true;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+ /* free dummy receive buffers */
+ kfree(ctlr->dummy_rx);
+ ctlr->dummy_rx = NULL;
+ kfree(ctlr->dummy_tx);
+ ctlr->dummy_tx = NULL;
+
+ /* unprepare hardware */
+ if (ctlr->unprepare_transfer_hardware &&
+ ctlr->unprepare_transfer_hardware(ctlr))
+ dev_err(&ctlr->dev,
+ "failed to unprepare transfer hardware\n");
+ /* handle pm */
+ if (ctlr->auto_runtime_pm) {
+ pm_runtime_mark_last_busy(ctlr->dev.parent);
+ pm_runtime_put_autosuspend(ctlr->dev.parent);
+ }
+
+ /* mark controller as idle */
+ trace_spi_controller_idle(ctlr);
+
+ /* finally, clear idling to complete the transition to stopped */
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ ctlr->idling = false;
+
+out:
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+}
+
static int spi_init_queue(struct spi_controller *ctlr)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
@@ -1344,7 +1391,8 @@ static int spi_init_queue(struct spi_controller *ctlr)
return PTR_ERR(ctlr->kworker_task);
}
kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
-
+ kthread_init_delayed_work(&ctlr->pump_idle_teardown,
+ spi_pump_idle_teardown);
/*
* Controller config will indicate if this controller should run the
* message pump with high (realtime) priority to reduce the transfer
@@ -1416,7 +1464,16 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
spin_lock_irqsave(&ctlr->queue_lock, flags);
ctlr->cur_msg = NULL;
ctlr->cur_msg_prepared = false;
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+
+ /* if there is something queued, then wake the queue */
+ if (!list_empty(&ctlr->queue))
+ kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ else
+ /* otherwise schedule delayed teardown */
+ kthread_mod_delayed_work(&ctlr->kworker,
+ &ctlr->pump_idle_teardown,
+ HZ);
+
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
trace_spi_message_done(mesg);
@@ -1521,7 +1578,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
msg->status = -EINPROGRESS;
list_add_tail(&msg->queue, &ctlr->queue);
- if (!ctlr->busy && need_pump)
+ if (need_pump)
kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
@@ -3618,4 +3675,3 @@ static int __init spi_init(void)
* include needing to have boardinfo data structures be much more public.
*/
postcore_initcall(spi_init);
-
@@ -326,6 +326,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @kworker: thread struct for message pump
* @kworker_task: pointer to task for message pump kworker thread
* @pump_messages: work struct for scheduling work to the message pump
+ * @pump_idle_teardown: delayed work struct for scheduling idle teardown of the controller
* @queue_lock: spinlock to syncronise access to message queue
* @queue: message queue
* @idling: the device is entering idle state
@@ -516,6 +517,7 @@ struct spi_controller {
struct kthread_worker kworker;
struct task_struct *kworker_task;
struct kthread_work pump_messages;
+ struct kthread_delayed_work pump_idle_teardown;
spinlock_t queue_lock;
struct list_head queue;
struct spi_message *cur_msg;