@@ -241,6 +241,13 @@ static int dma_chan_get(struct dma_chan *chan)
goto err_out;
}
+ if (chan->device->device_pm_get) {
+ ret = chan->device->device_pm_get(chan);
+ if (ret < 0)
+ goto err_out;
+ chan->pm_get_count = 1;
+ }
+
if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
balance_ref_count(chan);
@@ -268,11 +275,44 @@ static void dma_chan_put(struct dma_chan *chan)
chan->client_count--;
module_put(dma_chan_to_owner(chan));
+ if (chan->device->device_pm_put && chan->pm_get_count)
+ chan->device->device_pm_put(chan);
+
/* This channel is not in use anymore, free it */
if (!chan->client_count && chan->device->device_free_chan_resources)
chan->device->device_free_chan_resources(chan);
}
+int dmaengine_pm_get(struct dma_chan *chan)
+{
+ int ret = -ENOSYS;
+
+ if (chan->device->device_pm_get) {
+ mutex_lock(&dma_list_mutex);
+ ret = chan->pm_get_count++ ?
+ 0 : chan->device->device_pm_get(chan);
+ mutex_unlock(&dma_list_mutex);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dmaengine_pm_get);
+
+int dmaengine_pm_put(struct dma_chan *chan)
+{
+ int ret = -ENOSYS;
+
+ if (chan->device->device_pm_put) {
+ mutex_lock(&dma_list_mutex);
+ ret = --chan->pm_get_count ?
+ 0 : chan->device->device_pm_put(chan);
+ mutex_unlock(&dma_list_mutex);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dmaengine_pm_put);
+
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
enum dma_status status;
@@ -251,6 +251,7 @@ struct dma_chan {
struct dma_chan_percpu __percpu *local;
int client_count;
int table_count;
+ int pm_get_count;
void *private;
};
@@ -678,6 +679,8 @@ struct dma_device {
int (*device_config)(struct dma_chan *chan,
struct dma_slave_config *config);
+ int (*device_pm_get)(struct dma_chan *chan);
+ int (*device_pm_put)(struct dma_chan *chan);
int (*device_pause)(struct dma_chan *chan);
int (*device_resume)(struct dma_chan *chan);
int (*device_terminate_all)(struct dma_chan *chan);
@@ -891,6 +894,8 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
+int dmaengine_pm_get(struct dma_chan *chan);
+int dmaengine_pm_put(struct dma_chan *chan);
#else
static inline void dmaengine_get(void)
{
@@ -898,6 +903,14 @@ static inline void dmaengine_get(void)
static inline void dmaengine_put(void)
{
}
+static inline int dmaengine_pm_get(struct dma_chan *chan)
+{
+ return -ENXIO;
+}
+static inline int dmaengine_pm_put(struct dma_chan *chan)
+{
+ return -ENXIO;
+}
#endif
#ifdef CONFIG_ASYNC_TX_DMA
This patch introduces the dmaengine_pm_{get,put}() functions. They allow for better power management: dma_issue_pending() can be called from atomic context, so non-irqsafe runtime PM cannot be used inside it. The new API functions may be called only from non-atomic context, and they let a client tell the dmaengine driver that it is going to use the given DMA channel in the near future. As a result, the power domain can be switched off when the DMA client does not plan to use the DMA channel for some time, which can save power.

For backward compatibility we call dmaengine_pm_get() automatically when the channel is requested. If a client wants to use dmaengine_pm, it can call dmaengine_pm_put() after obtaining the channel, and then bracket access to the channel with dmaengine_pm_get()/dmaengine_pm_put() calls.

Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
---
 drivers/dma/dmaengine.c   | 40 ++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h | 13 +++++++++++++
 2 files changed, 53 insertions(+)
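
For illustration, a minimal sketch of the client-side pattern described above. This is not part of the patch: the function names, the "tx" channel name, and the abbreviated error handling are hypothetical, and it assumes a slave DMA client running in process context.

/*
 * Hypothetical client sketch: request the channel as usual, drop the
 * implicit PM reference taken at request time, then bracket each burst
 * of DMA activity with dmaengine_pm_get()/dmaengine_pm_put() from
 * non-atomic context.
 */
#include <linux/dmaengine.h>

static int example_setup_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx"); /* "tx" is an assumed name */
	if (!chan)
		return -ENODEV;

	/* Opt in to dmaengine_pm: release the reference taken on request. */
	dmaengine_pm_put(chan);

	*out = chan;
	return 0;
}

static int example_start_transfers(struct dma_chan *chan)
{
	int ret;

	/* Process context: power up the channel before queueing work. */
	ret = dmaengine_pm_get(chan);
	if (ret < 0 && ret != -ENOSYS)
		return ret;

	/* ... prep descriptors, dmaengine_submit(), dma_async_issue_pending() ... */
	return 0;
}

static void example_stop_transfers(struct dma_chan *chan)
{
	/* All pending transfers are done; allow the power domain to be gated. */
	dmaengine_pm_put(chan);
}

Note that dmaengine_pm_get() returns -ENOSYS when the controller does not implement device_pm_get(), so a client can treat that value as "nothing to do" rather than as a failure.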